diff --git a/docs/about-us/beta-and-experimental-features.md b/docs/about-us/beta-and-experimental-features.md
index b681c86f814..9e6deba1b71 100644
--- a/docs/about-us/beta-and-experimental-features.md
+++ b/docs/about-us/beta-and-experimental-features.md
@@ -128,10 +128,8 @@ Please note: no additional experimental features are allowed to be enabled in Cl
| [allow_experimental_join_right_table_sorting](/operations/settings/settings#allow_experimental_join_right_table_sorting) | `0` |
| [allow_statistics_optimize](/operations/settings/settings#allow_statistics_optimize) | `0` |
| [allow_experimental_statistics](/operations/settings/settings#allow_experimental_statistics) | `0` |
+| [use_statistics_cache](/operations/settings/settings#use_statistics_cache) | `0` |
| [allow_experimental_full_text_index](/operations/settings/settings#allow_experimental_full_text_index) | `0` |
-| [allow_experimental_live_view](/operations/settings/settings#allow_experimental_live_view) | `0` |
-| [live_view_heartbeat_interval](/operations/settings/settings#live_view_heartbeat_interval) | `15` |
-| [max_live_view_insert_blocks_before_refresh](/operations/settings/settings#max_live_view_insert_blocks_before_refresh) | `64` |
| [allow_experimental_window_view](/operations/settings/settings#allow_experimental_window_view) | `0` |
| [window_view_clean_interval](/operations/settings/settings#window_view_clean_interval) | `60` |
| [window_view_heartbeat_interval](/operations/settings/settings#window_view_heartbeat_interval) | `15` |
@@ -159,6 +157,7 @@ Please note: no additional experimental features are allowed to be enabled in Cl
| [allow_experimental_ytsaurus_dictionary_source](/operations/settings/settings#allow_experimental_ytsaurus_dictionary_source) | `0` |
| [distributed_plan_force_shuffle_aggregation](/operations/settings/settings#distributed_plan_force_shuffle_aggregation) | `0` |
| [enable_join_runtime_filters](/operations/settings/settings#enable_join_runtime_filters) | `0` |
+| [join_runtime_filter_exact_values_limit](/operations/settings/settings#join_runtime_filter_exact_values_limit) | `10000` |
| [join_runtime_bloom_filter_bytes](/operations/settings/settings#join_runtime_bloom_filter_bytes) | `524288` |
| [join_runtime_bloom_filter_hash_functions](/operations/settings/settings#join_runtime_bloom_filter_hash_functions) | `3` |
| [rewrite_in_to_join](/operations/settings/settings#rewrite_in_to_join) | `0` |
diff --git a/docs/integrations/language-clients/java/client/_snippets/_v0_7.mdx b/docs/integrations/language-clients/java/client/_snippets/_v0_7.mdx
deleted file mode 100644
index 37e317052bb..00000000000
--- a/docs/integrations/language-clients/java/client/_snippets/_v0_7.mdx
+++ /dev/null
@@ -1,336 +0,0 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-A Java client library for communicating with a DB server through its protocols. The current implementation supports only the [HTTP interface](/interfaces/http). The library provides its own API to send requests to a server.
-
-:::warning Deprecation
-This library will be deprecated soon. Use the latest [Java Client](/integrations/language-clients/java/client/client.mdx) for new projects
-:::
-
-## Setup {#setup}
-
-
-
-
-```xml
-<!-- https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client -->
-<dependency>
-    <groupId>com.clickhouse</groupId>
-    <artifactId>clickhouse-http-client</artifactId>
-    <version>0.7.2</version>
-</dependency>
-```
-
-
-
-
-```kotlin
-// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client
-implementation("com.clickhouse:clickhouse-http-client:0.7.2")
-```
-
-
-
-```groovy
-// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client
-implementation 'com.clickhouse:clickhouse-http-client:0.7.2'
-```
-
-
-
-
-Since version `0.5.0`, the driver uses a new HTTP client library that needs to be added as a dependency.
-
-
-
-
-```xml
-<!-- https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5 -->
-<dependency>
-    <groupId>org.apache.httpcomponents.client5</groupId>
-    <artifactId>httpclient5</artifactId>
-    <version>5.3.1</version>
-</dependency>
-```
-
-
-
-
-```kotlin
-// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5
-implementation("org.apache.httpcomponents.client5:httpclient5:5.3.1")
-```
-
-
-
-```groovy
-// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5
-implementation 'org.apache.httpcomponents.client5:httpclient5:5.3.1'
-```
-
-
-
-
-## Initialization {#initialization}
-
-Connection URL Format: `protocol://host[:port][/database][?param[=value][&param[=value]][#tag[,tag]]`, for example:
-
-- `http://localhost:8443?ssl=true&sslmode=NONE`
-- `https://explorer@play.clickhouse.com:443`
-
-Connect to a single node:
-
-```java showLineNumbers
-ClickHouseNode server = ClickHouseNode.of("http://localhost:8123/default?compress=0");
-```
-Connect to a cluster with multiple nodes:
-
-```java showLineNumbers
-ClickHouseNodes servers = ClickHouseNodes.of(
- "jdbc:ch:http://server1.domain,server2.domain,server3.domain/my_db"
- + "?load_balancing_policy=random&health_check_interval=5000&failover=2");
-```
-
-## Query API {#query-api}
-
-```java showLineNumbers
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
- ClickHouseResponse response = client.read(servers)
- .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
- .query("select * from numbers limit :limit")
- .params(1000)
- .executeAndWait()) {
- ClickHouseResponseSummary summary = response.getSummary();
- long totalRows = summary.getTotalRowsToRead();
-}
-```
-
-## Streaming Query API {#streaming-query-api}
-
-```java showLineNumbers
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
- ClickHouseResponse response = client.read(servers)
- .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
- .query("select * from numbers limit :limit")
- .params(1000)
- .executeAndWait()) {
- for (ClickHouseRecord r : response.records()) {
- int num = r.getValue(0).asInteger();
- // type conversion
- String str = r.getValue(0).asString();
- LocalDate date = r.getValue(0).asDate();
- }
-}
-```
-
-See [complete code example](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L73) in the [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client).
-
-## Insert API {#insert-api}
-
-```java showLineNumbers
-
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
- ClickHouseResponse response = client.read(servers).write()
- .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
- .query("insert into my_table select c2, c3 from input('c1 UInt8, c2 String, c3 Int32')")
- .data(myInputStream) // `myInputStream` is source of data in RowBinary format
- .executeAndWait()) {
- ClickHouseResponseSummary summary = response.getSummary();
- summary.getWrittenRows();
-}
-```
-
-See [complete code example](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L39) in the [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client).
-
-**RowBinary Encoding**
-
-RowBinary format is described on its [page](/interfaces/formats/RowBinaryWithNamesAndTypes).
-
-There is an example of [code](https://github.com/ClickHouse/clickhouse-kafka-connect/blob/main/src/main/java/com/clickhouse/kafka/connect/sink/db/ClickHouseWriter.java#L622).
-
-## Features {#features}
-### Compression {#compression}
-
-The client will by default use LZ4 compression, which requires this dependency:
-
-
-
-
-```xml
-<!-- https://mvnrepository.com/artifact/org.lz4/lz4-java -->
-<dependency>
-    <groupId>org.lz4</groupId>
-    <artifactId>lz4-java</artifactId>
-    <version>1.8.0</version>
-</dependency>
-```
-
-
-
-
-```kotlin
-// https://mvnrepository.com/artifact/org.lz4/lz4-java
-implementation("org.lz4:lz4-java:1.8.0")
-```
-
-
-
-```groovy
-// https://mvnrepository.com/artifact/org.lz4/lz4-java
-implementation 'org.lz4:lz4-java:1.8.0'
-```
-
-
-
-
-You can choose to use gzip instead by setting `compress_algorithm=gzip` in the connection URL.
-
-Alternatively, you can disable compression in a few ways:
-
-1. Disable by setting `compress=0` in the connection URL: `http://localhost:8123/default?compress=0`
-2. Disable via the client configuration:
-
-```java showLineNumbers
-ClickHouseClient client = ClickHouseClient.builder()
- .config(new ClickHouseConfig(Map.of(ClickHouseClientOption.COMPRESS, false)))
- .nodeSelector(ClickHouseNodeSelector.of(ClickHouseProtocol.HTTP))
- .build();
-```
-
-See the [compression documentation](/data-compression/compression-modes) to learn more about different compression options.
-
-### Multiple queries {#multiple-queries}
-
-Execute multiple queries in a worker thread one after another within the same session:
-
-```java showLineNumbers
-CompletableFuture<List<ClickHouseResponseSummary>> future = ClickHouseClient.send(servers.apply(servers.getNodeSelector()),
-    "create database if not exists my_base",
-    "use my_base",
-    "create table if not exists test_table(s String) engine=Memory",
-    "insert into test_table values('1')('2')('3')",
-    "select * from test_table limit 1",
-    "truncate table test_table",
-    "drop table if exists test_table");
-List<ClickHouseResponseSummary> results = future.get();
-```
-
-### Named Parameters {#named-parameters}
-
-You can pass parameters by name rather than relying solely on their position in the parameter list. This capability is available using the `params` function.
-
-```java showLineNumbers
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
-     ClickHouseResponse response = client.read(servers)
-        .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
-        .query("select * from my_table where name=:name limit :limit")
-        .params("Ben", 1000)
-        .executeAndWait()) {
-    //...
-}
-```
-
-:::note Parameters
-All `params` signatures involving the `String` type (`String`, `String[]`, `Map<String, String>`) assume the values being passed are valid ClickHouse SQL strings. For instance:
-
-```java showLineNumbers
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
-     ClickHouseResponse response = client.read(servers)
-        .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
-        .query("select * from my_table where name=:name")
-        .params(Map.of("name","'Ben'"))
-        .executeAndWait()) {
-    //...
-}
-```
-
-If you prefer not to convert String objects to ClickHouse SQL manually, you can use the helper function `ClickHouseValues.convertToSqlExpression` located in `com.clickhouse.data`:
-
-```java showLineNumbers
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
-     ClickHouseResponse response = client.read(servers)
-        .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
-        .query("select * from my_table where name=:name")
-        .params(Map.of("name", ClickHouseValues.convertToSqlExpression("Ben's")))
-        .executeAndWait()) {
-    //...
-}
-```
-
-In the example above, `ClickHouseValues.convertToSqlExpression` will escape the inner single quote and surround the value with valid single quotes.
-
-Other types, such as `Integer`, `UUID`, `Array`, and `Enum`, will be converted automatically inside `params`.
-:::
-
-## Node Discovery {#node-discovery}
-
-The Java client provides the ability to discover ClickHouse nodes automatically. Auto-discovery is disabled by default. To enable it, set `auto_discovery` to `true`:
-
-```java
-properties.setProperty("auto_discovery", "true");
-```
-
-Or in the connection URL:
-
-```plaintext
-jdbc:ch://my-server/system?auto_discovery=true
-```
-
-If auto-discovery is enabled, there is no need to specify all ClickHouse nodes in the connection URL. Nodes specified in the URL will be treated as seeds, and the Java client will automatically discover more nodes from system tables and/or clickhouse-keeper or zookeeper.
-
-The following options are responsible for auto-discovery configuration:
-
-| Property | Default | Description |
-|-------------------------|---------|-------------------------------------------------------------------------------------------------------|
-| auto_discovery | `false` | Whether the client should discover more nodes from system tables and/or clickhouse-keeper/zookeeper. |
-| node_discovery_interval | `0` | Node discovery interval in milliseconds, zero or negative value means one-time discovery. |
-| node_discovery_limit | `100` | Maximum number of nodes that can be discovered at a time; zero or negative value means no limit. |
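-
-For example, periodic discovery with a cap on the number of discovered nodes can be configured through the same properties (the values below are illustrative):
-
-```java
-Properties properties = new Properties();
-properties.setProperty("auto_discovery", "true");
-properties.setProperty("node_discovery_interval", "30000"); // re-discover every 30 seconds
-properties.setProperty("node_discovery_limit", "50");       // discover at most 50 nodes at a time
-```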
-
-### Load Balancing {#load-balancing}
-
-The Java client chooses a ClickHouse node to send requests to, according to the load-balancing policy. In general, the load-balancing policy is responsible for the following things:
-
-1. Getting a node from the managed node list.
-2. Managing the node's status.
-3. Optionally scheduling a background process for node discovery (if auto-discovery is enabled) and running health checks.
-
-Here is a list of options to configure load balancing:
-
-| Property | Default | Description |
-|-----------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| load_balancing_policy | `""` | The load-balancing policy can be one of:<br/>`firstAlive` - the request is sent to the first healthy node from the managed node list<br/>`random` - the request is sent to a random node from the managed node list<br/>`roundRobin` - the request is sent to each node from the managed node list, in turn<br/>a fully qualified class name implementing `ClickHouseLoadBalancingPolicy` - a custom load-balancing policy<br/>If it is not specified, the request is sent to the first node from the managed node list |
-| load_balancing_tags | `""` | Load balancing tags for filtering out nodes. Requests are sent only to nodes that have the specified tags |
-| health_check_interval | `0` | Health check interval in milliseconds, zero or negative value means one-time. |
-| health_check_method | `ClickHouseHealthCheckMethod.SELECT_ONE` | Health check method. Can be one of:<br/>`ClickHouseHealthCheckMethod.SELECT_ONE` - check with a `select 1` query<br/>`ClickHouseHealthCheckMethod.PING` - protocol-specific check, which is generally faster |
-| node_check_interval | `0` | Node check interval in milliseconds; a negative value is treated as zero. The node status is checked if the specified amount of time has passed since the last check.<br/>The difference between `health_check_interval` and `node_check_interval` is that the `health_check_interval` option schedules a background job which checks the status of the list of nodes (all or faulty), while `node_check_interval` specifies the amount of time that has to pass since the last check of a particular node |
-| check_all_nodes | `false` | Whether to perform a health check against all nodes or just faulty ones. |
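-
-As an illustration (the values are hypothetical), a policy and periodic health checks can be configured through the same properties:
-
-```java
-Properties properties = new Properties();
-properties.setProperty("load_balancing_policy", "roundRobin");
-properties.setProperty("health_check_interval", "5000"); // check node health every 5 seconds
-properties.setProperty("check_all_nodes", "true");       // check all nodes, not only faulty ones
-```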
-
-### Failover and retry {#failover-and-retry}
-
-Java client provides configuration options to set up failover and retry behavior for failed queries:
-
-| Property | Default | Description |
-|-------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| failover | `0` | Maximum number of times a failover can happen for a request. Zero or a negative value means no failover. Failover sends the failed request to a different node (according to the load-balancing policy) in order to recover from the failure. |
-| retry | `0` | Maximum number of times a retry can happen for a request. Zero or a negative value means no retry. Retry sends the request to the same node, and only if the ClickHouse server returns the `NETWORK_ERROR` error code. |
-| repeat_on_session_lock | `true` | Whether to repeat execution when the session is locked, until it times out (according to `session_timeout` or `connect_timeout`). The failed request is repeated if the ClickHouse server returns the `SESSION_IS_LOCKED` error code. |
-
-### Adding custom HTTP headers {#adding-custom-http-headers}
-
-The Java client supports the HTTP/S transport layer, which makes it possible to add custom HTTP headers to a request.
-Use the `custom_http_headers` property; headers must be `,` separated, and each header's key and value must be separated with `=`.
-
-#### Java Client support {#java-client-support}
-
-```java
-options.put("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test");
-```
-
-#### JDBC Driver {#jdbc-driver}
-
-```java
-properties.setProperty("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test");
-```
diff --git a/docs/integrations/language-clients/java/client/_snippets/_v0_8.mdx b/docs/integrations/language-clients/java/client/_snippets/_v0_8.mdx
deleted file mode 100644
index 7ddf0d2b6b7..00000000000
--- a/docs/integrations/language-clients/java/client/_snippets/_v0_8.mdx
+++ /dev/null
@@ -1,783 +0,0 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import WideTableWrapper from '@site/src/components/WideTableWrapper/WideTableWrapper';
-
-A Java client library for communicating with a DB server through its protocols. The current implementation supports only the [HTTP interface](/interfaces/http).
-The library provides its own API to send requests to a server, and tools to work with different binary data formats (RowBinary* & Native*).
-
-## Setup {#setup}
-
-- Maven Central (project web page): https://mvnrepository.com/artifact/com.clickhouse/client-v2
-- Nightly builds (repository link): https://central.sonatype.com/repository/maven-snapshots/
-- Old Nightly builds artifactory (repository link): https://s01.oss.sonatype.org/content/repositories/snapshots/
-
-
-
-
-```xml
-<dependency>
-    <groupId>com.clickhouse</groupId>
-    <artifactId>client-v2</artifactId>
-    <version>0.9.1</version>
-</dependency>
-```
-
-
-
-
-```kotlin
-// https://mvnrepository.com/artifact/com.clickhouse/client-v2
-implementation("com.clickhouse:client-v2:0.9.1")
-```
-
-
-
-```groovy
-// https://mvnrepository.com/artifact/com.clickhouse/client-v2
-implementation 'com.clickhouse:client-v2:0.9.1'
-```
-
-
-
-
-## Initialization {#initialization}
-
-The Client object is initialized by `com.clickhouse.client.api.Client.Builder#build()`. Each client has its own context and no objects are shared between them.
-The Builder has configuration methods for convenient setup.
-
-Example:
-```java showLineNumbers
- Client client = new Client.Builder()
- .addEndpoint("https://clickhouse-cloud-instance:8443/")
- .setUsername(user)
- .setPassword(password)
- .build();
-```
-
-`Client` is `AutoCloseable` and should be closed when not needed anymore.
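-
-For example, a try-with-resources block (a minimal sketch reusing the builder calls above) makes sure the client is closed automatically:
-
-```java showLineNumbers
-try (Client client = new Client.Builder()
-        .addEndpoint("https://clickhouse-cloud-instance:8443/")
-        .setUsername(user)
-        .setPassword(password)
-        .build()) {
-    // use the client; it is closed automatically when this block exits
-}
-```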
-
-### Authentication {#authentication}
-
-Authentication is configured per client at the initialization phase. There are three supported authentication methods: by password, by access token, and by SSL client certificate.
-
-Authentication by a password requires setting a username and password by calling `setUsername(String)` and `setPassword(String)`:
-```java showLineNumbers
- Client client = new Client.Builder()
- .addEndpoint("https://clickhouse-cloud-instance:8443/")
- .setUsername(user)
- .setPassword(password)
- .build();
-```
-
-Authentication by an access token requires setting the access token by calling `setAccessToken(String)`:
-```java showLineNumbers
- Client client = new Client.Builder()
- .addEndpoint("https://clickhouse-cloud-instance:8443/")
- .setAccessToken(userAccessToken)
- .build();
-```
-
-Authentication by an SSL client certificate requires setting a username, enabling SSL authentication, and setting a client certificate and a client key by calling `setUsername(String)`, `useSSLAuthentication(boolean)`, `setClientCertificate(String)`, and `setClientKey(String)` respectively:
-```java showLineNumbers
-Client client = new Client.Builder()
-        .useSSLAuthentication(true)
-        .setUsername("some_user")
-        .setClientCertificate("some_user.crt")
-        .setClientKey("some_user.key")
-        .build();
-```
-
-:::note
-SSL authentication may be hard to troubleshoot in production because many errors from SSL libraries provide insufficient information. For example, if the client certificate and key do not match, the server will terminate the connection immediately (in the case of HTTP, this happens at the connection initiation stage, so no HTTP request is sent and no response is returned).
-
-Please use tools like [openssl](https://docs.openssl.org/master/man1/openssl/) to verify certificates and keys:
-- check key integrity: `openssl rsa -in [key-file.key] -check -noout`
-- check that the client certificate has a matching CN for the user:
-  - get the CN from the user certificate: `openssl x509 -noout -subject -in [user.cert]`
-  - verify that the same value is set in the database: `select name, auth_type, auth_params from system.users where auth_type = 'ssl_certificate'` (the query will output `auth_params` with something like `{"common_names":["some_user"]}`)
-
-:::
-
-## Configuration {#configuration}
-
-All settings are defined by instance methods (a.k.a configuration methods) that make the scope and context of each value clear.
-Major configuration parameters are defined in one scope (client or operation) and do not override each other.
-
-Configuration is defined during client creation. See `com.clickhouse.client.api.Client.Builder`.
-
-## Client Configuration {#client-configuration}
-
-| Configuration Method | Arguments | Description |
-|-----------------------|-----------------------------|:--------------------------------------------|
-| `addEndpoint(String endpoint)` | - `endpoint` - a URL-formatted server address. | Adds a server endpoint to the list of available servers. Currently only one endpoint is supported.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `addEndpoint(Protocol protocol, String host, int port, boolean secure)` | - `protocol` - connection protocol `com.clickhouse.client.api.enums.Protocol#HTTP`.<br/>- `host` - IP or hostname of a server.<br/>- `secure` - if communication should use the secure version of the protocol (HTTPS) | Adds a server endpoint to the list of available servers. Currently only one endpoint is supported.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `setOption(String key, String value)` | - `key` - String key of the client configuration option.<br/>- `value` - String value of the option | Sets the raw value of a client option. Useful when reading configuration from properties files. |
-| `setUsername(String username)` | - `username` - the username to use for authentication | Sets the username for the authentication method that is selected by further configuration<br/>Default: `default`<br/>Enum: `ClientConfigProperties.USER`<br/>Key: `user` |
-| `setPassword(String password)` | - `password` - secret value for password authentication | Sets a secret for password authentication and effectively selects it as the authentication method<br/>Default: -<br/>Enum: `ClientConfigProperties.PASSWORD`<br/>Key: `password` |
-| `setAccessToken(String accessToken)` | - `accessToken` - String representation of an access token | Sets an access token for authentication and selects the corresponding authentication method<br/>Default: -<br/>Enum: `ClientConfigProperties.ACCESS_TOKEN`<br/>Key: `access_token` |
-| `useSSLAuthentication(boolean useSSLAuthentication)` | - `useSSLAuthentication` - flag that indicates if SSL auth should be used | Sets the SSL client certificate as the authentication method.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_AUTH`<br/>Key: `ssl_authentication` |
-| `enableConnectionPool(boolean enable)` | - `enable` - flag that indicates if the option should be enabled | Sets if a connection pool is enabled<br/>Default: `true`<br/>Enum: `ClientConfigProperties.CONNECTION_POOL_ENABLED`<br/>Key: `connection_pool_enabled` |
-| `setConnectTimeout(long timeout, ChronoUnit unit)` | - `timeout` - timeout in some time unit.<br/>- `unit` - time unit of the `timeout` | Sets the connection initiation timeout for any outgoing connection. This affects the time spent waiting for the socket to connect.<br/>Default: -<br/>Enum: `ClientConfigProperties.CONNECTION_TIMEOUT`<br/>Key: `connection_timeout` |
-| `setConnectionRequestTimeout(long timeout, ChronoUnit unit)` | - `timeout` - timeout in some time unit.<br/>- `unit` - time unit of the `timeout` | Sets the connection request timeout. This takes effect only when getting a connection from a pool.<br/>Default: `10000`<br/>Enum: `ClientConfigProperties.CONNECTION_REQUEST_TIMEOUT`<br/>Key: `connection_request_timeout` |
-| `setMaxConnections(int maxConnections)` | - `maxConnections` - number of connections | Sets how many connections the client can open to each server endpoint.<br/>Default: `10`<br/>Enum: `ClientConfigProperties.HTTP_MAX_OPEN_CONNECTIONS`<br/>Key: `max_open_connections` |
-| `setConnectionTTL(long timeout, ChronoUnit unit)` | - `timeout` - timeout in some time unit.<br/>- `unit` - time unit of the `timeout` | Sets the connection TTL after which a connection is considered inactive<br/>Default: `-1`<br/>Enum: `ClientConfigProperties.CONNECTION_TTL`<br/>Key: `connection_ttl` |
-| `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | - `timeout` - timeout in some time unit.<br/>- `unit` - time unit of the `timeout` | Sets the HTTP connection keep-alive timeout. This option may be used to disable keep-alive by setting the timeout to zero - `0`<br/>Default: -<br/>Enum: `ClientConfigProperties.HTTP_KEEP_ALIVE_TIMEOUT`<br/>Key: `http_keep_alive_timeout` |
-| `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | - `strategy` - enum `com.clickhouse.client.api.ConnectionReuseStrategy` constant | Selects which strategy the connection pool should use: `LIFO` if connections should be reused as soon as they are returned to the pool, or `FIFO` to use connections in the order they become available (returned connections are not used immediately).<br/>Default: `FIFO`<br/>Enum: `ClientConfigProperties.CONNECTION_REUSE_STRATEGY`<br/>Key: `connection_reuse_strategy` |
-| `setSocketTimeout(long timeout, ChronoUnit unit)` | - `timeout` - timeout in some time unit.<br/>- `unit` - time unit of the `timeout` | Sets the socket timeout that affects read and write operations<br/>Default: `0`<br/>Enum: `ClientConfigProperties.SOCKET_OPERATION_TIMEOUT`<br/>Key: `socket_timeout` |
-| `setSocketRcvbuf(long size)` | - `size` - size in bytes | Sets the TCP socket receive buffer. This buffer is outside the JVM memory.<br/>Default: `8196`<br/>Enum: `ClientConfigProperties.SOCKET_RCVBUF_OPT`<br/>Key: `socket_rcvbuf` |
-| `setSocketSndbuf(long size)` | - `size` - size in bytes | Sets the TCP socket send buffer. This buffer is outside the JVM memory.<br/>Default: `8196`<br/>Enum: `ClientConfigProperties.SOCKET_SNDBUF_OPT`<br/>Key: `socket_sndbuf` |
-| `setSocketKeepAlive(boolean value)` | - `value` - flag that indicates if the option should be enabled. | Sets the `SO_KEEPALIVE` option for every TCP socket created by the client. TCP keep-alive enables a mechanism that checks the liveness of the connection and helps detect abruptly terminated ones.<br/>Default: -<br/>Enum: `ClientConfigProperties.SOCKET_KEEPALIVE_OPT`<br/>Key: `socket_keepalive` |
-| `setSocketTcpNodelay(boolean value)` | - `value` - flag that indicates if the option should be enabled. | Sets the `SO_NODELAY` option for every TCP socket created by the client. This TCP option makes the socket push data as soon as possible.<br/>Default: -<br/>Enum: `ClientConfigProperties.SOCKET_TCP_NO_DELAY_OPT`<br/>Key: `socket_tcp_nodelay` |
-| `setSocketLinger(int secondsToWait)` | - `secondsToWait` - number of seconds. | Sets the linger time for every TCP socket created by the client.<br/>Default: -<br/>Enum: `ClientConfigProperties.SOCKET_LINGER_OPT`<br/>Key: `socket_linger` |
-| `compressServerResponse(boolean enabled)` | - `enabled` - flag that indicates if the option should be enabled | Sets if the server should compress its responses.<br/>Default: `true`<br/>Enum: `ClientConfigProperties.COMPRESS_SERVER_RESPONSE`<br/>Key: `compress` |
-| `compressClientRequest(boolean enabled)` | - `enabled` - flag that indicates if the option should be enabled | Sets if the client should compress its requests.<br/>Default: `false`<br/>Enum: `ClientConfigProperties.COMPRESS_CLIENT_REQUEST`<br/>Key: `decompress` |
-| `useHttpCompression(boolean enabled)` | - `enabled` - flag that indicates if the option should be enabled | Sets if HTTP compression should be used for client/server communication if the corresponding options are enabled |
-| `appCompressedData(boolean enabled)` | - `enabled` - flag that indicates if the option should be enabled | Tells the client that compression will be handled by the application.<br/>Default: `false`<br/>Enum: `ClientConfigProperties.APP_COMPRESSED_DATA`<br/>Key: `app_compressed_data` |
-| `setLZ4UncompressedBufferSize(int size)` | - `size` - size in bytes | Sets the size of the buffer that receives the uncompressed portion of a data stream. If the buffer is underestimated, a new one will be created and a corresponding warning will be present in the logs.<br/>Default: `65536`<br/>Enum: `ClientConfigProperties.COMPRESSION_LZ4_UNCOMPRESSED_BUF_SIZE`<br/>Key: `compression.lz4.uncompressed_buffer_size` |
-| `disableNativeCompression` | - `disable` - flag that indicates if the option should be disabled | Disables native compression. If set to `true`, native compression will be disabled.<br/>Default: `false`<br/>Enum: `ClientConfigProperties.DISABLE_NATIVE_COMPRESSION`<br/>Key: `disable_native_compression` |
-| `setDefaultDatabase(String database)` | - `database` - name of a database | Sets the default database.<br/>Default: `default`<br/>Enum: `ClientConfigProperties.DATABASE`<br/>Key: `database` |
-| `addProxy(ProxyType type, String host, int port)` | - `type` - proxy type.<br/>- `host` - proxy host name or IP address.<br/>- `port` - proxy port | Sets a proxy to be used for communication with a server. Setting a proxy is required if the proxy requires authentication.<br/>Default: -<br/>Enum: `ClientConfigProperties.PROXY_TYPE`<br/>Key: `proxy_type`<br/>Default: -<br/>Enum: `ClientConfigProperties.PROXY_HOST`<br/>Key: `proxy_host`<br/>Default: -<br/>Enum: `ClientConfigProperties.PROXY_PORT`<br/>Key: `proxy_port` |
-| `setProxyCredentials(String user, String pass)` | - `user` - proxy username.<br/>- `pass` - password | Sets user credentials to authenticate with a proxy.<br/>Default: -<br/>Enum: `ClientConfigProperties.PROXY_USER`<br/>Key: `proxy_user`<br/>Default: -<br/>Enum: `ClientConfigProperties.PROXY_PASSWORD`<br/>Key: `proxy_password` |
-| `setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | - `timeout` - timeout in some time unit.<br/>- `timeUnit` - time unit of the `timeout` | Sets the maximum execution timeout for queries<br/>Default: `0`<br/>Enum: `ClientConfigProperties.MAX_EXECUTION_TIME`<br/>Key: `max_execution_time` |
-| `setHttpCookiesEnabled(boolean enabled)` | - `enabled` - flag that indicates if the option should be enabled | Sets if HTTP cookies should be remembered and sent back to the server. |
-| `setSSLTrustStore(String path)` | - `path` - file path on the local (client-side) system | Sets if the client should use an SSL truststore for server host validation.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_TRUST_STORE`<br/>Key: `trust_store` |
-| `setSSLTrustStorePassword(String password)` | - `password` - secret value | Sets the password used to unlock the SSL truststore specified by `setSSLTrustStore(String path)`<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_KEY_STORE_PASSWORD`<br/>Key: `key_store_password` |
-| `setSSLTrustStoreType(String type)` | - `type` - truststore type name | Sets the type of the truststore specified by `setSSLTrustStore(String path)`.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_KEYSTORE_TYPE`<br/>Key: `key_store_type` |
-| `setRootCertificate(String path)` | - `path` - file path on the local (client-side) system | Sets if the client should use the specified root (CA) certificate for server host validation.<br/>Default: -<br/>Enum: `ClientConfigProperties.CA_CERTIFICATE`<br/>Key: `sslrootcert` |
-| `setClientCertificate(String path)` | - `path` - file path on the local (client-side) system | Sets the client certificate path to be used while initiating an SSL connection and for SSL authentication.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_CERTIFICATE`<br/>Key: `sslcert` |
-| `setClientKey(String path)` | - `path` - file path on the local (client-side) system | Sets the client private key used to encrypt SSL communication with the server.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_KEY`<br/>Key: `ssl_key` |
-| `useServerTimeZone(boolean useServerTimeZone)` | - `useServerTimeZone` - flag that indicates if the option should be enabled | Sets if the client should use the server timezone when decoding DateTime and Date column values. If enabled, the server timezone should be set by `setServerTimeZone(String timeZone)`<br/>Default: `true`<br/>Enum: `ClientConfigProperties.USE_SERVER_TIMEZONE`<br/>Key: `use_server_time_zone` |
-| `useTimeZone(String timeZone)` | - `timeZone` - string value of a valid Java timezone ID (see `java.time.ZoneId`) | Sets the timezone to use when decoding DateTime and Date column values. Overrides the server timezone.<br/>Default: -<br/>Enum: `ClientConfigProperties.USE_TIMEZONE`<br/>Key: `use_time_zone` |
-| `setServerTimeZone(String timeZone)` | - `timeZone` - string value of a valid Java timezone ID (see `java.time.ZoneId`) | Sets the server-side timezone. The UTC timezone is used by default.<br/>Default: `UTC`<br/>Enum: `ClientConfigProperties.SERVER_TIMEZONE`<br/>Key: `server_time_zone` |
-| `useAsyncRequests(boolean async)` | - `async` - flag that indicates if the option should be enabled. | Sets if the client should execute requests in a separate thread. Disabled by default because the application knows better how to organize multi-threaded tasks, and running tasks in a separate thread does not help with performance.<br/>Default: `false`<br/>Enum: `ClientConfigProperties.ASYNC_OPERATIONS`<br/>Key: `async` |
-| `setSharedOperationExecutor(ExecutorService executorService)` | - `executorService` - instance of an executor service. | Sets the executor service for operation tasks.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `setClientNetworkBufferSize(int size)` | - `size` - size in bytes | Sets the size of a buffer in application memory space that is used to copy data back and forth between the socket and the application. A larger value reduces system calls to the TCP stack, but affects how much memory is spent on every connection. This buffer is also subject to GC because connections are short-lived. Also keep in mind that allocating a big continuous block of memory might be a problem.<br/>Default: `300000`<br/>Enum: `ClientConfigProperties.CLIENT_NETWORK_BUFFER_SIZE`<br/>Key: `client_network_buffer_size` |
-| `retryOnFailures(ClientFaultCause ...causes)` | - `causes` - enum constants of `com.clickhouse.client.api.ClientFaultCause` | Sets recoverable/retriable fault types.<br/>Default: `NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout`<br/>Enum: `ClientConfigProperties.CLIENT_RETRY_ON_FAILURE`<br/>Key: `client_retry_on_failures` |
-| `setMaxRetries(int maxRetries)` | - `maxRetries` - number of retries | Sets the maximum number of retries for failures defined by `retryOnFailures(ClientFaultCause ...causes)`<br/>Default: `3`<br/>Enum: `ClientConfigProperties.RETRY_ON_FAILURE`<br/>Key: `retry` |
-| `allowBinaryReaderToReuseBuffers(boolean reuse)` | - `reuse` - flag that indicates if the option should be enabled | Most datasets contain numeric data encoded as small byte sequences. By default, the reader allocates the required buffer, reads data into it, and then transforms it into the target Number class. That may cause significant GC pressure because many small objects are allocated and released. If this option is enabled, the reader uses preallocated buffers to transcode numbers. This is safe because each reader has its own set of buffers and readers are used by one thread. |
-| `httpHeader(String key, String value)` | - `key` - HTTP header key.<br/>- `value` - string value of the header. | Sets the value for a single HTTP header. The previous value is overridden.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `httpHeader(String key, Collection<String> values)` | - `key` - HTTP header key.<br/>- `values` - list of string values. | Sets values for a single HTTP header. The previous value is overridden.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `httpHeaders(Map<String, String> headers)` | - `headers` - map of HTTP headers and their values. | Sets multiple HTTP header values at a time.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `serverSetting(String name, String value)` | - `name` - name of a query-level setting.<br/>- `value` - string value of the setting. | Sets settings to pass to the server along with each query. Individual operation settings may override it. [List of settings](/operations/settings/query-level)<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `serverSetting(String name, Collection<String> values)` | - `name` - name of a query-level setting.<br/>- `values` - string values of the setting. | Sets settings to pass to the server along with each query. Individual operation settings may override it. [List of settings](/operations/settings/query-level). This method is useful for settings with multiple values, for example [roles](/interfaces/http#setting-role-with-query-parameters)<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)` | - `strategy` - implementation of a column-field matching strategy | Sets a custom strategy used to match DTO class fields and DB columns when registering a DTO.<br/>Default: `none`<br/>Enum: `none`<br/>Key: `none` |
-| `useHTTPBasicAuth(boolean useBasicAuth)` | - `useBasicAuth` - flag that indicates if the option should be enabled | Sets if basic HTTP authentication should be used for user-password authentication. Enabled by default. Using this type of authentication resolves issues with passwords containing special characters that cannot be transferred over HTTP headers.<br/>Default: `true`<br/>Enum: `ClientConfigProperties.HTTP_USE_BASIC_AUTH`<br/>Key: `http_use_basic_auth` |
-| `setClientName(String clientName)` | - `clientName` - a string representing the application name | Sets additional information about the calling application. This string is passed to the server as a client name. In the case of the HTTP protocol, it is passed as the `User-Agent` header.<br/>Default: -<br/>Enum: `ClientConfigProperties.CLIENT_NAME`<br/>Key: `client_name` |
-| `useBearerTokenAuth(String bearerToken)` | - `bearerToken` - an encoded bearer token | Specifies whether to use Bearer authentication and what token to use. The token is sent as is, so it should be encoded before being passed to this method.<br/>Default: -<br/>Enum: `ClientConfigProperties.BEARERTOKEN_AUTH`<br/>Key: `bearer_token` |
-| `registerClientMetrics(Object registry, String name)` | - `registry` - Micrometer registry instance<br/>- `name` - metrics group name | Registers sensors with a Micrometer (https://micrometer.io/) registry instance. |
-| `setServerVersion(String version)` | - `version` - string value of a server version | Sets the server version to avoid version detection.<br/>Default: -<br/>Enum: `ClientConfigProperties.SERVER_VERSION`<br/>Key: `server_version` |
-| `typeHintMapping(Map typeHintMapping)` | - `typeHintMapping` - map of type hints | Sets type hint mapping for ClickHouse types, for example, to make multidimensional arrays be represented as Java containers instead of dedicated Array objects.<br/>Default: -<br/>Enum: `ClientConfigProperties.TYPE_HINT_MAPPING`<br/>Key: `type_hint_mapping` |
-| `sslSocketSNI(String sni)` | - `sni` - string value of a server name | Sets the server name to be used for SNI (Server Name Indication) in the SSL/TLS connection.<br/>Default: -<br/>Enum: `ClientConfigProperties.SSL_SOCKET_SNI`<br/>Key: `ssl_socket_sni` |
-
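-As a sketch (the endpoint and values are placeholders), the raw `Key` values from the table above can also be applied via `setOption`, for example when loading configuration from a properties file:
-
-```java showLineNumbers
-Client client = new Client.Builder()
-        .addEndpoint("https://clickhouse-cloud-instance:8443/")
-        .setOption("user", "default")             // same key as setUsername("default")
-        .setOption("max_open_connections", "20")  // same key as setMaxConnections(20)
-        .build();
-```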
-
-### Server Settings {#server-settings}
-
-Server-side settings can be set once at the client level during creation (see the `serverSetting` method of the `Builder`) and at the operation level (see `serverSetting` on the operation settings classes).
-
-```java showLineNumbers
- try (Client client = new Client.Builder().addEndpoint(Protocol.HTTP, "localhost", mockServer.port(), false)
- .setUsername("default")
- .setPassword(ClickHouseServerForTest.getPassword())
- .compressClientRequest(true)
-
- // Client level
- .serverSetting("max_threads", "10")
- .serverSetting("async_insert", "1")
- .serverSetting("roles", Arrays.asList("role1", "role2"))
-
- .build()) {
-
- // Operation level
- QuerySettings querySettings = new QuerySettings();
- querySettings.serverSetting("session_timezone", "Europe/Zurich");
-
- ...
-}
-```
-When options are set via the `setOption` method (on either the `Client.Builder` or an operation settings class), server setting names should be prefixed with `clickhouse_setting_`. The `com.clickhouse.client.api.ClientConfigProperties#serverSetting()` helper may be handy in this case.
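-
-For example (a minimal sketch), a server setting passed through `setOption` needs the prefix:
-
-```java showLineNumbers
-QuerySettings settings = new QuerySettings();
-// equivalent to settings.serverSetting("max_threads", "10")
-settings.setOption("clickhouse_setting_max_threads", "10");
-```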
-
-### Custom HTTP Headers {#custom-http-headers}
-
-Custom HTTP headers can be set for all operations (client level) or for a single operation (operation level).
-```java showLineNumbers
-
-QuerySettings settings = new QuerySettings()
- .httpHeader(HttpHeaders.REFERER, clientReferer)
- .setQueryId(qId);
-
-```
-
-When options are set via the `setOption` method (on either the `Client.Builder` or an operation settings class), custom header names should be prefixed with `http_header_`. The `com.clickhouse.client.api.ClientConfigProperties#httpHeader()` method may be handy in this case.
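-
-For example (a minimal sketch with a hypothetical header name), a header set through `setOption` needs the prefix:
-
-```java showLineNumbers
-QuerySettings settings = new QuerySettings();
-// equivalent to settings.httpHeader("X-My-Header", "some-value")
-settings.setOption("http_header_X-My-Header", "some-value");
-```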
-
-## Common Definitions {#common-definitions}
-
-### ClickHouseFormat {#clickhouseformat}
-
-Enum of [supported formats](/interfaces/formats). It includes all formats that ClickHouse supports.
-
-* `raw` - user should transcode raw data
-* `full` - the client can transcode data by itself and accepts a raw data stream
-* `-` - operation not supported by ClickHouse for this format
-
-This client version supports:
-
-| Format | Input | Output |
-|-------------------------------------------------------------------------------------------------------------------------------|:------:|:-------:|
-| [TabSeparated](/interfaces/formats/TabSeparated) | raw | raw |
-| [TabSeparatedRaw](/interfaces/formats/TabSeparatedRaw) | raw | raw |
-| [TabSeparatedWithNames](/interfaces/formats/TabSeparatedWithNames) | raw | raw |
-| [TabSeparatedWithNamesAndTypes](/interfaces/formats/TabSeparatedWithNamesAndTypes) | raw | raw |
-| [TabSeparatedRawWithNames](/interfaces/formats/TabSeparatedRawWithNames) | raw | raw |
-| [TabSeparatedRawWithNamesAndTypes](/interfaces/formats/TabSeparatedRawWithNamesAndTypes) | raw | raw |
-| [Template](/interfaces/formats/Template) | raw | raw |
-| [TemplateIgnoreSpaces](/interfaces/formats/TemplateIgnoreSpaces) | raw | - |
-| [CSV](/interfaces/formats/CSV) | raw | raw |
-| [CSVWithNames](/interfaces/formats/CSVWithNames) | raw | raw |
-| [CSVWithNamesAndTypes](/interfaces/formats/CSVWithNamesAndTypes) | raw | raw |
-| [CustomSeparated](/interfaces/formats/CustomSeparated) | raw | raw |
-| [CustomSeparatedWithNames](/interfaces/formats/CustomSeparatedWithNames) | raw | raw |
-| [CustomSeparatedWithNamesAndTypes](/interfaces/formats/CustomSeparatedWithNamesAndTypes) | raw | raw |
-| [SQLInsert](/interfaces/formats/SQLInsert) | - | raw |
-| [Values](/interfaces/formats/Values) | raw | raw |
-| [Vertical](/interfaces/formats/Vertical) | - | raw |
-| [JSON](/interfaces/formats/JSON) | raw | raw |
-| [JSONAsString](/interfaces/formats/JSONAsString) | raw | - |
-| [JSONAsObject](/interfaces/formats/JSONAsObject) | raw | - |
-| [JSONStrings](/interfaces/formats/JSONStrings) | raw | raw |
-| [JSONColumns](/interfaces/formats/JSONColumns) | raw | raw |
-| [JSONColumnsWithMetadata](/interfaces/formats/JSONColumnsWithMetadata) | raw | raw |
-| [JSONCompact](/interfaces/formats/JSONCompact) | raw | raw |
-| [JSONCompactStrings](/interfaces/formats/JSONCompactStrings) | - | raw |
-| [JSONCompactColumns](/interfaces/formats/JSONCompactColumns) | raw | raw |
-| [JSONEachRow](/interfaces/formats/JSONEachRow) | raw | raw |
-| [PrettyJSONEachRow](/interfaces/formats/PrettyJSONEachRow) | - | raw |
-| [JSONEachRowWithProgress](/interfaces/formats/JSONEachRowWithProgress) | - | raw |
-| [JSONStringsEachRow](/interfaces/formats/JSONStringsEachRow) | raw | raw |
-| [JSONStringsEachRowWithProgress](/interfaces/formats/JSONStringsEachRowWithProgress) | - | raw |
-| [JSONCompactEachRow](/interfaces/formats/JSONCompactEachRow) | raw | raw |
-| [JSONCompactEachRowWithNames](/interfaces/formats/JSONCompactEachRowWithNames) | raw | raw |
-| [JSONCompactEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactEachRowWithNamesAndTypes) | raw | raw |
-| [JSONCompactStringsEachRow](/interfaces/formats/JSONCompactStringsEachRow) | raw | raw |
-| [JSONCompactStringsEachRowWithNames](/interfaces/formats/JSONCompactStringsEachRowWithNames) | raw | raw |
-| [JSONCompactStringsEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactStringsEachRowWithNamesAndTypes) | raw | raw |
-| [JSONObjectEachRow](/interfaces/formats/JSONObjectEachRow) | raw | raw |
-| [BSONEachRow](/interfaces/formats/BSONEachRow) | raw | raw |
-| [TSKV](/interfaces/formats/TSKV) | raw | raw |
-| [Pretty](/interfaces/formats/Pretty) | - | raw |
-| [PrettyNoEscapes](/interfaces/formats/PrettyNoEscapes) | - | raw |
-| [PrettyMonoBlock](/interfaces/formats/PrettyMonoBlock) | - | raw |
-| [PrettyNoEscapesMonoBlock](/interfaces/formats/PrettyNoEscapesMonoBlock) | - | raw |
-| [PrettyCompact](/interfaces/formats/PrettyCompact) | - | raw |
-| [PrettyCompactNoEscapes](/interfaces/formats/PrettyCompactNoEscapes) | - | raw |
-| [PrettyCompactMonoBlock](/interfaces/formats/PrettyCompactMonoBlock) | - | raw |
-| [PrettyCompactNoEscapesMonoBlock](/interfaces/formats/PrettyCompactNoEscapesMonoBlock) | - | raw |
-| [PrettySpace](/interfaces/formats/PrettySpace) | - | raw |
-| [PrettySpaceNoEscapes](/interfaces/formats/PrettySpaceNoEscapes) | - | raw |
-| [PrettySpaceMonoBlock](/interfaces/formats/PrettySpaceMonoBlock) | - | raw |
-| [PrettySpaceNoEscapesMonoBlock](/interfaces/formats/PrettySpaceNoEscapesMonoBlock) | - | raw |
-| [Prometheus](/interfaces/formats/Prometheus) | - | raw |
-| [Protobuf](/interfaces/formats/Protobuf) | raw | raw |
-| [ProtobufSingle](/interfaces/formats/ProtobufSingle) | raw | raw |
-| [ProtobufList](/interfaces/formats/ProtobufList) | raw | raw |
-| [Avro](/interfaces/formats/Avro) | raw | raw |
-| [AvroConfluent](/interfaces/formats/AvroConfluent) | raw | - |
-| [Parquet](/interfaces/formats/Parquet) | raw | raw |
-| [ParquetMetadata](/interfaces/formats/ParquetMetadata) | raw | - |
-| [Arrow](/interfaces/formats/Arrow) | raw | raw |
-| [ArrowStream](/interfaces/formats/ArrowStream) | raw | raw |
-| [ORC](/interfaces/formats/ORC) | raw | raw |
-| [One](/interfaces/formats/One) | raw | - |
-| [Npy](/interfaces/formats/Npy) | raw | raw |
-| [RowBinary](/interfaces/formats/RowBinary) | full | full |
-| [RowBinaryWithNames](/interfaces/formats/RowBinaryWithNamesAndTypes) | full | full |
-| [RowBinaryWithNamesAndTypes](/interfaces/formats/RowBinaryWithNamesAndTypes) | full | full |
-| [RowBinaryWithDefaults](/interfaces/formats/RowBinaryWithDefaults) | full | - |
-| [Native](/interfaces/formats/Native) | full | raw |
-| [Null](/interfaces/formats/Null) | - | raw |
-| [XML](/interfaces/formats/XML) | - | raw |
-| [CapnProto](/interfaces/formats/CapnProto) | raw | raw |
-| [LineAsString](/interfaces/formats/LineAsString) | raw | raw |
-| [Regexp](/interfaces/formats/Regexp) | raw | - |
-| [RawBLOB](/interfaces/formats/RawBLOB) | raw | raw |
-| [MsgPack](/interfaces/formats/MsgPack) | raw | raw |
-| [MySQLDump](/interfaces/formats/MySQLDump) | raw | - |
-| [DWARF](/interfaces/formats/DWARF) | raw | - |
-| [Markdown](/interfaces/formats/Markdown) | - | raw |
-| [Form](/interfaces/formats/Form) | raw | - |
-
-## Insert API {#insert-api}
-
-### insert(String tableName, InputStream data, ClickHouseFormat format) {#insertstring-tablename-inputstream-data-clickhouseformat-format}
-
-Accepts data as an `InputStream` of bytes in the specified format. It is expected that `data` is encoded in the `format`.
-
-**Signatures**
-
-```java
-CompletableFuture<InsertResponse> insert(String tableName, InputStream data, ClickHouseFormat format, InsertSettings settings)
-CompletableFuture<InsertResponse> insert(String tableName, InputStream data, ClickHouseFormat format)
-```
-
-**Parameters**
-
-`tableName` - a target table name.
-
-`data` - an input stream of an encoded data.
-
-`format` - a format in which the data is encoded.
-
-`settings` - request settings.
-
-**Return value**
-
-Future of `InsertResponse` type - result of the operation and additional information like server side metrics.
-
-**Examples**
-
-```java showLineNumbers
-try (InputStream dataStream = getDataStream()) {
- try (InsertResponse response = client.insert(TABLE_NAME, dataStream, ClickHouseFormat.JSONEachRow,
- insertSettings).get(3, TimeUnit.SECONDS)) {
-
- log.info("Insert finished: {} rows written", response.getMetrics().getMetric(ServerMetrics.NUM_ROWS_WRITTEN).getLong());
- } catch (Exception e) {
- log.error("Failed to write JSONEachRow data", e);
- throw new RuntimeException(e);
- }
-}
-
-```
-
-### insert(String tableName, List<?> data, InsertSettings settings) {#insertstring-tablename-listlt-data-insertsettings-settings}
-
-Sends a write request to the database. The list of objects is converted into an efficient format and then sent to the server. The class of the list items should be registered up-front using the `register(Class, TableSchema)` method.
-
-**Signatures**
-```java
-client.insert(String tableName, List<?> data, InsertSettings settings)
-client.insert(String tableName, List<?> data)
-```
-
-**Parameters**
-
-`tableName` - name of the target table.
-
-`data` - a collection of DTO (Data Transfer Object) objects.
-
-`settings` - request settings.
-
-**Return value**
-
-Future of `InsertResponse` type - the result of the operation and additional information like server side metrics.
-
-**Examples**
-
-```java showLineNumbers
-// Important step (done once) - register class to pre-compile object serializer according to the table schema.
-client.register(ArticleViewEvent.class, client.getTableSchema(TABLE_NAME));
-
-List<ArticleViewEvent> events = loadBatch();
-
-try (InsertResponse response = client.insert(TABLE_NAME, events).get()) {
- // handle response, then it will be closed and connection that served request will be released.
-}
-```
-
-### insert(String tableName, DataStreamWriter writer, ClickHouseFormat format, InsertSettings settings) {#insertstring-tablename-datastreamwriter-writer-clickhouseformat-format-insertsettings-settings}
-**Beta**
-
-This API method allows passing a writer object that encodes data directly into an output stream. Data will be compressed by the client.
-There is a configuration option in `InsertSettings` called `appCompressedData` that allows turning off client compression and letting the application send an already compressed stream.
-The examples below show the major use cases this API was designed for.
-
-`com.clickhouse.client.api.DataStreamWriter` is a functional interface with a method `onOutput` that is called by the client when the output stream is ready for data to be written. The interface has
-another method, `onRetry`, with a default implementation; it is called when retry logic is triggered and is mainly used to reset the data source, if applicable.
-
-
-**Signatures**
-```java
-CompletableFuture<InsertResponse> insert(String tableName,  // name of destination table
-                                         DataStreamWriter writer,  // data writer instance
-                                         ClickHouseFormat format,  // data format in which the writer encodes data
-                                         InsertSettings settings)  // operation settings
-```
-
-**Parameters**
-
-`tableName` - name of the target table.
-
-`writer` - data writer instance.
-
-`format` - data format in which the writer encodes data.
-
-`settings` - request settings.
-
-**Return value**
-
-Future of `InsertResponse` type - the result of the operation and additional information like server side metrics.
-
-**Examples**
-
-Writing a collection of JSON objects encoded as string values using `JSONEachRow` format:
-```java showLineNumbers
-
-final int EXECUTE_CMD_TIMEOUT = 10; // seconds
-final String tableName = "events";
-final String tableCreate = "CREATE TABLE \"" + tableName + "\" " +
- " (name String, " +
- " v1 Float32, " +
- " v2 Float32, " +
- " attrs Nullable(String), " +
- " corrected_time DateTime('UTC') DEFAULT now()," +
- " special_attr Nullable(Int8) DEFAULT -1)" +
- " Engine = MergeTree ORDER by ()";
-
-client.execute("DROP TABLE IF EXISTS " + tableName).get(EXECUTE_CMD_TIMEOUT, TimeUnit.SECONDS);
-client.execute(tableCreate).get(EXECUTE_CMD_TIMEOUT, TimeUnit.SECONDS);
-
-String correctedTime = Instant.now().atZone(ZoneId.of("UTC")).format(DataTypeUtils.DATETIME_FORMATTER);
-String[] rows = new String[] {
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\", \"corrected_time\": \"" + correctedTime + "\", \"special_attr\": 10}",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\", \"corrected_time\": \"" + correctedTime + "\"}",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\" }",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6 }",
-};
-
-
-try (InsertResponse response = client.insert(tableName, out -> {
- // writing raw bytes
- for (String row : rows) {
- out.write(row.getBytes());
- }
-
-}, ClickHouseFormat.JSONEachRow, new InsertSettings()).get()) {
-
- System.out.println("Rows written: " + response.getWrittenRows());
-}
-
-```
-
-Writing already compressed data:
-```java showLineNumbers
-String tableName = "very_long_table_name_with_uuid_" + UUID.randomUUID().toString().replace('-', '_');
-String tableCreate = "CREATE TABLE \"" + tableName + "\" " +
- " (name String, " +
- " v1 Float32, " +
- " v2 Float32, " +
- " attrs Nullable(String), " +
- " corrected_time DateTime('UTC') DEFAULT now()," +
- " special_attr Nullable(Int8) DEFAULT -1)" +
- " Engine = MergeTree ORDER by ()";
-
-client.execute("DROP TABLE IF EXISTS " + tableName).get(EXECUTE_CMD_TIMEOUT, TimeUnit.SECONDS);
-client.execute(tableCreate).get(EXECUTE_CMD_TIMEOUT, TimeUnit.SECONDS);
-
-String correctedTime = Instant.now().atZone(ZoneId.of("UTC")).format(DataTypeUtils.DATETIME_FORMATTER);
-String[] data = new String[] {
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\", \"corrected_time\": \"" + correctedTime + "\", \"special_attr\": 10}",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\", \"corrected_time\": \"" + correctedTime + "\"}",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6, \"attrs\": \"a=1,b=2,c=5\" }",
- "{ \"name\": \"foo1\", \"v1\": 0.3, \"v2\": 0.6 }",
-};
-
-
-// This step is only for demonstration. A real application would already have compressed data.
-byte[][] compressedData = new byte[data.length][];
-for (int i = 0 ; i < data.length; i++) {
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- GZIPOutputStream gz = new GZIPOutputStream(baos);
- gz.write(data[i].getBytes(StandardCharsets.UTF_8));
- gz.finish();
- compressedData[i] = baos.toByteArray();
-}
-
-InsertSettings insertSettings = new InsertSettings()
- .appCompressedData(true, "gzip"); // defining compression algorithm (sent via HTTP headers)
-
-try (InsertResponse response = client.insert(tableName, out -> {
- // Writing data
- for (byte[] row : compressedData) {
- out.write(row);
- }
-}, ClickHouseFormat.JSONEachRow, insertSettings).get()) {
- System.out.println("Rows written: " + response.getWrittenRows());
-}
-
-```
-
-### InsertSettings {#insertsettings}
-
-Configuration options for insert operations.
-
-**Configuration methods**
-
-| Method | Description |
-|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
-| `setQueryId(String queryId)` | Sets query ID that will be assigned to the operation. Default: `null`. |
-| `setDeduplicationToken(String token)` | Sets the deduplication token. This token will be sent to the server and can be used to identify the query. Default: `null`. |
-| `setInputStreamCopyBufferSize(int size)` | Copy buffer size. The buffer is used during write operations to copy data from user-provided input stream to an output stream. Default: `8196`. |
-| `serverSetting(String name, String value)` | Sets individual server settings for an operation. |
-| `serverSetting(String name, Collection<String> values)` | Sets individual server settings with multiple values for an operation. Items of the collection should be `String` values. |
-| `setDBRoles(Collection<String> dbRoles)` | Sets DB roles to be set before executing an operation. Items of the collection should be `String` values. |
-| `setOption(String option, Object value)` | Sets a configuration option in raw format. This is not a server setting. |
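-
-A minimal sketch combining several of these methods (the token and role names are placeholders):
-
-```java showLineNumbers
-InsertSettings insertSettings = new InsertSettings();
-insertSettings.setQueryId(UUID.randomUUID().toString());   // make the insert easy to find in query_log
-insertSettings.setDeduplicationToken("my-dedup-token");     // placeholder deduplication token
-insertSettings.serverSetting("async_insert", "1");          // per-operation server setting
-insertSettings.setDBRoles(Arrays.asList("role1", "role2")); // placeholder role names
-```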
-
-### InsertResponse {#insertresponse}
-
-Response object that holds the result of an insert operation. It is only available if the client received a response from the server.
-
-:::note
-This object should be closed as soon as possible to release the connection, because the connection cannot be reused until all data of the previous response has been fully read.
-:::
-
-| Method | Description |
-|-----------------------------|------------------------------------------------------------------------------------------------------|
-| `OperationMetrics getMetrics()` | Returns object with operation metrics. |
-| `String getQueryId()` | Returns the query ID assigned to the operation by the application (through operation settings) or by the server. |
-
-## Query API {#query-api}
-
-### query(String sqlQuery) {#querystring-sqlquery}
-
-Sends `sqlQuery` as is. Response format is set by query settings. `QueryResponse` will hold a reference to the response stream that should be consumed by a reader for the supportig format.
-
-**Signatures**
-
-```java
-CompletableFuture query(String sqlQuery, QuerySettings settings)
-CompletableFuture query(String sqlQuery)
-```
-
-**Parameters**
-
-`sqlQuery` - a single SQL statement. The Query is sent as is to a server.
-
-`settings` - request settings.
-
-**Return value**
-
-Future of `QueryResponse` type - a result dataset and additional information like server side metrics. The Response object should be closed after consuming the dataset.
-
-**Examples**
-
-```java
-final String sql = "select * from " + TABLE_NAME + " where title <> '' limit 10";
-
-// Default format is RowBinaryWithNamesAndTypesFormatReader so reader have all information about columns
-try (QueryResponse response = client.query(sql).get(3, TimeUnit.SECONDS);) {
-
- // Create a reader to access the data in a convenient way
- ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response);
-
- while (reader.hasNext()) {
- reader.next(); // Read the next record from stream and parse it
-
- // get values
- double id = reader.getDouble("id");
- String title = reader.getString("title");
- String url = reader.getString("url");
-
- // collecting data
- }
-} catch (Exception e) {
- log.error("Failed to read data", e);
-}
-
-// put business logic outside of the reading block to release http connection asap.
-```
-
-### query(String sqlQuery, Map<String, Object> queryParams, QuerySettings settings) {#querystring-sqlquery-mapltstring-object-queryparams-querysettings-settings}
-
-Sends `sqlQuery` as is. Additionally will send query parameters so the server can compile the SQL expression.
-
-**Signatures**
-```java
-CompletableFuture query(String sqlQuery, Map queryParams, QuerySettings settings)
-```
-
-**Parameters**
-
-`sqlQuery` - sql expression with placeholders `{}`.
-
-`queryParams` - map of variables to complete the sql expression on server.
-
-`settings` - request settings.
-
-**Return value**
-
-Future of `QueryResponse` type - a result dataset and additional information like server side metrics. The Response object should be closed after consuming the dataset.
-
-**Examples**
-
-```java showLineNumbers
-
-// define parameters. They will be sent to the server along with the request.
-Map queryParams = new HashMap<>();
-queryParams.put("param1", 2);
-
-try (QueryResponse response =
- client.query("SELECT * FROM " + table + " WHERE col1 >= {param1:UInt32}", queryParams, new QuerySettings()).get()) {
-
- // Create a reader to access the data in a convenient way
- ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response);
-
- while (reader.hasNext()) {
- reader.next(); // Read the next record from stream and parse it
-
- // reading data
- }
-
-} catch (Exception e) {
- log.error("Failed to read data", e);
-}
-
-```
-
-### queryAll(String sqlQuery) {#queryallstring-sqlquery}
-
-Queries a data in `RowBinaryWithNamesAndTypes` format. Returns the result as a collection. Read performance is the same as with the reader but more memory is required to hold the whole dataset.
-
-**Signatures**
-```java
-List queryAll(String sqlQuery)
-```
-
-**Parameters**
-
-`sqlQuery` - sql expression to query data from a server.
-
-**Return value**
-
-Complete dataset represented by a list of `GenericRecord` objects that provide access in row style for the result data.
-
-**Examples**
-
-```java showLineNumbers
-try {
- log.info("Reading whole table and process record by record");
- final String sql = "select * from " + TABLE_NAME + " where title <> ''";
-
- // Read whole result set and process it record by record
- client.queryAll(sql).forEach(row -> {
- double id = row.getDouble("id");
- String title = row.getString("title");
- String url = row.getString("url");
-
- log.info("id: {}, title: {}, url: {}", id, title, url);
- });
-} catch (Exception e) {
- log.error("Failed to read data", e);
-}
-```
-
-### QuerySettings {#querysettings}
-
-Configuration options for query operations.
-
-**Configuration methods**
-
-| Method | Description |
-|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
-| `setQueryId(String queryId)` | Sets query ID that will be assigned to the operation. |
-| `setFormat(ClickHouseFormat format)` | Sets response format. See `RowBinaryWithNamesAndTypes` for the full list. |
-| `setMaxExecutionTime(Integer maxExecutionTime)` | Sets operation execution time on server. Will not affect read timeout. |
-| `waitEndOfQuery(Boolean waitEndOfQuery)` | Requests the server to wait for the end of the query before sending a response. |
-| `setUseServerTimeZone(Boolean useServerTimeZone)` | Server timezone (see client config) will be used to parse date/time types in the result of an operation. Default `false`. |
-| `setUseTimeZone(String timeZone)` | Requests server to use `timeZone` for time conversion. See [session_timezone](/operations/settings/settings#session_timezone). |
-| `serverSetting(String name, String value)` | Sets individual server settings for an operation. |
-| `serverSetting(String name, Collection values)` | Sets individual server settings with multiple values for an operation. Items of the collection should be `String` values. |
-| `setDBRoles(Collection dbRoles)` | Sets DB roles to be set before executing an operation. Items of the collection should be `String` values. |
-| `setOption(String option, Object value)` | Sets a configuration option in raw format. This is not a server setting. |
-
-### QueryResponse {#queryresponse}
-
-Response object that holds result of query execution. It is only available if the client got a response from a server.
-
-:::note
-This object should be closed as soon as possible to release a connection because the connection cannot be re-used until all data of previous response is fully read.
-:::
-
-| Method | Description |
-|-------------------------------------|------------------------------------------------------------------------------------------------------|
-| `ClickHouseFormat getFormat()` | Returns a format in which data in the response is encoded. |
-| `InputStream getInputStream()` | Returns uncompressed byte stream of data in the specified format. |
-| `OperationMetrics getMetrics()` | Returns object with operation metrics. |
-| `String getQueryId()` | Returns query ID assigned for the operation by the application (through operation settings or by server). |
-| `TimeZone getTimeZone()` | Returns timezone that should be used for handling Date/DateTime types in the response. |
-
-### Examples {#examples}
-
-- Example code is available in [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2)
-- Reference Spring Service [implementation](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service)
-
-## Common API {#common-api}
-
-### getTableSchema(String table) {#gettableschemastring-table}
-
-Fetches table schema for the `table`.
-
-**Signatures**
-
-```java
-TableSchema getTableSchema(String table)
-TableSchema getTableSchema(String table, String database)
-```
-
-**Parameters**
-
-`table` - table name for which schema data should be fetched.
-
-`database` - database where the target table is defined.
-
-**Return value**
-
-Returns a `TableSchema` object with list of table columns.
-
-### getTableSchemaFromQuery(String sql) {#gettableschemafromquerystring-sql}
-
-Fetches schema from a SQL statement.
-
-**Signatures**
-
-```java
-TableSchema getTableSchemaFromQuery(String sql)
-```
-
-**Parameters**
-
-`sql` - "SELECT" SQL statement which schema should be returned.
-
-**Return value**
-
-Returns a `TableSchema` object with columns matching the `sql` expression.
-
-### TableSchema {#tableschema}
-
-### register(Class<?> clazz, TableSchema schema) {#registerclasslt-clazz-tableschema-schema}
-
-Compiles serialization and deserialization layer for the Java Class to use for writing/reading data with `schema`. The method will create a serializer and deserializer for the pair getter/setter and corresponding column.
-Column match is found by extracting its name from a method name. For example, `getFirstName` will be for the column `first_name` or `firstname`.
-
-**Signatures**
-
-```java
-void register(Class> clazz, TableSchema schema)
-```
-
-**Parameters**
-
-`clazz` - Class representing the POJO used to read/write data.
-
-`schema` - Data schema to use for matching with POJO properties.
-
-**Examples**
-
-```java showLineNumbers
-client.register(ArticleViewEvent.class, client.getTableSchema(TABLE_NAME));
-```
-
-## Usage Examples {#usage-examples}
-
-Complete examples code is stored in the repo in a 'example` [folder](https://github.com/ClickHouse/clickhouse-java/tree/main/examples):
-
-- [client-v2](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2) - main set of examples.
-- [demo-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service) - example of how to use the client in a Spring Boot application.
-- [demo-kotlin-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-kotlin-service) - example of how to use the client in Ktor (Kotlin) application.
diff --git a/docs/integrations/language-clients/java/client/client.mdx b/docs/integrations/language-clients/java/client/client.mdx
index 2dd48bab795..705e9d4545f 100644
--- a/docs/integrations/language-clients/java/client/client.mdx
+++ b/docs/integrations/language-clients/java/client/client.mdx
@@ -9,16 +9,1328 @@ doc_type: 'reference'
---
import ClientVersionDropdown from '@theme/ClientVersionDropdown/ClientVersionDropdown';
-import v07 from './_snippets/_v0_7.mdx'
-import v08 from './_snippets/_v0_8.mdx'
+import Version from '@theme/ClientVersionDropdown/Version';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import WideTableWrapper from '@site/src/components/WideTableWrapper/WideTableWrapper';
+]}>
+
+
+
+Java client library for communicating with a DB server through its protocols. The current implementation only supports the [HTTP interface](/interfaces/http).
+The library provides its own API to send requests to a server, as well as tools for working with different binary data formats (RowBinary* & Native*).
+
+## Setup {#setup}
+
+- Maven Central (project web page): https://mvnrepository.com/artifact/com.clickhouse/client-v2
+- Nightly builds (repository link): https://central.sonatype.com/repository/maven-snapshots/
+- Old Nightly builds artifactory (repository link): https://s01.oss.sonatype.org/content/repositories/snapshots/
+
+
+
+
+```xml
+<dependency>
+    <groupId>com.clickhouse</groupId>
+    <artifactId>client-v2</artifactId>
+    <version>0.9.4</version>
+</dependency>
+```
+
+
+
+
+```kotlin
+// https://mvnrepository.com/artifact/com.clickhouse/client-v2
+implementation("com.clickhouse:client-v2:0.9.4")
+```
+
+
+
+```groovy
+// https://mvnrepository.com/artifact/com.clickhouse/client-v2
+implementation 'com.clickhouse:client-v2:0.9.4'
+```
+
+
+
+
+
+## Initialization {#initialization}
+
+The Client object is initialized by `com.clickhouse.client.api.Client.Builder#build()`. Each client has its own context and no objects are shared between them.
+The Builder has configuration methods for convenient setup.
+
+Example:
+```java showLineNumbers
+ Client client = new Client.Builder()
+ .addEndpoint("https://clickhouse-cloud-instance:8443/")
+ .setUsername(user)
+ .setPassword(password)
+ .build();
+```
+
+`Client` is `AutoCloseable` and should be closed when not needed anymore.
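+
+For example, a short-lived client can be managed with try-with-resources. This is a minimal sketch; the endpoint and credentials are placeholders:
+
+```java showLineNumbers
+// The endpoint and credentials below are placeholders - substitute your own values.
+try (Client client = new Client.Builder()
+        .addEndpoint("https://clickhouse-cloud-instance:8443/")
+        .setUsername("default")
+        .setPassword("password")
+        .build()) {
+    // use the client for queries and inserts here
+} // close() is called automatically, releasing underlying resources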
+
+### Authentication {#authentication}
+
+Authentication is configured per client at the initialization phase. Three authentication methods are supported: password, access token, and SSL client certificate.
+
+Password authentication requires setting a username and password by calling `setUsername(String)` and `setPassword(String)`:
+```java showLineNumbers
+ Client client = new Client.Builder()
+ .addEndpoint("https://clickhouse-cloud-instance:8443/")
+ .setUsername(user)
+ .setPassword(password)
+ .build();
+```
+
+Access token authentication requires setting the token by calling `setAccessToken(String)`:
+```java showLineNumbers
+ Client client = new Client.Builder()
+ .addEndpoint("https://clickhouse-cloud-instance:8443/")
+ .setAccessToken(userAccessToken)
+ .build();
+```
+
+Authentication by an SSL client certificate requires setting a username, enabling SSL authentication, and setting a client certificate and a client key by calling `setUsername(String)`, `useSSLAuthentication(boolean)`, `setClientCertificate(String)` and `setClientKey(String)` respectively:
+```java showLineNumbers
+Client client = new Client.Builder()
+ .useSSLAuthentication(true)
+ .setUsername("some_user")
+ .setClientCertificate("some_user.crt")
+    .setClientKey("some_user.key")
+    .build();
+```
+
+:::note
+SSL authentication may be hard to troubleshoot in production because many SSL library errors do not provide enough information. For example, if the client certificate and key do not match, the server terminates the connection immediately (for HTTP this happens at the connection initiation stage, before any HTTP request is sent, so no response is returned).
+
+Please use tools like [openssl](https://docs.openssl.org/master/man1/openssl/) to verify certificates and keys:
+- check key integrity: `openssl rsa -in [key-file.key] -check -noout`
+- check that the client certificate has a CN matching the user:
+  - get the CN from the user certificate - `openssl x509 -noout -subject -in [user.cert]`
+  - verify that the same value is set in the database: `select name, auth_type, auth_params from system.users where auth_type = 'ssl_certificate'` (the query will output `auth_params` with something like `{"common_names":["some_user"]}`)
+
+:::
+
+## Configuration {#configuration}
+
+All settings are defined by instance methods (also known as configuration methods) that make the scope and context of each value clear.
+Major configuration parameters are defined in one scope (client or operation) and do not override each other.
+
+Configuration is defined during client creation. See `com.clickhouse.client.api.Client.Builder`.
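+
+Because every option also has a raw string key (see the `Key` column in the tables below), configuration can be loaded from a properties file and applied via `setOption`. The following is a minimal sketch; the file name and its keys are assumptions, and the usual `java.io`/`java.util` imports are omitted as in the other snippets:
+
+```java showLineNumbers
+// Minimal sketch: assumes a client.properties file whose keys match the
+// configuration keys listed in the tables below (e.g. user, password, database).
+Properties props = new Properties();
+try (InputStream in = new FileInputStream("client.properties")) {
+    props.load(in);
+}
+
+Client.Builder builder = new Client.Builder()
+        .addEndpoint("https://clickhouse-cloud-instance:8443/");
+for (String key : props.stringPropertyNames()) {
+    builder.setOption(key, props.getProperty(key)); // raw key/value configuration
+}
+Client client = builder.build();
+```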
+
+## Client Configuration {#client-configuration}
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `addEndpoint(String endpoint)` | `endpoint` - URL formatted server address | Adds a server endpoint to the list of available servers. Currently only one endpoint is supported. | `none` | `none` |
+| `addEndpoint(Protocol protocol, String host, int port, boolean secure)` | `protocol` - connection protocol<br/>`host` - IP or hostname<br/>`port` - port number<br/>`secure` - use HTTPS | Adds a server endpoint to the list of available servers. Currently only one endpoint is supported. | `none` | `none` |
+| `enableConnectionPool(boolean enable)` | `enable` - flag to enable/disable | Sets if a connection pool is enabled | `true` | `connection_pool_enabled` |
+| `setMaxConnections(int maxConnections)` | `maxConnections` - number of connections | Sets how many connections a client can open to each server endpoint. | `10` | `max_open_connections` |
+| `setConnectionTTL(long timeout, ChronoUnit unit)` | `timeout` - timeout value<br/>`unit` - time unit | Sets the connection TTL after which a connection is considered inactive. | `-1` | `connection_ttl` |
+| `setKeepAliveTimeout(long timeout, ChronoUnit unit)` | `timeout` - timeout value<br/>`unit` - time unit | Sets HTTP connection keep-alive timeout. Set to `0` to disable keep-alive. | - | `http_keep_alive_timeout` |
+| `setConnectionReuseStrategy(ConnectionReuseStrategy strategy)` | `strategy` - `LIFO` or `FIFO` | Selects which strategy connection pool should use | `FIFO` | `connection_reuse_strategy` |
+| `setDefaultDatabase(String database)` | `database` - name of a database | Sets default database. | `default` | `database` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setUsername(String username)` | `username` - username for authentication | Sets username for an authentication method that is selected by further configuration | `default` | `user` |
+| `setPassword(String password)` | `password` - secret value | Sets a secret for password authentication and effectively selects password as the authentication method | - | `password` |
+| `setAccessToken(String accessToken)` | `accessToken` - access token string | Sets an access token and selects the corresponding authentication method | - | `access_token` |
+| `useSSLAuthentication(boolean useSSLAuthentication)` | `useSSLAuthentication` - flag to enable SSL auth | Sets SSL Client Certificate as an authentication method. | - | `ssl_authentication` |
+| `useHTTPBasicAuth(boolean useBasicAuth)` | `useBasicAuth` - flag to enable/disable | Sets if basic HTTP authentication should be used for user-password authentication. Resolves issues with passwords containing special characters. | `true` | `http_use_basic_auth` |
+| `useBearerTokenAuth(String bearerToken)` | `bearerToken` - an encoded bearer token | Specifies whether to use Bearer Authentication and what token to use. The token will be sent as is. | - | `bearer_token` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setConnectTimeout(long timeout, ChronoUnit unit)` | `timeout` - timeout value<br/>`unit` - time unit | Sets connection initiation timeout for any outgoing connection. | - | `connection_timeout` |
+| `setConnectionRequestTimeout(long timeout, ChronoUnit unit)` | `timeout` - timeout value<br/>`unit` - time unit | Sets connection request timeout. This takes effect only when getting a connection from the pool. | `10000` | `connection_request_timeout` |
+| `setSocketTimeout(long timeout, ChronoUnit unit)` | `timeout` - timeout value<br/>`unit` - time unit | Sets the socket timeout that affects read and write operations | `0` | `socket_timeout` |
+| `setExecutionTimeout(long timeout, ChronoUnit timeUnit)` | `timeout` - timeout value<br/>`timeUnit` - time unit | Sets the maximum execution timeout for queries | `0` | `max_execution_time` |
+| `retryOnFailures(ClientFaultCause ...causes)` | `causes` - enum constant of `ClientFaultCause` | Sets recoverable/retriable fault types. | `NoHttpResponse,ConnectTimeout,ConnectionRequestTimeout` | `client_retry_on_failures` |
+| `setMaxRetries(int maxRetries)` | `maxRetries` - number of retries | Sets maximum number of retries for failures defined by `retryOnFailures` | `3` | `retry` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setSocketRcvbuf(long size)` | `size` - size in bytes | Sets the TCP socket receive buffer size. This buffer is allocated outside of JVM memory. | `8196` | `socket_rcvbuf` |
+| `setSocketSndbuf(long size)` | `size` - size in bytes | Sets the TCP socket send buffer size. This buffer is allocated outside of JVM memory. | `8196` | `socket_sndbuf` |
+| `setSocketKeepAlive(boolean value)` | `value` - flag to enable/disable | Sets option `SO_KEEPALIVE` for every TCP socket. TCP keep-alive enables a mechanism that checks the liveness of the connection. | - | `socket_keepalive` |
+| `setSocketTcpNodelay(boolean value)` | `value` - flag to enable/disable | Sets option `TCP_NODELAY` for every TCP socket. This option makes the socket push data as soon as possible. | - | `socket_tcp_nodelay` |
+| `setSocketLinger(int secondsToWait)` | `secondsToWait` - number of seconds | Sets the linger time for every TCP socket created by the client. | - | `socket_linger` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `compressServerResponse(boolean enabled)` | `enabled` - flag to enable/disable | Sets if server should compress its responses. | `true` | `compress` |
+| `compressClientRequest(boolean enabled)` | `enabled` - flag to enable/disable | Sets if client should compress its requests. | `false` | `decompress` |
+| `useHttpCompression(boolean enabled)` | `enabled` - flag to enable/disable | Sets if HTTP compression should be used for client/server communications if corresponding options are enabled | - | - |
+| `appCompressedData(boolean enabled)` | `enabled` - flag to enable/disable | Tells the client that compression is handled by the application. | `false` | `app_compressed_data` |
+| `setLZ4UncompressedBufferSize(int size)` | `size` - size in bytes | Sets the size of the buffer that receives the uncompressed portion of a data stream. | `65536` | `compression.lz4.uncompressed_buffer_size` |
+| `disableNativeCompression(boolean disable)` | `disable` - flag to disable | Disables native compression when set to `true`. | `false` | `disable_native_compression` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setSSLTrustStore(String path)` | `path` - file path on local system | Sets if client should use SSL truststore for server host validation. | - | `trust_store` |
+| `setSSLTrustStorePassword(String password)` | `password` - secret value | Sets password to be used to unlock SSL truststore specified by `setSSLTrustStore` | - | `key_store_password` |
+| `setSSLTrustStoreType(String type)` | `type` - truststore type name | Sets type of the truststore specified by `setSSLTrustStore`. | - | `key_store_type` |
+| `setRootCertificate(String path)` | `path` - file path on local system | Sets if the client should use the specified root (CA) certificate for server host validation. | - | `sslrootcert` |
+| `setClientCertificate(String path)` | `path` - file path on local system | Sets client certificate path to be used while initiating SSL connection and to be used by SSL authentication. | - | `sslcert` |
+| `setClientKey(String path)` | `path` - file path on local system | Sets client private key to be used for encrypting SSL communication with a server. | - | `ssl_key` |
+| `sslSocketSNI(String sni)` | `sni` - server name string | Sets server name to be used for SNI (Server Name Indication) in SSL/TLS connection. | - | `ssl_socket_sni` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `addProxy(ProxyType type, String host, int port)` | `type` - proxy type<br/>`host` - proxy hostname or IP<br/>`port` - proxy port | Sets proxy to be used for communication with a server. | - | `proxy_type`, `proxy_host`, `proxy_port` |
+| `setProxyCredentials(String user, String pass)` | `user` - proxy username<br/>`pass` - password | Sets user credentials to authenticate with a proxy. | - | `proxy_user`, `proxy_password` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setHttpCookiesEnabled(boolean enabled)` | `enabled` - flag to enable/disable | Sets if HTTP cookies should be remembered and sent back to the server. | - | - |
+| `httpHeader(String key, String value)` | `key` - HTTP header key<br/>`value` - string value | Sets value for a single HTTP header. Previous value is overridden. | `none` | `none` |
+| `httpHeader(String key, Collection values)` | `key` - HTTP header key<br/>`values` - list of string values | Sets values for a single HTTP header. Previous value is overridden. | `none` | `none` |
+| `httpHeaders(Map headers)` | `headers` - map with HTTP headers | Sets multiple HTTP header values at a time. | `none` | `none` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `serverSetting(String name, String value)` | `name` - setting name<br/>`value` - setting value | Sets what settings to pass to server along with each query. Individual operation settings may override it. [List of settings](/operations/settings/query-level) | `none` | `none` |
+| `serverSetting(String name, Collection values)` | `name` - setting name<br/>`values` - setting values | Sets what settings to pass to server with multiple values, for example [roles](/interfaces/http#setting-role-with-query-parameters) | `none` | `none` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `useServerTimeZone(boolean useServerTimeZone)` | `useServerTimeZone` - flag to enable/disable | Sets if client should use server timezone when decoding DateTime and Date column values. | `true` | `use_server_time_zone` |
+| `useTimeZone(String timeZone)` | `timeZone` - java valid timezone ID | Sets if specified timezone should be used when decoding DateTime and Date column values. Will override server timezone. | - | `use_time_zone` |
+| `setServerTimeZone(String timeZone)` | `timeZone` - java valid timezone ID | Sets server side timezone. UTC timezone will be used by default. | `UTC` | `server_time_zone` |
+
+
+
+
+
+| Method | Arguments | Description | Default | Key |
+|--------|-----------|-------------|---------|-----|
+| `setOption(String key, String value)` | `key` - configuration option key<br/>`value` - option value | Sets raw value of client options. Useful when reading configuration from properties files. | - | - |
+| `useAsyncRequests(boolean async)` | `async` - flag to enable/disable | Sets if client should execute requests in a separate thread. Disabled by default because the application knows best how to organize multi-threaded tasks. | `false` | `async` |
+| `setSharedOperationExecutor(ExecutorService executorService)` | `executorService` - executor service instance | Sets executor service for operation tasks. | `none` | `none` |
+| `setClientNetworkBufferSize(int size)` | `size` - size in bytes | Sets size of a buffer in application memory space that is used to copy data between socket and application. | `300000` | `client_network_buffer_size` |
+| `allowBinaryReaderToReuseBuffers(boolean reuse)` | `reuse` - flag to enable/disable | If enabled, the reader uses preallocated buffers for number transcoding. This reduces GC pressure for numeric data. | - | - |
+| `columnToMethodMatchingStrategy(ColumnToMethodMatchingStrategy strategy)` | `strategy` - matching strategy implementation | Sets custom strategy to be used for matching DTO class fields and DB columns when registering DTO. | `none` | `none` |
+| `setClientName(String clientName)` | `clientName` - application name string | Sets additional information about calling application. Will be passed as `User-Agent` header. | - | `client_name` |
+| `registerClientMetrics(Object registry, String name)` | `registry` - Micrometer registry instance<br/>`name` - metrics group name | Registers sensors with a [Micrometer](https://micrometer.io/) registry instance. | - | - |
+| `setServerVersion(String version)` | `version` - server version string | Sets server version to avoid version detection. | - | `server_version` |
+| `typeHintMapping(Map typeHintMapping)` | `typeHintMapping` - map of type hints | Sets type hint mapping for ClickHouse types, for example to make multidimensional arrays be represented as Java containers. | - | `type_hint_mapping` |
+
+
+
+
+
+### Server Settings {#server-settings}
+
+Server-side settings can be set once at the client level during creation (see the `serverSetting` method of the `Builder`) and at the operation level (see `serverSetting` on the operation settings classes).
+
+```java showLineNumbers
+ try (Client client = new Client.Builder().addEndpoint(Protocol.HTTP, "localhost", mockServer.port(), false)
+ .setUsername("default")
+ .setPassword(ClickHouseServerForTest.getPassword())
+ .compressClientRequest(true)
+
+ // Client level
+ .serverSetting("max_threads", "10")
+ .serverSetting("async_insert", "1")
+ .serverSetting("roles", Arrays.asList("role1", "role2"))
+
+ .build()) {
+
+ // Operation level
+ QuerySettings querySettings = new QuerySettings();
+ querySettings.serverSetting("session_timezone", "Europe/Zurich");
+
+ ...
+}
+```
+When options are set via the `setOption` method (either on the `Client.Builder` or on an operation settings class), the server setting name should be prefixed with `clickhouse_setting_`. The `com.clickhouse.client.api.ClientConfigProperties#serverSetting()` helper may be handy in this case.
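+
+For example, the following two builders configure the same `max_threads` server setting, once through the dedicated method and once through `setOption` with the prefix. This is a sketch; endpoint and credentials are placeholders:
+
+```java showLineNumbers
+// 1) dedicated builder method
+Client clientA = new Client.Builder()
+        .addEndpoint("https://clickhouse-cloud-instance:8443/")
+        .setUsername("default")
+        .setPassword("password")
+        .serverSetting("max_threads", "10")
+        .build();
+
+// 2) raw option with the `clickhouse_setting_` prefix (e.g. when loading from properties)
+Client clientB = new Client.Builder()
+        .addEndpoint("https://clickhouse-cloud-instance:8443/")
+        .setUsername("default")
+        .setPassword("password")
+        .setOption("clickhouse_setting_max_threads", "10")
+        .build();
+```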
+
+### Custom HTTP Header {#custom-http-header}
+
+Custom HTTP headers can be set for all operations (client level) or a single one (operation level).
+```java showLineNumbers
+
+QuerySettings settings = new QuerySettings()
+ .httpHeader(HttpHeaders.REFERER, clientReferer)
+ .setQueryId(qId);
+
+```
+
+When options are set via the `setOption` method (either on the `Client.Builder` or on an operation settings class), the custom header name should be prefixed with `http_header_`. The method `com.clickhouse.client.api.ClientConfigProperties#httpHeader()` may be handy in this case.
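+
+For example, the following two forms set the same custom header on a query; the header name and value are hypothetical:
+
+```java showLineNumbers
+String traceId = "my-trace-id"; // hypothetical value
+
+// 1) dedicated method
+QuerySettings settings = new QuerySettings()
+        .httpHeader("X-Trace-Id", traceId);
+
+// 2) raw option with the `http_header_` prefix
+settings.setOption("http_header_X-Trace-Id", traceId);
+```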
+
+## Common Definitions {#common-definitions}
+
+### ClickHouseFormat {#clickhouseformat}
+
+Enum of [supported formats](/interfaces/formats). It includes all formats that ClickHouse supports. In the table below:
+
+* `raw` - the user must transcode the raw data themselves
+* `full` - the client can transcode the data by itself and accepts a raw data stream
+* `-` - operation not supported by ClickHouse for this format
+
+This client version supports:
+
+| Format | Input | Output |
+|-------------------------------------------------------------------------------------------------------------------------------|:------:|:-------:|
+| [TabSeparated](/interfaces/formats/TabSeparated) | raw | raw |
+| [TabSeparatedRaw](/interfaces/formats/TabSeparatedRaw) | raw | raw |
+| [TabSeparatedWithNames](/interfaces/formats/TabSeparatedWithNames) | raw | raw |
+| [TabSeparatedWithNamesAndTypes](/interfaces/formats/TabSeparatedWithNamesAndTypes) | raw | raw |
+| [TabSeparatedRawWithNames](/interfaces/formats/TabSeparatedRawWithNames) | raw | raw |
+| [TabSeparatedRawWithNamesAndTypes](/interfaces/formats/TabSeparatedRawWithNamesAndTypes) | raw | raw |
+| [Template](/interfaces/formats/Template) | raw | raw |
+| [TemplateIgnoreSpaces](/interfaces/formats/TemplateIgnoreSpaces) | raw | - |
+| [CSV](/interfaces/formats/CSV) | raw | raw |
+| [CSVWithNames](/interfaces/formats/CSVWithNames) | raw | raw |
+| [CSVWithNamesAndTypes](/interfaces/formats/CSVWithNamesAndTypes) | raw | raw |
+| [CustomSeparated](/interfaces/formats/CustomSeparated) | raw | raw |
+| [CustomSeparatedWithNames](/interfaces/formats/CustomSeparatedWithNames) | raw | raw |
+| [CustomSeparatedWithNamesAndTypes](/interfaces/formats/CustomSeparatedWithNamesAndTypes) | raw | raw |
+| [SQLInsert](/interfaces/formats/SQLInsert) | - | raw |
+| [Values](/interfaces/formats/Values) | raw | raw |
+| [Vertical](/interfaces/formats/Vertical) | - | raw |
+| [JSON](/interfaces/formats/JSON) | raw | raw |
+| [JSONAsString](/interfaces/formats/JSONAsString) | raw | - |
+| [JSONAsObject](/interfaces/formats/JSONAsObject) | raw | - |
+| [JSONStrings](/interfaces/formats/JSONStrings) | raw | raw |
+| [JSONColumns](/interfaces/formats/JSONColumns) | raw | raw |
+| [JSONColumnsWithMetadata](/interfaces/formats/JSONColumnsWithMetadata) | raw | raw |
+| [JSONCompact](/interfaces/formats/JSONCompact) | raw | raw |
+| [JSONCompactStrings](/interfaces/formats/JSONCompactStrings) | - | raw |
+| [JSONCompactColumns](/interfaces/formats/JSONCompactColumns) | raw | raw |
+| [JSONEachRow](/interfaces/formats/JSONEachRow) | raw | raw |
+| [PrettyJSONEachRow](/interfaces/formats/PrettyJSONEachRow) | - | raw |
+| [JSONEachRowWithProgress](/interfaces/formats/JSONEachRowWithProgress) | - | raw |
+| [JSONStringsEachRow](/interfaces/formats/JSONStringsEachRow) | raw | raw |
+| [JSONStringsEachRowWithProgress](/interfaces/formats/JSONStringsEachRowWithProgress) | - | raw |
+| [JSONCompactEachRow](/interfaces/formats/JSONCompactEachRow) | raw | raw |
+| [JSONCompactEachRowWithNames](/interfaces/formats/JSONCompactEachRowWithNames) | raw | raw |
+| [JSONCompactEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactEachRowWithNamesAndTypes) | raw | raw |
+| [JSONCompactStringsEachRow](/interfaces/formats/JSONCompactStringsEachRow) | raw | raw |
+| [JSONCompactStringsEachRowWithNames](/interfaces/formats/JSONCompactStringsEachRowWithNames) | raw | raw |
+| [JSONCompactStringsEachRowWithNamesAndTypes](/interfaces/formats/JSONCompactStringsEachRowWithNamesAndTypes) | raw | raw |
+| [JSONObjectEachRow](/interfaces/formats/JSONObjectEachRow) | raw | raw |
+| [BSONEachRow](/interfaces/formats/BSONEachRow) | raw | raw |
+| [TSKV](/interfaces/formats/TSKV) | raw | raw |
+| [Pretty](/interfaces/formats/Pretty) | - | raw |
+| [PrettyNoEscapes](/interfaces/formats/PrettyNoEscapes) | - | raw |
+| [PrettyMonoBlock](/interfaces/formats/PrettyMonoBlock) | - | raw |
+| [PrettyNoEscapesMonoBlock](/interfaces/formats/PrettyNoEscapesMonoBlock) | - | raw |
+| [PrettyCompact](/interfaces/formats/PrettyCompact) | - | raw |
+| [PrettyCompactNoEscapes](/interfaces/formats/PrettyCompactNoEscapes) | - | raw |
+| [PrettyCompactMonoBlock](/interfaces/formats/PrettyCompactMonoBlock) | - | raw |
+| [PrettyCompactNoEscapesMonoBlock](/interfaces/formats/PrettyCompactNoEscapesMonoBlock) | - | raw |
+| [PrettySpace](/interfaces/formats/PrettySpace) | - | raw |
+| [PrettySpaceNoEscapes](/interfaces/formats/PrettySpaceNoEscapes) | - | raw |
+| [PrettySpaceMonoBlock](/interfaces/formats/PrettySpaceMonoBlock) | - | raw |
+| [PrettySpaceNoEscapesMonoBlock](/interfaces/formats/PrettySpaceNoEscapesMonoBlock) | - | raw |
+| [Prometheus](/interfaces/formats/Prometheus) | - | raw |
+| [Protobuf](/interfaces/formats/Protobuf) | raw | raw |
+| [ProtobufSingle](/interfaces/formats/ProtobufSingle) | raw | raw |
+| [ProtobufList](/interfaces/formats/ProtobufList) | raw | raw |
+| [Avro](/interfaces/formats/Avro) | raw | raw |
+| [AvroConfluent](/interfaces/formats/AvroConfluent) | raw | - |
+| [Parquet](/interfaces/formats/Parquet) | raw | raw |
+| [ParquetMetadata](/interfaces/formats/ParquetMetadata) | raw | - |
+| [Arrow](/interfaces/formats/Arrow) | raw | raw |
+| [ArrowStream](/interfaces/formats/ArrowStream) | raw | raw |
+| [ORC](/interfaces/formats/ORC) | raw | raw |
+| [One](/interfaces/formats/One) | raw | - |
+| [Npy](/interfaces/formats/Npy) | raw | raw |
+| [RowBinary](/interfaces/formats/RowBinary) | full | full |
+| [RowBinaryWithNames](/interfaces/formats/RowBinaryWithNames)                                                                    | full  | full   |
+| [RowBinaryWithNamesAndTypes](/interfaces/formats/RowBinaryWithNamesAndTypes) | full | full |
+| [RowBinaryWithDefaults](/interfaces/formats/RowBinaryWithDefaults) | full | - |
+| [Native](/interfaces/formats/Native) | full | raw |
+| [Null](/interfaces/formats/Null) | - | raw |
+| [XML](/interfaces/formats/XML) | - | raw |
+| [CapnProto](/interfaces/formats/CapnProto) | raw | raw |
+| [LineAsString](/interfaces/formats/LineAsString) | raw | raw |
+| [Regexp](/interfaces/formats/Regexp) | raw | - |
+| [RawBLOB](/interfaces/formats/RawBLOB) | raw | raw |
+| [MsgPack](/interfaces/formats/MsgPack) | raw | raw |
+| [MySQLDump](/interfaces/formats/MySQLDump) | raw | - |
+| [DWARF](/interfaces/formats/DWARF) | raw | - |
+| [Markdown](/interfaces/formats/Markdown) | - | raw |
+| [Form](/interfaces/formats/Form) | raw | - |
+
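+For formats marked `raw`, the client does not parse the data; the application reads the response stream itself. The following is a minimal sketch (using the `QueryResponse` API described in the Query API section below); the query and format are only examples:
+
+```java showLineNumbers
+QuerySettings settings = new QuerySettings()
+        .setFormat(ClickHouseFormat.CSVWithNames); // a "raw" output format
+
+try (QueryResponse response = client.query("SELECT number FROM system.numbers LIMIT 10", settings).get();
+     BufferedReader reader = new BufferedReader(
+             new InputStreamReader(response.getInputStream(), StandardCharsets.UTF_8))) {
+    String line;
+    while ((line = reader.readLine()) != null) {
+        // raw formats are returned as-is - the application parses each CSV line itself
+        System.out.println(line);
+    }
+} catch (Exception e) {
+    log.error("Failed to read data", e);
+}
+```
+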
+## Insert API {#insert-api}
+
+### insert(String tableName, InputStream data, ClickHouseFormat format) {#insertstring-tablename-inputstream-data-clickhouseformat-format}
+
+Accepts data as an `InputStream` of bytes in the specified format. It is expected that `data` is encoded in the `format`.
+
+**Signatures**
+
+```java
+CompletableFuture<InsertResponse> insert(String tableName, InputStream data, ClickHouseFormat format, InsertSettings settings)
+CompletableFuture<InsertResponse> insert(String tableName, InputStream data, ClickHouseFormat format)
+```
+
+**Parameters**
+
+`tableName` - a target table name.
+
+`data` - an input stream of encoded data.
+
+`format` - a format in which the data is encoded.
+
+`settings` - request settings.
+
+**Return value**
+
+Future of `InsertResponse` type - result of the operation and additional information like server side metrics.
+
+**Examples**
+
+```java showLineNumbers
+try (InputStream dataStream = getDataStream()) {
+ try (InsertResponse response = client.insert(TABLE_NAME, dataStream, ClickHouseFormat.JSONEachRow,
+ insertSettings).get(3, TimeUnit.SECONDS)) {
+
+ log.info("Insert finished: {} rows written", response.getMetrics().getMetric(ServerMetrics.NUM_ROWS_WRITTEN).getLong());
+ } catch (Exception e) {
+ log.error("Failed to write JSONEachRow data", e);
+ throw new RuntimeException(e);
+ }
+}
+
+```
+
+### insert(String tableName, List<?> data, InsertSettings settings) {#insertstring-tablename-listlt-data-insertsettings-settings}
+
+Sends a write request to the database. The list of objects is converted into an efficient format and then sent to the server. The class of the list items should be registered up front using the `register(Class, TableSchema)` method.
+
+**Signatures**
+```java
+CompletableFuture<InsertResponse> insert(String tableName, List<?> data, InsertSettings settings)
+CompletableFuture<InsertResponse> insert(String tableName, List<?> data)
+```
+
+**Parameters**
+
+`tableName` - name of the target table.
+
+`data` - a collection of DTO (Data Transfer Object) objects.
+
+`settings` - request settings.
+
+**Return value**
+
+Future of `InsertResponse` type - the result of the operation and additional information like server side metrics.
+
+**Examples**
+
+```java showLineNumbers
+// Important step (done once) - register class to pre-compile object serializer according to the table schema.
+client.register(ArticleViewEvent.class, client.getTableSchema(TABLE_NAME));
+
+List<ArticleViewEvent> events = loadBatch();
+
+try (InsertResponse response = client.insert(TABLE_NAME, events).get()) {
+ // handle response, then it will be closed and connection that served request will be released.
+}
+```
+
+### InsertSettings {#insertsettings}
+
+Configuration options for insert operations.
+
+**Configuration methods**
+
+| Method | Description |
+|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
+| `setQueryId(String queryId)` | Sets query ID that will be assigned to the operation. Default: `null`. |
+| `setDeduplicationToken(String token)` | Sets the deduplication token. This token will be sent to the server and can be used to identify the query. Default: `null`. |
+| `setInputStreamCopyBufferSize(int size)` | Copy buffer size. The buffer is used during write operations to copy data from user-provided input stream to an output stream. Default: `8196`. |
+| `serverSetting(String name, String value)` | Sets individual server settings for an operation. |
+| `serverSetting(String name, Collection values)` | Sets individual server settings with multiple values for an operation. Items of the collection should be `String` values. |
+| `setDBRoles(Collection dbRoles)` | Sets DB roles to be set before executing an operation. Items of the collection should be `String` values. |
+| `setOption(String option, Object value)` | Sets a configuration option in raw format. This is not a server setting. |
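+
+A short usage sketch combining several of these options (the query ID, token, table, and data variables are placeholders; fluent chaining is assumed, as in the other examples in this document):
+
+```java showLineNumbers
+InsertSettings insertSettings = new InsertSettings()
+        .setQueryId("my-insert-01")               // placeholder ID, easy to find in system.query_log
+        .setDeduplicationToken("batch-0001")      // placeholder token
+        .serverSetting("async_insert", "1");      // per-operation server setting
+
+try (InsertResponse response = client.insert(TABLE_NAME, events, insertSettings).get()) {
+    System.out.println("Rows written: " + response.getMetrics().getMetric(ServerMetrics.NUM_ROWS_WRITTEN).getLong());
+} catch (Exception e) {
+    log.error("Failed to insert data", e);
+}
+```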
+
+### InsertResponse {#insertresponse}
+
+Response object that holds the result of an insert operation. It is only available if the client got a response from the server.
+
+:::note
+This object should be closed as soon as possible to release a connection because the connection cannot be re-used until all data of previous response is fully read.
+:::
+
+| Method | Description |
+|-----------------------------|------------------------------------------------------------------------------------------------------|
+| `OperationMetrics getMetrics()` | Returns object with operation metrics. |
+| `String getQueryId()` | Returns query ID assigned for the operation by the application (through operation settings or by server). |
+
+## Query API {#query-api}
+
+### query(String sqlQuery) {#querystring-sqlquery}
+
+Sends `sqlQuery` as is. The response format is set by query settings. `QueryResponse` will hold a reference to the response stream, which should be consumed by a reader that supports the format.
+
+**Signatures**
+
+```java
+CompletableFuture<QueryResponse> query(String sqlQuery, QuerySettings settings)
+CompletableFuture<QueryResponse> query(String sqlQuery)
+```
+
+**Parameters**
+
+`sqlQuery` - a single SQL statement. The query is sent as is to the server.
+
+`settings` - request settings.
+
+**Return value**
+
+Future of `QueryResponse` type - a result dataset and additional information like server side metrics. The Response object should be closed after consuming the dataset.
+
+**Examples**
+
+```java
+final String sql = "select * from " + TABLE_NAME + " where title <> '' limit 10";
+
+// Default format is RowBinaryWithNamesAndTypes, so the reader has all information about the columns
+try (QueryResponse response = client.query(sql).get(3, TimeUnit.SECONDS)) {
+
+ // Create a reader to access the data in a convenient way
+ ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response);
+
+ while (reader.hasNext()) {
+ reader.next(); // Read the next record from stream and parse it
+
+ // get values
+ double id = reader.getDouble("id");
+ String title = reader.getString("title");
+ String url = reader.getString("url");
+
+ // collecting data
+ }
+} catch (Exception e) {
+ log.error("Failed to read data", e);
+}
+
+// put business logic outside of the reading block to release http connection asap.
+```
+
+### query(String sqlQuery, Map<String, Object> queryParams, QuerySettings settings) {#querystring-sqlquery-mapltstring-object-queryparams-querysettings-settings}
+
+Sends `sqlQuery` as is. Additionally, it sends query parameters so the server can compile the SQL expression.
+
+**Signatures**
+```java
+CompletableFuture<QueryResponse> query(String sqlQuery, Map<String, Object> queryParams, QuerySettings settings)
+```
+
+**Parameters**
+
+`sqlQuery` - SQL expression with placeholders `{}`.
+
+`queryParams` - a map of variables used to complete the SQL expression on the server.
+
+`settings` - request settings.
+
+**Return value**
+
+Future of `QueryResponse` type - a result dataset and additional information like server side metrics. The Response object should be closed after consuming the dataset.
+
+**Examples**
+
+```java showLineNumbers
+
+// define parameters. They will be sent to the server along with the request.
+Map<String, Object> queryParams = new HashMap<>();
+queryParams.put("param1", 2);
+
+try (QueryResponse response =
+ client.query("SELECT * FROM " + table + " WHERE col1 >= {param1:UInt32}", queryParams, new QuerySettings()).get()) {
+
+ // Create a reader to access the data in a convenient way
+ ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response);
+
+ while (reader.hasNext()) {
+ reader.next(); // Read the next record from stream and parse it
+
+ // reading data
+ }
+
+} catch (Exception e) {
+ log.error("Failed to read data", e);
+}
+
+```
+
+### queryAll(String sqlQuery) {#queryallstring-sqlquery}
+
+Queries data in `RowBinaryWithNamesAndTypes` format and returns the result as a collection. Read performance is the same as with the reader, but more memory is required to hold the whole dataset.
+
+**Signatures**
+```java
+List<GenericRecord> queryAll(String sqlQuery)
+```
+
+**Parameters**
+
+`sqlQuery` - SQL expression to query data from the server.
+
+**Return value**
+
+Complete dataset represented by a list of `GenericRecord` objects that provide row-style access to the result data.
+
+**Examples**
+
+```java showLineNumbers
+try {
+ log.info("Reading whole table and process record by record");
+ final String sql = "select * from " + TABLE_NAME + " where title <> ''";
+
+ // Read whole result set and process it record by record
+ client.queryAll(sql).forEach(row -> {
+ double id = row.getDouble("id");
+ String title = row.getString("title");
+ String url = row.getString("url");
+
+ log.info("id: {}, title: {}, url: {}", id, title, url);
+ });
+} catch (Exception e) {
+ log.error("Failed to read data", e);
+}
+```
+
+### QuerySettings {#querysettings}
+
+Configuration options for query operations.
+
+**Configuration methods**
+
+| Method | Description |
+|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
+| `setQueryId(String queryId)` | Sets query ID that will be assigned to the operation. |
+| `setFormat(ClickHouseFormat format)` | Sets response format. See [ClickHouseFormat](#clickhouseformat) for the full list. |
+| `setMaxExecutionTime(Integer maxExecutionTime)` | Sets the maximum operation execution time on the server. Does not affect the read timeout. |
+| `waitEndOfQuery(Boolean waitEndOfQuery)` | Requests the server to wait for the end of the query before sending a response. |
+| `setUseServerTimeZone(Boolean useServerTimeZone)` | Server timezone (see client config) will be used to parse date/time types in the result of an operation. Default `false`. |
+| `setUseTimeZone(String timeZone)` | Requests server to use `timeZone` for time conversion. See [session_timezone](/operations/settings/settings#session_timezone). |
+| `serverSetting(String name, String value)` | Sets individual server settings for an operation. |
+| `serverSetting(String name, Collection values)` | Sets individual server settings with multiple values for an operation. Items of the collection should be `String` values. |
+| `setDBRoles(Collection dbRoles)` | Sets DB roles to be set before executing an operation. Items of the collection should be `String` values. |
+| `setOption(String option, Object value)` | Sets a configuration option in raw format. This is not a server setting. |
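+
+A short usage sketch combining several of these options (the query ID and query text are placeholders; fluent chaining is assumed, as in the other examples in this document):
+
+```java showLineNumbers
+QuerySettings settings = new QuerySettings()
+        .setQueryId("my-query-01")                          // placeholder ID
+        .setMaxExecutionTime(30)                            // maps to the server-side max_execution_time
+        .waitEndOfQuery(true)
+        .serverSetting("session_timezone", "Europe/Zurich");
+
+try (QueryResponse response = client.query("SELECT now()", settings).get()) {
+    ClickHouseBinaryFormatReader reader = client.newBinaryFormatReader(response);
+    while (reader.hasNext()) {
+        reader.next();
+        // read columns as shown in the Query API examples above
+    }
+} catch (Exception e) {
+    log.error("Failed to read data", e);
+}
+```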
+
+### QueryResponse {#queryresponse}
+
+Response object that holds the result of query execution. It is only available if the client got a response from the server.
+
+:::note
+This object should be closed as soon as possible to release a connection because the connection cannot be re-used until all data of previous response is fully read.
+:::
+
+| Method | Description |
+|-------------------------------------|------------------------------------------------------------------------------------------------------|
+| `ClickHouseFormat getFormat()` | Returns a format in which data in the response is encoded. |
+| `InputStream getInputStream()` | Returns uncompressed byte stream of data in the specified format. |
+| `OperationMetrics getMetrics()` | Returns object with operation metrics. |
+| `String getQueryId()` | Returns query ID assigned for the operation by the application (through operation settings or by server). |
+| `TimeZone getTimeZone()` | Returns timezone that should be used for handling Date/DateTime types in the response. |
+
+### Examples {#examples}
+
+- Example code is available in [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2)
+- Reference Spring Service [implementation](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service)
+
+## Common API {#common-api}
+
+### getTableSchema(String table) {#gettableschemastring-table}
+
+Fetches table schema for the `table`.
+
+**Signatures**
+
+```java
+TableSchema getTableSchema(String table)
+TableSchema getTableSchema(String table, String database)
+```
+
+**Parameters**
+
+`table` - table name for which schema data should be fetched.
+
+`database` - database where the target table is defined.
+
+**Return value**
+
+Returns a `TableSchema` object with a list of table columns.
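+
+A minimal usage sketch (the table and database names are placeholders):
+
+```java showLineNumbers
+// Schema of a table in the default database
+TableSchema schema = client.getTableSchema(TABLE_NAME);
+
+// Schema of a table in an explicitly named database (placeholder names)
+TableSchema eventsSchema = client.getTableSchema("events", "analytics");
+
+// A typical use: register a POJO against the fetched schema (see register() below)
+client.register(ArticleViewEvent.class, schema);
+```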
+
+### getTableSchemaFromQuery(String sql) {#gettableschemafromquerystring-sql}
+
+Fetches schema from a SQL statement.
+
+**Signatures**
+
+```java
+TableSchema getTableSchemaFromQuery(String sql)
+```
+
+**Parameters**
+
+`sql` - a `SELECT` SQL statement whose schema should be returned.
+
+**Return value**
+
+Returns a `TableSchema` object with columns matching the `sql` expression.
+
+### TableSchema {#tableschema}
+
+### register(Class<?> clazz, TableSchema schema) {#registerclasslt-clazz-tableschema-schema}
+
+Compiles the serialization and deserialization layer for the Java class to be used for writing/reading data with `schema`. The method creates a serializer and deserializer for each getter/setter pair and its corresponding column.
+The column match is found by extracting the column name from the method name. For example, `getFirstName` matches the column `first_name` or `firstname`.
+
+**Signatures**
+
+```java
+void register(Class<?> clazz, TableSchema schema)
+```
+
+**Parameters**
+
+`clazz` - Class representing the POJO used to read/write data.
+
+`schema` - Data schema to use for matching with POJO properties.
+
+**Examples**
+
+```java showLineNumbers
+client.register(ArticleViewEvent.class, client.getTableSchema(TABLE_NAME));
+```
+
+## Usage Examples {#usage-examples}
+
+Complete example code is stored in the repo in the `examples` [folder](https://github.com/ClickHouse/clickhouse-java/tree/main/examples):
+
+- [client-v2](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2) - main set of examples.
+- [demo-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-service) - example of how to use the client in a Spring Boot application.
+- [demo-kotlin-service](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/demo-kotlin-service) - example of how to use the client in a Ktor (Kotlin) application.
+
+## Migration From V1 (<= 0.7.x) {#migration_from_v1}
+
+
+The old client (V1) used `com.clickhouse.client.ClickHouseClient#builder` as the starting point. The new client (V2) uses a similar pattern with `com.clickhouse.client.api.Client.Builder`. The main
+differences are:
+- no service loader is used to pick the implementation. `com.clickhouse.client.api.Client` is a facade class for all kinds of implementations in the future.
+- fewer sources of configuration: one is provided to the builder and one with operation settings (`QuerySettings`, `InsertSettings`). The previous version had per-node configuration and loaded
+environment variables in some cases.
+
+### Configuration Parameters Mapping {#migration_from_v1_config}
+
+There are 3 enum classes related to configuration in V1:
+- `com.clickhouse.client.config.ClickHouseDefaults` - configuration parameters that are supposed to be set in most use cases, like `USER` and `PASSWORD`.
+- `com.clickhouse.client.config.ClickHouseClientOption` - configuration parameters specific to the client, like `HEALTH_CHECK_INTERVAL`.
+- `com.clickhouse.client.http.config.ClickHouseHttpOption` - configuration parameters specific to the HTTP interface, like `RECEIVE_QUERY_PROGRESS`.
+
+They were designed to group parameters and provide clear separation. However, in some cases this led to confusion (for example, is there a difference between `com.clickhouse.client.config.ClickHouseDefaults#ASYNC` and
+`com.clickhouse.client.config.ClickHouseClientOption#ASYNC`?). The new V2 client uses `com.clickhouse.client.api.Client.Builder` as a single dictionary of all possible client configuration options. There is also
+`com.clickhouse.client.api.ClientConfigProperties`, where all configuration parameter names are listed.
+
+The table below shows which old options are supported in the new client and what they map to.
+
+**Legend:** ✔ = supported, ✗ = dropped
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#HOST` | `Client.Builder#addEndpoint` | |
+| `ClickHouseDefaults#PROTOCOL` | ✗ | Only HTTP supported in V2 |
+| `ClickHouseDefaults#DATABASE`<br/>`ClickHouseClientOption#DATABASE` | `Client.Builder#setDefaultDatabase` | |
+| `ClickHouseDefaults#USER` | `Client.Builder#setUsername` | |
+| `ClickHouseDefaults#PASSWORD` | `Client.Builder#setPassword` | |
+| `ClickHouseClientOption#CONNECTION_TIMEOUT` | `Client.Builder#setConnectTimeout` | |
+| `ClickHouseClientOption#CONNECTION_TTL` | `Client.Builder#setConnectionTTL` | |
+| `ClickHouseHttpOption#MAX_OPEN_CONNECTIONS` | `Client.Builder#setMaxConnections` | |
+| `ClickHouseHttpOption#KEEP_ALIVE`<br/>`ClickHouseHttpOption#KEEP_ALIVE_TIMEOUT` | `Client.Builder#setKeepAliveTimeout` | |
+| `ClickHouseHttpOption#CONNECTION_REUSE_STRATEGY` | `Client.Builder#setConnectionReuseStrategy` | |
+| `ClickHouseHttpOption#USE_BASIC_AUTHENTICATION` | `Client.Builder#useHTTPBasicAuth` | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#SSL_CERTIFICATE_TYPE` | ✗ | |
+| `ClickHouseDefaults#SSL_KEY_ALGORITHM` | ✗ | |
+| `ClickHouseDefaults#SSL_PROTOCOL` | ✗ | |
+| `ClickHouseClientOption#SSL` | ✗ | See `Client.Builder#addEndpoint` |
+| `ClickHouseClientOption#SSL_MODE` | ✗ | |
+| `ClickHouseClientOption#SSL_ROOT_CERTIFICATE` | `Client.Builder#setRootCertificate` | SSL Auth should be enabled by `useSSLAuthentication` |
+| `ClickHouseClientOption#SSL_CERTIFICATE` | `Client.Builder#setClientCertificate` | |
+| `ClickHouseClientOption#SSL_KEY` | `Client.Builder#setClientKey` | |
+| `ClickHouseClientOption#KEY_STORE_TYPE` | `Client.Builder#setSSLTrustStoreType` | |
+| `ClickHouseClientOption#TRUST_STORE` | `Client.Builder#setSSLTrustStore` | |
+| `ClickHouseClientOption#KEY_STORE_PASSWORD` | `Client.Builder#setSSLTrustStorePassword` | |
+| `ClickHouseClientOption#SSL_SOCKET_SNI` | `Client.Builder#sslSocketSNI` | |
+| `ClickHouseClientOption#CUSTOM_SOCKET_FACTORY` | ✗ | |
+| `ClickHouseClientOption#CUSTOM_SOCKET_FACTORY_OPTIONS` | ✗ | See `Client.Builder#sslSocketSNI` to set SNI |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#SOCKET_TIMEOUT` | `Client.Builder#setSocketTimeout` | |
+| `ClickHouseClientOption#SOCKET_REUSEADDR` | `Client.Builder#setSocketReuseAddress` | |
+| `ClickHouseClientOption#SOCKET_KEEPALIVE` | `Client.Builder#setSocketKeepAlive` | |
+| `ClickHouseClientOption#SOCKET_LINGER` | `Client.Builder#setSocketLinger` | |
+| `ClickHouseClientOption#SOCKET_IP_TOS` | ✗ | |
+| `ClickHouseClientOption#SOCKET_TCP_NODELAY` | `Client.Builder#setSocketTcpNodelay` | |
+| `ClickHouseClientOption#SOCKET_RCVBUF` | `Client.Builder#setSocketRcvbuf` | |
+| `ClickHouseClientOption#SOCKET_SNDBUF` | `Client.Builder#setSocketSndbuf` | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#COMPRESS` | `Client.Builder#compressServerResponse` | See also `useHttpCompression` |
+| `ClickHouseClientOption#DECOMPRESS` | `Client.Builder#compressClientRequest` | See also `useHttpCompression` |
+| `ClickHouseClientOption#COMPRESS_ALGORITHM` | ✗ | `LZ4` for non-http. Http uses `Accept-Encoding` |
+| `ClickHouseClientOption#DECOMPRESS_ALGORITHM` | ✗ | `LZ4` for non-http. Http uses `Content-Encoding` |
+| `ClickHouseClientOption#COMPRESS_LEVEL` | ✗ | |
+| `ClickHouseClientOption#DECOMPRESS_LEVEL` | ✗ | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#PROXY_TYPE` | `Client.Builder#addProxy` | |
+| `ClickHouseClientOption#PROXY_HOST` | `Client.Builder#addProxy` | |
+| `ClickHouseClientOption#PROXY_PORT` | `Client.Builder#addProxy` | |
+| `ClickHouseClientOption#PROXY_USERNAME` | `Client.Builder#setProxyCredentials` | |
+| `ClickHouseClientOption#PROXY_PASSWORD` | `Client.Builder#setProxyCredentials` | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#MAX_EXECUTION_TIME` | `Client.Builder#setExecutionTimeout` | |
+| `ClickHouseClientOption#RETRY` | `Client.Builder#setMaxRetries` | See also `retryOnFailures` |
+| `ClickHouseHttpOption#AHC_RETRY_ON_FAILURE` | `Client.Builder#retryOnFailures` | |
+| `ClickHouseClientOption#FAILOVER` | ✗ | |
+| `ClickHouseClientOption#REPEAT_ON_SESSION_LOCK` | ✗ | |
+| `ClickHouseClientOption#SESSION_ID` | ✗ | |
+| `ClickHouseClientOption#SESSION_CHECK` | ✗ | |
+| `ClickHouseClientOption#SESSION_TIMEOUT` | ✗ | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#SERVER_TIME_ZONE`<br/>`ClickHouseClientOption#SERVER_TIME_ZONE` | `Client.Builder#setServerTimeZone` | |
+| `ClickHouseClientOption#USE_SERVER_TIME_ZONE` | `Client.Builder#useServerTimeZone` | |
+| `ClickHouseClientOption#USE_SERVER_TIME_ZONE_FOR_DATES` | | |
+| `ClickHouseClientOption#USE_TIME_ZONE` | `Client.Builder#useTimeZone` | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#BUFFER_SIZE` | `Client.Builder#setClientNetworkBufferSize` | |
+| `ClickHouseClientOption#BUFFER_QUEUE_VARIATION` | ✗ | |
+| `ClickHouseClientOption#READ_BUFFER_SIZE` | ✗ | |
+| `ClickHouseClientOption#WRITE_BUFFER_SIZE` | ✗ | |
+| `ClickHouseClientOption#REQUEST_CHUNK_SIZE` | ✗ | |
+| `ClickHouseClientOption#REQUEST_BUFFERING` | ✗ | |
+| `ClickHouseClientOption#RESPONSE_BUFFERING` | ✗ | |
+| `ClickHouseClientOption#MAX_BUFFER_SIZE` | ✗ | |
+| `ClickHouseClientOption#MAX_QUEUED_BUFFERS` | ✗ | |
+| `ClickHouseClientOption#MAX_QUEUED_REQUESTS` | ✗ | |
+| `ClickHouseClientOption#REUSE_VALUE_WRAPPER` | ✗ | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#ASYNC`<br/>`ClickHouseClientOption#ASYNC` | `Client.Builder#useAsyncRequests` | |
+| `ClickHouseDefaults#MAX_SCHEDULER_THREADS` | ✗ | see `setSharedOperationExecutor` |
+| `ClickHouseDefaults#MAX_THREADS` | ✗ | see `setSharedOperationExecutor` |
+| `ClickHouseDefaults#THREAD_KEEPALIVE_TIMEOUT` | ✗ | See `setSharedOperationExecutor` |
+| `ClickHouseClientOption#MAX_THREADS_PER_CLIENT` | ✗ | |
+| `ClickHouseClientOption#MAX_CORE_THREAD_TTL` | ✗ | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseHttpOption#CUSTOM_HEADERS` | `Client.Builder#httpHeaders` | |
+| `ClickHouseHttpOption#CUSTOM_PARAMS` | ✗ | See `Client.Builder#serverSetting` |
+| `ClickHouseClientOption#CLIENT_NAME` | `Client.Builder#setClientName` | |
+| `ClickHouseHttpOption#CONNECTION_PROVIDER` | ✗ | |
+| `ClickHouseHttpOption#DEFAULT_RESPONSE` | ✗ | |
+| `ClickHouseHttpOption#SEND_HTTP_CLIENT_ID` | ✗ | |
+| `ClickHouseHttpOption#AHC_VALIDATE_AFTER_INACTIVITY` | ✗ | Always enabled when Apache Http Client is used |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#FORMAT`<br/>`ClickHouseClientOption#FORMAT` | ✗ | Moved to operation settings (`QuerySettings` and `InsertSettings`) |
+| `ClickHouseClientOption#QUERY_ID` | ✗ | See `QuerySettings` and `InsertSettings` |
+| `ClickHouseClientOption#LOG_LEADING_COMMENT` | ✗ | See `QuerySettings#logComment` and `InsertSettings#logComment` |
+| `ClickHouseClientOption#MAX_RESULT_ROWS` | ✗ | Server-side setting |
+| `ClickHouseClientOption#RESULT_OVERFLOW_MODE` | ✗ | Server-side setting |
+| `ClickHouseHttpOption#RECEIVE_QUERY_PROGRESS` | ✗ | Server-side setting |
+| `ClickHouseHttpOption#WAIT_END_OF_QUERY` | ✗ | Server-side setting |
+| `ClickHouseHttpOption#REMEMBER_LAST_SET_ROLES` | `Client#setDBRoles` | Runtime config now. See also `QuerySettings#setDBRoles` and `InsertSettings#setDBRoles` |
+
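+Since format, query id and log comment are now per-operation, they are passed through `QuerySettings`/`InsertSettings` instead of the client builder. A hedged sketch: `logComment` comes from the table's comments, while `setFormat` and the `query` overload shown are assumptions.
+
+```java
+// Hedged sketch: per-operation settings in V2 instead of client-wide options
+QuerySettings settings = new QuerySettings()
+        .setFormat(ClickHouseFormat.TabSeparated)  // was ClickHouseDefaults#FORMAT / ClickHouseClientOption#FORMAT (setter name assumed)
+        .logComment("migration-example");          // was ClickHouseClientOption#LOG_LEADING_COMMENT
+
+client.query("SELECT 1", settings);                // `client` is an already-built V2 Client instance
+```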
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseClientOption#AUTO_DISCOVERY` | ✗ | |
+| `ClickHouseClientOption#LOAD_BALANCING_POLICY` | ✗ | |
+| `ClickHouseClientOption#LOAD_BALANCING_TAGS` | ✗ | |
+| `ClickHouseClientOption#HEALTH_CHECK_INTERVAL` | ✗ | |
+| `ClickHouseClientOption#HEALTH_CHECK_METHOD` | ✗ | |
+| `ClickHouseClientOption#NODE_DISCOVERY_INTERVAL` | ✗ | |
+| `ClickHouseClientOption#NODE_DISCOVERY_LIMIT` | ✗ | |
+| `ClickHouseClientOption#NODE_CHECK_INTERVAL` | ✗ | |
+| `ClickHouseClientOption#NODE_GROUP_SIZE` | ✗ | |
+| `ClickHouseClientOption#CHECK_ALL_NODES` | ✗ | |
+
+
+
+
+
+| V1 Configuration | V2 Builder Method | Comments |
+|------------------|-------------------|----------|
+| `ClickHouseDefaults#AUTO_SESSION` | ✗ | Session support will be reviewed |
+| `ClickHouseDefaults#BUFFERING` | ✗ | |
+| `ClickHouseDefaults#MAX_REQUESTS` | ✗ | |
+| `ClickHouseDefaults#ROUNDING_MODE` | | |
+| `ClickHouseDefaults#SERVER_VERSION`<br/>`ClickHouseClientOption#SERVER_VERSION` | `Client.Builder#setServerVersion` | |
+| `ClickHouseDefaults#SRV_RESOLVE` | ✗ | |
+| `ClickHouseClientOption#CUSTOM_SETTINGS` | | |
+| `ClickHouseClientOption#PRODUCT_NAME` | ✗ | Use client name |
+| `ClickHouseClientOption#RENAME_RESPONSE_COLUMN` | ✗ | |
+| `ClickHouseClientOption#SERVER_REVISION` | ✗ | |
+| `ClickHouseClientOption#TRANSACTION_TIMEOUT` | ✗ | |
+| `ClickHouseClientOption#WIDEN_UNSIGNED_TYPES` | ✗ | |
+| `ClickHouseClientOption#USE_BINARY_STRING` | ✗ | |
+| `ClickHouseClientOption#USE_BLOCKING_QUEUE` | ✗ | |
+| `ClickHouseClientOption#USE_COMPILATION` | ✗ | |
+| `ClickHouseClientOption#USE_OBJECTS_IN_ARRAYS` | ✗ | |
+| `ClickHouseClientOption#MAX_MAPPER_CACHE` | ✗ | |
+| `ClickHouseClientOption#MEASURE_REQUEST_TIME` | ✗ | |
+
+
+
+
+
+
+
+
+Java client library for communicating with a DB server through its protocols. The current implementation supports only the [HTTP interface](/interfaces/http). The library provides its own API to send requests to a server.
+
+:::warning Deprecation
+This library will be deprecated soon. Use the latest [Java Client](/integrations/language-clients/java/client/client.mdx) for new projects
+:::
+
+## Setup {#v1-setup}
+
+
+
+
+```xml
+<!-- https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client -->
+<dependency>
+    <groupId>com.clickhouse</groupId>
+    <artifactId>clickhouse-http-client</artifactId>
+    <version>0.7.2</version>
+</dependency>
+```
+
+
+
+
+```kotlin
+// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client
+implementation("com.clickhouse:clickhouse-http-client:0.7.2")
+```
+
+
+
+```groovy
+// https://mvnrepository.com/artifact/com.clickhouse/clickhouse-http-client
+implementation 'com.clickhouse:clickhouse-http-client:0.7.2'
+```
+
+
+
+
+Since version `0.5.0`, the driver uses a new HTTP client library that needs to be added as a dependency.
+
+
+
+
+```xml
+<!-- https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5 -->
+<dependency>
+    <groupId>org.apache.httpcomponents.client5</groupId>
+    <artifactId>httpclient5</artifactId>
+    <version>5.3.1</version>
+</dependency>
+```
+
+
+
+
+```kotlin
+// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5
+implementation("org.apache.httpcomponents.client5:httpclient5:5.3.1")
+```
+
+
+
+```groovy
+// https://mvnrepository.com/artifact/org.apache.httpcomponents.client5/httpclient5
+implementation 'org.apache.httpcomponents.client5:httpclient5:5.3.1'
+```
+
+
+
+
+## Initialization {#v1-initialization}
+
+Connection URL Format: `protocol://host[:port][/database][?param[=value][&param[=value]]][#tag[,tag]]`, for example:
+
+- `http://localhost:8443?ssl=true&sslmode=NONE`
+- `https://explorer@play.clickhouse.com:443`
+
+Connect to a single node:
+
+```java showLineNumbers
+ClickHouseNode server = ClickHouseNode.of("http://localhost:8123/default?compress=0");
+```
+Connect to a cluster with multiple nodes:
+
+```java showLineNumbers
+ClickHouseNodes servers = ClickHouseNodes.of(
+ "jdbc:ch:http://server1.domain,server2.domain,server3.domain/my_db"
+ + "?load_balancing_policy=random&health_check_interval=5000&failover=2");
+```
+
+## Query API {#v1-query-api}
+
+```java showLineNumbers
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers)
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("select * from numbers limit :limit")
+ .params(1000)
+ .executeAndWait()) {
+ ClickHouseResponseSummary summary = response.getSummary();
+ long totalRows = summary.getTotalRowsToRead();
+}
+```
+
+## Streaming Query API {#v1-streaming-query-api}
+
+```java showLineNumbers
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers)
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("select * from numbers limit :limit")
+ .params(1000)
+ .executeAndWait()) {
+ for (ClickHouseRecord r : response.records()) {
+ int num = r.getValue(0).asInteger();
+ // type conversion
+ String str = r.getValue(0).asString();
+ LocalDate date = r.getValue(0).asDate();
+ }
+}
+```
+
+See [complete code example](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L73) in the [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client).
+
+## Insert API {#v1-insert-api}
+
+```java showLineNumbers
+
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers).write()
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("insert into my_table select c2, c3 from input('c1 UInt8, c2 String, c3 Int32')")
+ .data(myInputStream) // `myInputStream` is source of data in RowBinary format
+ .executeAndWait()) {
+ ClickHouseResponseSummary summary = response.getSummary();
+ summary.getWrittenRows();
+}
+```
+
+See [complete code example](https://github.com/ClickHouse/clickhouse-java/blob/main/examples/client/src/main/java/com/clickhouse/examples/jdbc/Main.java#L39) in the [repo](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client).
+
+**RowBinary Encoding**
+
+RowBinary format is described on its [page](/interfaces/formats/RowBinaryWithNamesAndTypes).
+
+There is an example of [code](https://github.com/ClickHouse/clickhouse-kafka-connect/blob/main/src/main/java/com/clickhouse/kafka/connect/sink/db/ClickHouseWriter.java#L622).
+
+## Features {#v1-features}
+### Compression {#v1-compression}
+
+By default, the client uses LZ4 compression, which requires this dependency:
+
+
+
+
+```xml
+<!-- https://mvnrepository.com/artifact/org.lz4/lz4-java -->
+<dependency>
+    <groupId>org.lz4</groupId>
+    <artifactId>lz4-java</artifactId>
+    <version>1.8.0</version>
+</dependency>
+```
+
+
+
+
+```kotlin
+// https://mvnrepository.com/artifact/org.lz4/lz4-java
+implementation("org.lz4:lz4-java:1.8.0")
+```
+
+
+
+```groovy
+// https://mvnrepository.com/artifact/org.lz4/lz4-java
+implementation 'org.lz4:lz4-java:1.8.0'
+```
+
+
+
+
+You can choose to use gzip instead by setting `compress_algorithm=gzip` in the connection URL.
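+
+For example, reusing the single-node connection shown earlier (the endpoint is a placeholder):
+
+```java
+ClickHouseNode server = ClickHouseNode.of("http://localhost:8123/default?compress_algorithm=gzip");
+```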
+
+Alternatively, you can disable compression in a few ways:
+
+1. Disable by setting `compress=0` in the connection URL: `http://localhost:8123/default?compress=0`
+2. Disable via the client configuration:
+
+```java showLineNumbers
+ClickHouseClient client = ClickHouseClient.builder()
+ .config(new ClickHouseConfig(Map.of(ClickHouseClientOption.COMPRESS, false)))
+ .nodeSelector(ClickHouseNodeSelector.of(ClickHouseProtocol.HTTP))
+ .build();
+```
+
+See the [compression documentation](/data-compression/compression-modes) to learn more about different compression options.
+
+### Multiple queries {#v1-multiple-queries}
+
+Execute multiple queries in a worker thread one after another within the same session:
+
+```java showLineNumbers
+CompletableFuture<List<ClickHouseResponseSummary>> future = ClickHouseClient.send(servers.apply(servers.getNodeSelector()),
+ "create database if not exists my_base",
+ "use my_base",
+ "create table if not exists test_table(s String) engine=Memory",
+ "insert into test_table values('1')('2')('3')",
+ "select * from test_table limit 1",
+ "truncate table test_table",
+ "drop table if exists test_table");
+List<ClickHouseResponseSummary> results = future.get();
+```
+
+### Named Parameters {#v1-named-parameters}
+
+You can pass parameters by name rather than relying solely on their position in the parameter list. This capability is available using the `params` function.
+
+```java showLineNumbers
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers)
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("select * from my_table where name=:name limit :limit")
+ .params("Ben", 1000)
+ .executeAndWait()) {
+ //...
+}
+```
+
+:::note Parameters
+All `params` signatures involving `String` type (`String`, `String[]`, `Map`) assume the keys being passed are valid ClickHouse SQL strings. For instance:
+
+```java showLineNumbers
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers)
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("select * from my_table where name=:name")
+ .params(Map.of("name","'Ben'"))
+ .executeAndWait()) {
+ //...
+}
+```
+
+If you prefer not to convert String objects to ClickHouse SQL expressions manually, you can use the helper function `ClickHouseValues.convertToSqlExpression` located in `com.clickhouse.data`:
+
+```java showLineNumbers
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP);
+ ClickHouseResponse response = client.read(servers)
+ .format(ClickHouseFormat.RowBinaryWithNamesAndTypes)
+ .query("select * from my_table where name=:name")
+ .params(Map.of("name", ClickHouseValues.convertToSqlExpression("Ben's")))
+ .executeAndWait()) {
+ //...
+}
+```
+
+In the example above, `ClickHouseValues.convertToSqlExpression` will escape the inner single quote and surround the value with valid single quotes.
+
+Other types, such as `Integer`, `UUID`, `Array` and `Enum` will be converted automatically inside `params`.
+:::
+
+## Node Discovery {#v1-node-discovery}
+
+The Java client provides the ability to discover ClickHouse nodes automatically. Auto-discovery is disabled by default. To enable it, set `auto_discovery` to `true`:
+
+```java
+properties.setProperty("auto_discovery", "true");
+```
+
+Or in the connection URL:
+
+```plaintext
+jdbc:ch://my-server/system?auto_discovery=true
+```
+
+If auto-discovery is enabled, there is no need to specify all ClickHouse nodes in the connection URL. Nodes specified in the URL will be treated as seeds, and the Java client will automatically discover more nodes from system tables and/or clickhouse-keeper or zookeeper.
+
+The following options are responsible for auto-discovery configuration:
+
+| Property | Default | Description |
+|-------------------------|---------|-------------------------------------------------------------------------------------------------------|
+| auto_discovery | `false` | Whether the client should discover more nodes from system tables and/or clickhouse-keeper/zookeeper. |
+| node_discovery_interval | `0` | Node discovery interval in milliseconds, zero or negative value means one-time discovery. |
+| node_discovery_limit | `100` | Maximum number of nodes that can be discovered at a time; zero or negative value means no limit. |
+
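+For example, a sketch combining these options via client properties (the interval and limit values are illustrative only):
+
+```java
+Properties properties = new Properties();
+properties.setProperty("auto_discovery", "true");            // enable node discovery
+properties.setProperty("node_discovery_interval", "60000");  // re-discover every 60 seconds
+properties.setProperty("node_discovery_limit", "10");        // discover at most 10 nodes per pass
+```
+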
+### Load Balancing {#v1-load-balancing}
+
+The Java client chooses a ClickHouse node to send requests to, according to the load-balancing policy. In general, the load-balancing policy is responsible for the following things:
+
+1. Getting a node from the managed node list.
+2. Managing node status.
+3. Optionally scheduling a background process for node discovery (if auto-discovery is enabled) and running health checks.
+
+Here is a list of options to configure load balancing:
+
+| Property | Default | Description |
+|-----------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| load_balancing_policy | `""` | The load-balancing policy can be one of:<br/>`firstAlive` - the request is sent to the first healthy node from the managed node list<br/>`random` - the request is sent to a random node from the managed node list<br/>`roundRobin` - the request is sent to each node from the managed node list, in turn<br/>a fully qualified class name implementing `ClickHouseLoadBalancingPolicy` - a custom load-balancing policy<br/>If not specified, the request is sent to the first node from the managed node list |
+| load_balancing_tags | `""` | Load balancing tags for filtering out nodes. Requests are sent only to nodes that have the specified tags |
+| health_check_interval | `0` | Health check interval in milliseconds, zero or negative value means one-time. |
+| health_check_method | `ClickHouseHealthCheckMethod.SELECT_ONE` | Health check method. Can be one of:<br/>`ClickHouseHealthCheckMethod.SELECT_ONE` - check with a `select 1` query<br/>`ClickHouseHealthCheckMethod.PING` - protocol-specific check, which is generally faster |
+| node_check_interval | `0` | Node check interval in milliseconds; a negative number is treated as zero. The node status is checked if the specified amount of time has passed since the last check.<br/>The difference between `health_check_interval` and `node_check_interval` is that `health_check_interval` schedules a background job that checks the status of a list of nodes (all or faulty), while `node_check_interval` specifies how much time must pass since the last check of a particular node |
+| check_all_nodes | `false` | Whether to perform a health check against all nodes or just faulty ones. |
+
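+These options can also be supplied in the connection URL, similar to the multi-node example earlier on this page. A hedged sketch (hostnames and values are placeholders):
+
+```java
+ClickHouseNodes servers = ClickHouseNodes.of(
+    "http://server1.domain,server2.domain,server3.domain/my_db"
+    + "?load_balancing_policy=roundRobin&health_check_interval=5000&check_all_nodes=true");
+```
+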
+### Failover and retry {#v1-failover-and-retry}
+
+The Java client provides configuration options to set up failover and retry behavior for failed queries:
+
+| Property | Default | Description |
+|-------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| failover                 | `0`     | Maximum number of times a failover can happen for a request. Zero or a negative value means no failover. Failover sends the failed request to a different node (according to the load-balancing policy) in order to recover from the failure. |
+| retry                    | `0`     | Maximum number of times a retry can happen for a request. Zero or a negative value means no retry. Retry sends the request to the same node, and only if the ClickHouse server returns the `NETWORK_ERROR` error code |
+| repeat_on_session_lock   | `true`  | Whether to repeat execution when the session is locked, until it times out (according to `session_timeout` or `connect_timeout`). The failed request is repeated if the ClickHouse server returns the `SESSION_IS_LOCKED` error code |
+
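+A hedged example of enabling both mechanisms through the connection URL (hostnames and values are placeholders; the parameter names come from the table above):
+
+```java
+ClickHouseNodes servers = ClickHouseNodes.of(
+    "http://server1.domain,server2.domain/my_db?failover=2&retry=1");
+```
+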
+### Adding custom http headers {#v1-adding-custom-http-headers}
+
+The Java client supports adding custom HTTP headers to requests over the HTTP/S transport layer.
+Use the `custom_http_headers` property; headers must be comma-separated, and each header key/value pair must be separated by `=`.
+
+#### Java Client support {#v1-java-client-support}
+
+```java
+options.put("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test");
+```
+
+#### JDBC Driver {#v1-jdbc-driver}
+
+```java
+properties.setProperty("custom_http_headers", "X-ClickHouse-Quota=test, X-ClickHouse-Test=test");
+```
+
+
+
+
diff --git a/docs/integrations/language-clients/java/index.md b/docs/integrations/language-clients/java/index.md
index b816ab32529..af2dfc28578 100644
--- a/docs/integrations/language-clients/java/index.md
+++ b/docs/integrations/language-clients/java/index.md
@@ -1,5 +1,6 @@
---
title: 'Java'
+sidebar_position: 1
keywords: ['clickhouse', 'java', 'jdbc', 'client', 'integrate', 'r2dbc']
description: 'Options for connecting to ClickHouse from Java'
slug: /integrations/java
@@ -20,7 +21,7 @@ import CodeBlock from '@theme/CodeBlock';
Java client is a library implementing own API that abstracts details of network communications with ClickHouse server. Currently HTTP Interface is supported only. The library provide utilities to work with different ClickHouse formats and other related functions.
-Java Client was developed far back in 2015. Its codebase became very hard to maintain, API is confusing, it is hard to optimize it further. So we have refactored it in 2024 into a new component `client-v2`. It has clear API, lighter codebase and more performance improvements, better ClickHouse formats support (RowBinary & Native mainly). JDBC will use this client in near feature.
+Java Client was developed back in 2015. Its codebase became very hard to maintain, the API is confusing, and it is hard to optimize it further. So in 2024 we refactored it into a new component, `client-v2`. It has a clearer API, a lighter codebase, more performance improvements, and better support for ClickHouse formats (mainly RowBinary and Native). JDBC will use this client in the near future.
### Supported data types {#supported-data-types}
@@ -83,7 +84,7 @@ Java Client was developed far back in 2015. Its codebase became very hard to mai
- AggregatedFunction - :warning: does not support `SELECT * FROM table ...`
- Decimal - `SET output_format_decimal_trailing_zeros=1` in 21.9+ for consistency
- Enum - can be treated as both string and integer
-- UInt64 - mapped to `long` in client-v1
+- UInt64 - mapped to `long` in client-v1
:::
### Features {#features}
@@ -94,7 +95,8 @@ Table of features of the clients:
|----------------------------------------------|:---------:|:---------:|:---------:|
| Http Connection |✔ |✔ | |
| Http Compression (LZ4) |✔ |✔ | |
-| Server Response Compression - LZ4 |✔ |✔ | |
+| Application Controlled Compression |✔ |✗ | |
+| Server Response Compression - LZ4 |✔ |✔ | |
| Client Request Compression - LZ4 |✔ |✔ | |
| HTTPS |✔ |✔ | |
| Client SSL Cert (mTLS) |✔ |✔ | |
@@ -109,6 +111,7 @@ Table of features of the clients:
| Log Comment |✔ |✔ | |
| Session Roles |✔ |✔ | |
| SSL Client Authentication |✔ |✔ | |
+| SNI Configuration |✔ |✗ | |
| Session timezone |✔ |✔ | |
JDBC Drive inherits same features as underlying client implementation. Other JDBC features are listed on its [page](/integrations/language-clients/java/jdbc).
@@ -122,7 +125,7 @@ JDBC Drive inherits same features as underlying client implementation. Other JDB
### Logging {#logging}
-Our Java language client uses [SLF4J](https://www.slf4j.org/) for logging. You can use any SLF4J-compatible logging framework, such as `Logback` or `Log4j`.
+Our Java language client uses [SLF4J](https://www.slf4j.org/) for logging. You can use any SLF4J-compatible logging framework, such as `Logback` or `Log4j`.
For example, if you are using Maven you could add the following dependency to your `pom.xml` file:
```xml title="pom.xml"
diff --git a/docs/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx b/docs/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx
deleted file mode 100644
index 65fdb8580be..00000000000
--- a/docs/integrations/language-clients/java/jdbc/_snippets/_v0_7.mdx
+++ /dev/null
@@ -1,387 +0,0 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-`clickhouse-jdbc` implements the standard JDBC interface. Being built on top of [clickhouse-client](/integrations/sql-clients/sql-console), it provides additional features like custom type mapping, transaction support, and standard synchronous `UPDATE` and `DELETE` statements, etc., so that it can be easily used with legacy applications and tools.
-
-:::note
-Latest JDBC (0.7.2) version uses Client-V1
-:::
-
-`clickhouse-jdbc` API is synchronous, and generally, it has more overheads(e.g., SQL parsing and type mapping/conversion, etc.). Consider [clickhouse-client](/integrations/sql-clients/sql-console) when performance is critical or if you prefer a more direct way to access ClickHouse.
-
-## Environment requirements {#environment-requirements}
-
-- [OpenJDK](https://openjdk.java.net) version >= 8
-
-### Setup {#setup}
-
-
-
-
- ```xml
-
-
- com.clickhouse
- clickhouse-jdbc
- 0.7.2
-
- shaded-all
-
- ```
-
-
-
-
- ```kotlin
- // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
- // use uber jar with all dependencies included, change classifier to http for smaller jar
- implementation("com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all")
- ```
-
-
-
- ```groovy
- // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
- // use uber jar with all dependencies included, change classifier to http for smaller jar
- implementation 'com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all'
- ```
-
-
-
-
-Since version `0.5.0`, we are using Apache HTTP Client that's packed the Client. Since there is not a shared version of the package, you need to add a logger as a dependency.
-
-
-
-
- ```xml
-
-
- org.slf4j
- slf4j-api
- 2.0.16
-
- ```
-
-
-
-
- ```kotlin
- // https://mvnrepository.com/artifact/org.slf4j/slf4j-api
- implementation("org.slf4j:slf4j-api:2.0.16")
- ```
-
-
-
- ```groovy
- // https://mvnrepository.com/artifact/org.slf4j/slf4j-api
- implementation 'org.slf4j:slf4j-api:2.0.16'
- ```
-
-
-
-
-## Configuration {#configuration}
-
-**Driver Class**: `com.clickhouse.jdbc.ClickHouseDriver`
-
-**URL Syntax**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1¶m2=value2][#tag1,tag2,...]`, for example:
-
-- `jdbc:ch://localhost` is same as `jdbc:clickhouse:http://localhost:8123`
-- `jdbc:ch:https://localhost` is same as `jdbc:clickhouse:http://localhost:8443?ssl=true&sslmode=STRICT`
-- `jdbc:ch:grpc://localhost` is same as `jdbc:clickhouse:grpc://localhost:9100`
-
-**Connection Properties**:
-
-| Property | Default | Description |
-| ------------------------ | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `continueBatchOnError` | `false` | Whether to continue batch processing when error occurred |
-| `createDatabaseIfNotExist` | `false` | Whether to create database if it does not exist |
-| `custom_http_headers` | | comma separated custom http headers, for example: `User-Agent=client1,X-Gateway-Id=123` |
-| `custom_http_params` | | comma separated custom http query parameters, for example: `extremes=0,max_result_rows=100` |
-| `nullAsDefault` | `0` | `0` - treat null value as is and throw exception when inserting null into non-nullable column; `1` - treat null value as is and disable null-check for inserting; `2` - replace null to default value of corresponding data type for both query and insert |
-| `jdbcCompliance` | `true` | Whether to support standard synchronous UPDATE/DELETE and fake transaction |
-| `typeMappings` | | Customize mapping between ClickHouse data type and Java class, which will affect result of both [`getColumnType()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSetMetaData.html#getColumnType-int-) and [`getObject(Class<>?>`)](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-java.lang.String-java.lang.Class-). For example: `UInt128=java.lang.String,UInt256=java.lang.String` |
-| `wrapperObject` | `false` | Whether [`getObject()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-int-) should return java.sql.Array / java.sql.Struct for Array / Tuple. |
-
-Note: please refer to [JDBC specific configuration](https://github.com/ClickHouse/clickhouse-java/blob/main/clickhouse-jdbc/src/main/java/com/clickhouse/jdbc/JdbcConfig.java) for more.
-
-## Supported data types {#supported-data-types}
-
-JDBC Driver supports same data formats as client library does.
-
-:::note
-- AggregatedFunction - :warning: does not support `SELECT * FROM table ...`
-- Decimal - `SET output_format_decimal_trailing_zeros=1` in 21.9+ for consistency
-- Enum - can be treated as both string and integer
-- UInt64 - mapped to `long` (in client-v1)
-:::
-
-## Creating Connection {#creating-connection}
-
-```java
-String url = "jdbc:ch://my-server/system"; // use http protocol and port 8123 by default
-
-Properties properties = new Properties();
-
-ClickHouseDataSource dataSource = new ClickHouseDataSource(url, properties);
-try (Connection conn = dataSource.getConnection("default", "password");
- Statement stmt = conn.createStatement()) {
-}
-```
-
-## Simple Statement {#simple-statement}
-
-```java showLineNumbers
-
-try (Connection conn = dataSource.getConnection(...);
- Statement stmt = conn.createStatement()) {
- ResultSet rs = stmt.executeQuery("select * from numbers(50000)");
- while(rs.next()) {
- // ...
- }
-}
-```
-
-## Insert {#insert}
-
-:::note
-- Use `PreparedStatement` instead of `Statement`
-:::
-
-It's easier to use but slower performance compare to input function (see below):
-
-```java showLineNumbers
-try (PreparedStatement ps = conn.prepareStatement("insert into mytable(* except (description))")) {
- ps.setString(1, "test"); // id
- ps.setObject(2, LocalDateTime.now()); // timestamp
- ps.addBatch(); // parameters will be write into buffered stream immediately in binary format
- ...
- ps.executeBatch(); // stream everything on-hand into ClickHouse
-}
-```
-
-### With input table function {#with-input-table-function}
-
-An option with great performance characteristics:
-
-```java showLineNumbers
-try (PreparedStatement ps = conn.prepareStatement(
- "insert into mytable select col1, col2 from input('col1 String, col2 DateTime64(3), col3 Int32')")) {
- // The column definition will be parsed so the driver knows there are 3 parameters: col1, col2 and col3
- ps.setString(1, "test"); // col1
- ps.setObject(2, LocalDateTime.now()); // col2, setTimestamp is slow and not recommended
- ps.setInt(3, 123); // col3
- ps.addBatch(); // parameters will be write into buffered stream immediately in binary format
- ...
- ps.executeBatch(); // stream everything on-hand into ClickHouse
-}
-```
-- [input function doc](/sql-reference/table-functions/input/) whenever possible
-
-### Insert with placeholders {#insert-with-placeholders}
-
-This option is recommended only for small inserts because it would require a long SQL expression (that will be parsed on client side and it will consume CPU & Memory):
-
-```java showLineNumbers
-try (PreparedStatement ps = conn.prepareStatement("insert into mytable values(trim(?),?,?)")) {
- ps.setString(1, "test"); // id
- ps.setObject(2, LocalDateTime.now()); // timestamp
- ps.setString(3, null); // description
- ps.addBatch(); // append parameters to the query
- ...
- ps.executeBatch(); // issue the composed query: insert into mytable values(...)(...)...(...)
-}
-```
-
-## Handling DateTime and time zones {#handling-datetime-and-time-zones}
-
-Please to use `java.time.LocalDateTime` or `java.time.OffsetDateTime` instead of `java.sql.Timestamp`, and `java.time.LocalDate` instead of `java.sql.Date`.
-
-```java showLineNumbers
-try (PreparedStatement ps = conn.prepareStatement("select date_time from mytable where date_time > ?")) {
- ps.setObject(2, LocalDateTime.now());
- ResultSet rs = ps.executeQuery();
- while(rs.next()) {
- LocalDateTime dateTime = (LocalDateTime) rs.getObject(1);
- }
- ...
-}
-```
-
-## Handling `AggregateFunction` {#handling-aggregatefunction}
-
-:::note
-As of now, only `groupBitmap` is supported.
-:::
-
-```java showLineNumbers
-// batch insert using input function
-try (ClickHouseConnection conn = newConnection(props);
- Statement s = conn.createStatement();
- PreparedStatement stmt = conn.prepareStatement(
- "insert into test_batch_input select id, name, value from input('id Int32, name Nullable(String), desc Nullable(String), value AggregateFunction(groupBitmap, UInt32)')")) {
- s.execute("drop table if exists test_batch_input;"
- + "create table test_batch_input(id Int32, name Nullable(String), value AggregateFunction(groupBitmap, UInt32))engine=Memory");
- Object[][] objs = new Object[][] {
- new Object[] { 1, "a", "aaaaa", ClickHouseBitmap.wrap(1, 2, 3, 4, 5) },
- new Object[] { 2, "b", null, ClickHouseBitmap.wrap(6, 7, 8, 9, 10) },
- new Object[] { 3, null, "33333", ClickHouseBitmap.wrap(11, 12, 13) }
- };
- for (Object[] v : objs) {
- stmt.setInt(1, (int) v[0]);
- stmt.setString(2, (String) v[1]);
- stmt.setString(3, (String) v[2]);
- stmt.setObject(4, v[3]);
- stmt.addBatch();
- }
- int[] results = stmt.executeBatch();
- ...
-}
-
-// use bitmap as query parameter
-try (PreparedStatement stmt = conn.prepareStatement(
- "SELECT bitmapContains(my_bitmap, toUInt32(1)) as v1, bitmapContains(my_bitmap, toUInt32(2)) as v2 from {tt 'ext_table'}")) {
- stmt.setObject(1, ClickHouseExternalTable.builder().name("ext_table")
- .columns("my_bitmap AggregateFunction(groupBitmap,UInt32)").format(ClickHouseFormat.RowBinary)
- .content(new ByteArrayInputStream(ClickHouseBitmap.wrap(1, 3, 5).toBytes()))
- .asTempTable()
- .build());
- ResultSet rs = stmt.executeQuery();
- Assert.assertTrue(rs.next());
- Assert.assertEquals(rs.getInt(1), 1);
- Assert.assertEquals(rs.getInt(2), 0);
- Assert.assertFalse(rs.next());
-}
-```
-
-
-
-## Configuring HTTP library {#configuring-http-library}
-
-The ClickHouse JDBC connector supports three HTTP libraries: [`HttpClient`](https://docs.oracle.com/en/java/javase/11/docs/api/java.net.http/java/net/http/HttpClient.html), [`HttpURLConnection`](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/HttpURLConnection.html), and [Apache `HttpClient`](https://hc.apache.org/httpcomponents-client-5.2.x/).
-
-:::note
-`HttpClient` is only supported in JDK 11 or above.
-:::
-
-The JDBC driver uses `HttpClient` by default. You can change the HTTP library used by the ClickHouse JDBC connector by setting the following property:
-
-```java
-properties.setProperty("http_connection_provider", "APACHE_HTTP_CLIENT");
-```
-
-Here is a full list of the corresponding values:
-
-| Property Value | HTTP Library |
-|---------------------|---------------------|
-| HTTP_CLIENT | `HttpClient` |
-| HTTP_URL_CONNECTION | `HttpURLConnection` |
-| APACHE_HTTP_CLIENT | Apache `HttpClient` |
-
-
-
-## Connect to ClickHouse with SSL {#connect-to-clickhouse-with-ssl}
-
-To establish a secure JDBC connection to ClickHouse using SSL, you need to configure your JDBC properties to include SSL parameters. This typically involves specifying SSL properties such as `sslmode` and `sslrootcert` in your JDBC URL or Properties object.
-
-## SSL Properties {#ssl-properties}
-
-| Name | Default Value | Optional Values | Description |
-| ------------------ | ------------- | --------------- |----------------------------------------------------------------------------------|
-| `ssl` | false | true, false | Whether to enable SSL/TLS for the connection |
-| `sslmode` | strict | strict, none | Whether to verify SSL/TLS certificate |
-| `sslrootcert` | | | Path to SSL/TLS root certificates |
-| `sslcert` | | | Path to SSL/TLS certificate |
-| `sslkey` | | | RSA key in PKCS#8 format |
-| `key_store_type` | | JKS, PKCS12 | Specifies the type or format of the `KeyStore`/`TrustStore` file |
-| `trust_store` | | | Path to the `TrustStore` file |
-| `key_store_password` | | | Password needed to access the `KeyStore` file specified in the `KeyStore` config |
-
-These properties ensure that your Java application communicates with the ClickHouse server over an encrypted connection, enhancing data security during transmission.
-
-```java showLineNumbers
- String url = "jdbc:ch://your-server:8443/system";
-
- Properties properties = new Properties();
- properties.setProperty("ssl", "true");
- properties.setProperty("sslmode", "strict"); // NONE to trust all servers; STRICT for trusted only
- properties.setProperty("sslrootcert", "/mine.crt");
- try (Connection con = DriverManager
- .getConnection(url, properties)) {
-
- try (PreparedStatement stmt = con.prepareStatement(
-
- // place your code here
-
- }
- }
-```
-
-## Resolving JDBC Timeout on Large Inserts {#resolving-jdbc-timeout-on-large-inserts}
-
-When performing large inserts in ClickHouse with long execution times, you may encounter JDBC timeout errors like:
-
-```plaintext
-Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443]
-```
-
-These errors can disrupt the data insertion process and affect system stability. To address this issue you need to adjust a few timeout settings in the client's OS.
-
-### Mac OS {#mac-os}
-
-On Mac OS, the following settings can be adjusted to resolve the issue:
-
-- `net.inet.tcp.keepidle`: 60000
-- `net.inet.tcp.keepintvl`: 45000
-- `net.inet.tcp.keepinit`: 45000
-- `net.inet.tcp.keepcnt`: 8
-- `net.inet.tcp.always_keepalive`: 1
-
-### Linux {#linux}
-
-On Linux, the equivalent settings alone may not resolve the issue. Additional steps are required due to the differences in how Linux handles socket keep-alive settings. Follow these steps:
-
-1. Adjust the following Linux kernel parameters in `/etc/sysctl.conf` or a related configuration file:
-
-- `net.inet.tcp.keepidle`: 60000
-- `net.inet.tcp.keepintvl`: 45000
-- `net.inet.tcp.keepinit`: 45000
-- `net.inet.tcp.keepcnt`: 8
-- `net.inet.tcp.always_keepalive`: 1
-- `net.ipv4.tcp_keepalive_intvl`: 75
-- `net.ipv4.tcp_keepalive_probes`: 9
-- `net.ipv4.tcp_keepalive_time`: 60 (You may consider lowering this value from the default 300 seconds)
-
-2. After modifying the kernel parameters, apply the changes by running the following command:
-
-```shell
-sudo sysctl -p
- ```
-
-After Setting those settings, you need to ensure that your client enables the Keep Alive option on the socket:
-
-```java
-properties.setProperty("socket_keepalive", "true");
-```
-
-:::note
-Currently, you must use Apache HTTP Client library when setting the socket keep-alive, as the other two HTTP client libraries supported by `clickhouse-java` do not allow setting socket options. For a detailed guide, see [Configuring HTTP library](/integrations/language-clients/java/jdbc-v1#configuring-http-library).
-:::
-
-Alternatively, you can add equivalent parameters to the JDBC URL.
-
-The default socket and connection timeout for the JDBC driver is 30 seconds. The timeout can be increased to support large data insert operations. Use the `options` method on `ClickHouseClient` together with the `SOCKET_TIMEOUT` and `CONNECTION_TIMEOUT` options as defined by `ClickHouseClientOption`:
-
-```java showLineNumbers
-final int MS_12H = 12 * 60 * 60 * 1000; // 12 h in ms
-final String sql = "insert into table_a (c1, c2, c3) select c1, c2, c3 from table_b;";
-
-try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP)) {
- client.read(servers).write()
- .option(ClickHouseClientOption.SOCKET_TIMEOUT, MS_12H)
- .option(ClickHouseClientOption.CONNECTION_TIMEOUT, MS_12H)
- .query(sql)
- .executeAndWait();
-}
-```
\ No newline at end of file
diff --git a/docs/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx b/docs/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx
deleted file mode 100644
index 40d0084a125..00000000000
--- a/docs/integrations/language-clients/java/jdbc/_snippets/_v0_8.mdx
+++ /dev/null
@@ -1,225 +0,0 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-:::note
-`clickhouse-jdbc` implements the standard JDBC interface using the latest java client.
-We recommend using the latest java client directly if performance/direct access is critical.
-:::
-
-## Changes from 0.7.x {#changes-from-07x}
-In 0.8 we tried to make the driver more strictly follow the JDBC specification, so there are some removed features that may affect you:
-
-| Old Feature | Notes |
-|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Transaction Support | Early versions of the driver only **simulated** transaction support, which could have unexpected results. |
-| Response Column Renaming | `ResultSet` was mutable - for efficiency sake they're now read-only |
-| Multi-Statement SQL | Multi-statement support was only **simulated**, now it strictly follows 1:1 |
-| Named Parameters | Not part of the JDBC spec |
-| Stream-based `PreparedStatement` | Early version of the driver allowed for non-jdbc usage of `PreparedStatement` - if you desire such options, we recommend looking at the [Java Client](/integrations/language-clients/java/client/client.mdx) and its [examples](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2). |
-
-:::note
-`Date` is stored without timezone, while `DateTime` is stored with timezone. This can lead to unexpected results if you're not careful.
-:::
-
-## Environment requirements {#environment-requirements}
-
-- [OpenJDK](https://openjdk.java.net) version >= 8
-
-### Setup {#setup}
-
-
-
-
- ```xml
-
-
- com.clickhouse
- clickhouse-jdbc
- 0.9.1
- shaded-all
-
- ```
-
-
-
-
- ```kotlin
- // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
- implementation("com.clickhouse:clickhouse-jdbc:0.9.1:shaded-all")
- ```
-
-
-
- ```groovy
- // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
- implementation 'com.clickhouse:clickhouse-jdbc:0.9.1:shaded-all'
- ```
-
-
-
-
-## Configuration {#configuration}
-
-**Driver Class**: `com.clickhouse.jdbc.ClickHouseDriver`
-
-**URL Syntax**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1¶m2=value2][#tag1,tag2,...]`, for example:
-
-- `jdbc:clickhouse:http://localhost:8123`
-- `jdbc:clickhouse:https://localhost:8443?ssl=true`
-
-**Connection Properties**:
-
-Beyond standard JDBC properties, the driver supports the ClickHouse-specific properties offered by the underlying [java client](/integrations/language-clients/java/client/client.mdx).
-Where possible methods will return an `SQLFeatureNotSupportedException` if the feature is not supported. Other custom properties include:
-
-| Property | Default | Description |
-|----------------------------------|---------|----------------------------------------------------------------|
-| `disable_frameworks_detection` | `true` | Disable frameworks detection for User-Agent |
-| `jdbc_ignore_unsupported_values` | `false` | Suppresses `SQLFeatureNotSupportedException` |
-| `clickhouse.jdbc.v1` | `false` | Use older JDBC implementation instead of new JDBC |
-| `default_query_settings` | `null` | Allows passing of default query settings with query operations |
-| `jdbc_resultset_auto_close` | `true` | Automatically closes `ResultSet` when `Statement` is closed |
-| `beta.row_binary_for_simple_insert` | `false` | Use `PreparedStatement` implementation based on `RowBinary` writer. Works only for `INSERT INTO ... VALUES` queries. |
-
-## Supported data types {#supported-data-types}
-
-JDBC Driver supports the same data formats as the underlying [java client](/integrations/language-clients/java/client/client.mdx).
-
-### Handling Dates, Times, and Timezones {#handling-dates-times-and-timezones}
-`java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp` can complicate how Timezones are calculated - though they're of course supported,
-you may want to consider using the [java.time](https://docs.oracle.com/javase/8/docs/api/java/time/package-summary.html) package. `ZonedDateTime` and
-`OffsetDateTime` are both great replacements for java.sql.Timestamp, java.sql.Date, and java.sql.Time.
-
-## Creating Connection {#creating-connection}
-
-```java
-String url = "jdbc:ch://my-server:8123/system";
-
-Properties properties = new Properties();
-DataSource dataSource = new DataSource(url, properties);//DataSource or DriverManager are the main entry points
-try (Connection conn = dataSource.getConnection()) {
-... // do something with the connection
-```
-
-## Supplying Credentials and Settings {#supplying-credentials-and-settings}
-
-```java showLineNumbers
-String url = "jdbc:ch://localhost:8123?jdbc_ignore_unsupported_values=true&socket_timeout=10";
-
-Properties info = new Properties();
-info.put("user", "default");
-info.put("password", "password");
-info.put("database", "some_db");
-
-//Creating a connection with DataSource
-DataSource dataSource = new DataSource(url, info);
-try (Connection conn = dataSource.getConnection()) {
-... // do something with the connection
-}
-
-//Alternate approach using the DriverManager
-try (Connection conn = DriverManager.getConnection(url, info)) {
-... // do something with the connection
-}
-```
-
-## Simple Statement {#simple-statement}
-
-```java showLineNumbers
-
-try (Connection conn = dataSource.getConnection(...);
- Statement stmt = conn.createStatement()) {
- ResultSet rs = stmt.executeQuery("select * from numbers(50000)");
- while(rs.next()) {
- // ...
- }
-}
-```
-
-## Insert {#insert}
-
-```java showLineNumbers
-try (PreparedStatement ps = conn.prepareStatement("INSERT INTO mytable VALUES (?, ?)")) {
- ps.setString(1, "test"); // id
- ps.setObject(2, LocalDateTime.now()); // timestamp
- ps.addBatch();
- ...
- ps.executeBatch(); // stream everything on-hand into ClickHouse
-}
-```
-
-## `HikariCP` {#hikaricp}
-
-```java showLineNumbers
-// connection pooling won't help much in terms of performance,
-// because the underlying implementation has its own pool.
-// for example: HttpURLConnection has a pool for sockets
-HikariConfig poolConfig = new HikariConfig();
-poolConfig.setConnectionTimeout(5000L);
-poolConfig.setMaximumPoolSize(20);
-poolConfig.setMaxLifetime(300_000L);
-poolConfig.setDataSource(new ClickHouseDataSource(url, properties));
-
-try (HikariDataSource ds = new HikariDataSource(poolConfig);
- Connection conn = ds.getConnection();
- Statement s = conn.createStatement();
- ResultSet rs = s.executeQuery("SELECT * FROM system.numbers LIMIT 3")) {
- while (rs.next()) {
- // handle row
- log.info("Integer: {}, String: {}", rs.getInt(1), rs.getString(1));//Same column but different types
- }
-}
-```
-
-## More Information {#more-information}
-For more information, see our [GitHub repository](https://github.com/ClickHouse/clickhouse-java) and [Java Client documentation](/integrations/language-clients/java/client/client.mdx).
-
-## Troubleshooting {#troubleshooting}
-### Logging {#logging}
-The driver uses [slf4j](https://www.slf4j.org/) for logging, and will use the first available implementation on the `classpath`.
-
-### Resolving JDBC Timeout on Large Inserts {#resolving-jdbc-timeout-on-large-inserts}
-
-When performing large inserts in ClickHouse with long execution times, you may encounter JDBC timeout errors like:
-
-```plaintext
-Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443]
-```
-These errors can disrupt the data insertion process and affect system stability. To address this issue you may need to adjust a few timeout settings in the client's OS.
-
-#### Mac OS {#mac-os}
-
-On Mac OS, the following settings can be adjusted to resolve the issue:
-
-- `net.inet.tcp.keepidle`: 60000
-- `net.inet.tcp.keepintvl`: 45000
-- `net.inet.tcp.keepinit`: 45000
-- `net.inet.tcp.keepcnt`: 8
-- `net.inet.tcp.always_keepalive`: 1
-
-#### Linux {#linux}
-
-On Linux, the equivalent settings alone may not resolve the issue. Additional steps are required due to the differences in how Linux handles socket keep-alive settings. Follow these steps:
-
-1. Adjust the following Linux kernel parameters in `/etc/sysctl.conf` or a related configuration file:
-
- - `net.inet.tcp.keepidle`: 60000
- - `net.inet.tcp.keepintvl`: 45000
- - `net.inet.tcp.keepinit`: 45000
- - `net.inet.tcp.keepcnt`: 8
- - `net.inet.tcp.always_keepalive`: 1
- - `net.ipv4.tcp_keepalive_intvl`: 75
- - `net.ipv4.tcp_keepalive_probes`: 9
- - `net.ipv4.tcp_keepalive_time`: 60 (You may consider lowering this value from the default 300 seconds)
-
-2. After modifying the kernel parameters, apply the changes by running the following command:
-
-```shell
-sudo sysctl -p
-```
-
-After Setting those settings, you need to ensure that your client enables the Keep Alive option on the socket:
-
-```java
-properties.setProperty("socket_keepalive", "true");
-```
diff --git a/docs/integrations/language-clients/java/jdbc/jdbc.mdx b/docs/integrations/language-clients/java/jdbc/jdbc.mdx
index 8f783293f1b..995e06330aa 100644
--- a/docs/integrations/language-clients/java/jdbc/jdbc.mdx
+++ b/docs/integrations/language-clients/java/jdbc/jdbc.mdx
@@ -9,16 +9,640 @@ doc_type: 'reference'
---
import ClientVersionDropdown from '@theme/ClientVersionDropdown/ClientVersionDropdown';
-import v07 from './_snippets/_v0_7.mdx'
-import v08 from './_snippets/_v0_8.mdx'
+import Version from '@theme/ClientVersionDropdown/Version';
+import WideTableWrapper from '@site/src/components/WideTableWrapper/WideTableWrapper';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+]}>
+
+
+
+:::note
+`clickhouse-jdbc` implements the standard JDBC interface using the latest java client.
+We recommend using the latest java client directly if performance/direct access is critical.
+:::
+
+## Changes from 0.7.x {#changes-from-07x}
+In 0.8 we tried to make the driver more strictly follow the JDBC specification, so there are some removed features that may affect you:
+
+| Old Feature | Notes |
+|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Transaction Support | Early versions of the driver only **simulated** transaction support, which could have unexpected results. |
+| Response Column Renaming         | `ResultSet` was mutable - for efficiency's sake it is now read-only |
+| Multi-Statement SQL              | Multi-statement support was only **simulated**; now each statement maps strictly 1:1 to a query |
+| Named Parameters | Not part of the JDBC spec |
+| Stream-based `PreparedStatement` | Early version of the driver allowed for non-jdbc usage of `PreparedStatement` - if you desire such options, we recommend looking at the [Java Client](/integrations/language-clients/java/client/client.mdx) and its [examples](https://github.com/ClickHouse/clickhouse-java/tree/main/examples/client-v2). |
+
+:::note
+`Date` is stored without timezone, while `DateTime` is stored with timezone. This can lead to unexpected results if you're not careful.
+:::
+
+## Environment requirements {#environment-requirements}
+
+- [OpenJDK](https://openjdk.java.net) version >= 8
+
+### Setup {#setup}
+
+
+
+
+    ```xml
+    <!-- https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc -->
+    <dependency>
+        <groupId>com.clickhouse</groupId>
+        <artifactId>clickhouse-jdbc</artifactId>
+        <version>0.9.4</version>
+        <classifier>all</classifier>
+    </dependency>
+    ```
+
+
+
+
+ ```kotlin
+ // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
+ implementation("com.clickhouse:clickhouse-jdbc:0.9.4:all")
+ ```
+
+
+
+ ```groovy
+ // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
+ implementation 'com.clickhouse:clickhouse-jdbc:0.9.4:all'
+ ```
+
+
+
+
+## Configuration {#configuration}
+
+**Driver Class**: `com.clickhouse.jdbc.ClickHouseDriver`
+
+**URL Syntax**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1¶m2=value2][#tag1,tag2,...]`, for example:
+
+- `jdbc:clickhouse:http://localhost:8123`
+- `jdbc:clickhouse:https://localhost:8443?ssl=true`
+
+### Connection Properties {#connection-properties}
+
+Beyond standard JDBC properties, the driver supports the ClickHouse-specific properties offered by the underlying [java client](/integrations/language-clients/java/client#client-configuration).
+Where possible, methods will throw an `SQLFeatureNotSupportedException` if a feature is not supported. Other custom properties include:
+
+| Property | Default | Description |
+|----------------------------------|---------|----------------------------------------------------------------|
+| `disable_frameworks_detection` | `true` | Disable frameworks detection for User-Agent |
+| `jdbc_ignore_unsupported_values` | `false` | Suppresses `SQLFeatureNotSupportedException` |
+| `clickhouse.jdbc.v1` | `false` | Use older JDBC implementation instead of new JDBC |
+| `default_query_settings` | `null` | Allows passing of default query settings with query operations |
+| `jdbc_resultset_auto_close` | `true` | Automatically closes `ResultSet` when `Statement` is closed |
+| `beta.row_binary_for_simple_insert` | `false` | Use `PreparedStatement` implementation based on `RowBinary` writer. Works only for `INSERT INTO ... VALUES` queries. |
+
+:::note Server Settings
+
+All server settings should be prefixed with `clickhouse_setting_` (same as for the client [configuration](/integrations/language-clients/java/client#server-settings)).
+:::
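+
+For example, a sketch mixing a driver property from the table with a prefixed server setting (`max_result_rows` is just an illustrative server setting):
+
+```java
+Properties info = new Properties();
+info.setProperty("jdbc_ignore_unsupported_values", "true");        // driver property
+info.setProperty("clickhouse_setting_max_result_rows", "100000");  // forwarded to the server as max_result_rows
+```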
+
+## Supported data types {#supported-data-types}
+
+JDBC Driver supports the same data formats as the underlying [java client](/integrations/language-clients/java/client/client.mdx).
+
+### Handling Dates, Times, and Timezones {#handling-dates-times-and-timezones}
+`java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp` can complicate how Timezones are calculated - though they're of course supported,
+you may want to consider using the [java.time](https://docs.oracle.com/javase/8/docs/api/java/time/package-summary.html) package. `ZonedDateTime` and
+`OffsetDateTime` are both great replacements for java.sql.Timestamp, java.sql.Date, and java.sql.Time.
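+
+A minimal sketch, assuming the driver accepts `java.time` target classes in `getObject` (support may vary by driver version):
+
+```java
+try (PreparedStatement ps = conn.prepareStatement("SELECT now() AS ts, today() AS d");
+     ResultSet rs = ps.executeQuery()) {
+    while (rs.next()) {
+        ZonedDateTime ts = rs.getObject("ts", ZonedDateTime.class);  // DateTime column
+        LocalDate d = rs.getObject("d", LocalDate.class);            // Date column
+    }
+}
+```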
+
+## Creating Connection {#creating-connection}
+
+```java
+String url = "jdbc:ch://my-server:8123/system";
+
+Properties properties = new Properties();
+DataSource dataSource = new DataSource(url, properties); // DataSource or DriverManager are the main entry points
+try (Connection conn = dataSource.getConnection()) {
+    ... // do something with the connection
+}
+```
+
+## Supplying Credentials and Settings {#supplying-credentials-and-settings}
+
+```java showLineNumbers
+String url = "jdbc:ch://localhost:8123?jdbc_ignore_unsupported_values=true&socket_timeout=10";
+
+Properties info = new Properties();
+info.put("user", "default");
+info.put("password", "password");
+info.put("database", "some_db");
+
+//Creating a connection with DataSource
+DataSource dataSource = new DataSource(url, info);
+try (Connection conn = dataSource.getConnection()) {
+... // do something with the connection
+}
+
+//Alternate approach using the DriverManager
+try (Connection conn = DriverManager.getConnection(url, info)) {
+... // do something with the connection
+}
+```
+
+## Simple Statement {#simple-statement}
+
+```java showLineNumbers
+
+try (Connection conn = dataSource.getConnection(...);
+ Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select * from numbers(50000)");
+ while(rs.next()) {
+ // ...
+ }
+}
+```
+
+## Insert {#insert}
+
+```java showLineNumbers
+try (PreparedStatement ps = conn.prepareStatement("INSERT INTO mytable VALUES (?, ?)")) {
+ ps.setString(1, "test"); // id
+ ps.setObject(2, LocalDateTime.now()); // timestamp
+ ps.addBatch();
+ ...
+ ps.executeBatch(); // stream everything on-hand into ClickHouse
+}
+```
+
+## `HikariCP` {#hikaricp}
+
+```java showLineNumbers
+// connection pooling won't help much in terms of performance,
+// because the underlying implementation has its own pool.
+// for example: HttpURLConnection has a pool for sockets
+HikariConfig poolConfig = new HikariConfig();
+poolConfig.setConnectionTimeout(5000L);
+poolConfig.setMaximumPoolSize(20);
+poolConfig.setMaxLifetime(300_000L);
+poolConfig.setDataSource(new ClickHouseDataSource(url, properties));
+
+try (HikariDataSource ds = new HikariDataSource(poolConfig);
+ Connection conn = ds.getConnection();
+ Statement s = conn.createStatement();
+ ResultSet rs = s.executeQuery("SELECT * FROM system.numbers LIMIT 3")) {
+ while (rs.next()) {
+ // handle row
+ log.info("Integer: {}, String: {}", rs.getInt(1), rs.getString(1));//Same column but different types
+ }
+}
+```
+
+## More Information {#more-information}
+For more information, see our [GitHub repository](https://github.com/ClickHouse/clickhouse-java) and [Java Client documentation](/integrations/language-clients/java/client/client.mdx).
+
+## Troubleshooting {#troubleshooting}
+### Logging {#logging}
+The driver uses [slf4j](https://www.slf4j.org/) for logging, and will use the first available implementation on the `classpath`.
+
+### Resolving JDBC Timeout on Large Inserts {#resolving-jdbc-timeout-on-large-inserts}
+
+When performing large inserts in ClickHouse with long execution times, you may encounter JDBC timeout errors like:
+
+```plaintext
+Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443]
+```
+These errors can disrupt the data insertion process and affect system stability. To address this issue you may need to adjust a few timeout settings in the client's OS.
+
+#### Mac OS {#mac-os}
+
+On Mac OS, the following settings can be adjusted to resolve the issue:
+
+- `net.inet.tcp.keepidle`: 60000
+- `net.inet.tcp.keepintvl`: 45000
+- `net.inet.tcp.keepinit`: 45000
+- `net.inet.tcp.keepcnt`: 8
+- `net.inet.tcp.always_keepalive`: 1
+
+#### Linux {#linux}
+
+On Linux, the equivalent settings alone may not resolve the issue. Additional steps are required due to the differences in how Linux handles socket keep-alive settings. Follow these steps:
+
+1. Adjust the following Linux kernel parameters in `/etc/sysctl.conf` or a related configuration file:
+
+ - `net.inet.tcp.keepidle`: 60000
+ - `net.inet.tcp.keepintvl`: 45000
+ - `net.inet.tcp.keepinit`: 45000
+ - `net.inet.tcp.keepcnt`: 8
+ - `net.inet.tcp.always_keepalive`: 1
+ - `net.ipv4.tcp_keepalive_intvl`: 75
+ - `net.ipv4.tcp_keepalive_probes`: 9
+ - `net.ipv4.tcp_keepalive_time`: 60 (You may consider lowering this value from the default 300 seconds)
+
+2. After modifying the kernel parameters, apply the changes by running the following command:
+
+```shell
+sudo sysctl -p
+```
+
+After applying these settings, ensure that your client enables the keep-alive option on the socket:
+
+```java
+properties.setProperty("socket_keepalive", "true");
+```
+
+
+
+
+
+
+`clickhouse-jdbc` implements the standard JDBC interface. Built on top of [clickhouse-client](/integrations/sql-clients/sql-console), it provides additional features such as custom type mapping, transaction support, and standard synchronous `UPDATE` and `DELETE` statements, so it can easily be used with legacy applications and tools.
+
+:::note
+The latest JDBC version (0.7.2) uses Client-V1
+:::
+
+The `clickhouse-jdbc` API is synchronous and generally has more overhead (for example, SQL parsing and type mapping/conversion). Consider [clickhouse-client](/integrations/sql-clients/sql-console) when performance is critical or if you prefer a more direct way to access ClickHouse.
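+
+For instance, with `jdbcCompliance` enabled (the default), a standard synchronous `UPDATE` or `DELETE` can be issued through the usual JDBC API. The sketch below assumes an existing `dataSource` and a hypothetical `mytable`:
+
+```java showLineNumbers
+try (Connection conn = dataSource.getConnection();
+     Statement stmt = conn.createStatement()) {
+    // jdbcCompliance=true (the default) lets the driver accept standard synchronous
+    // UPDATE/DELETE statements and waits for them to complete before returning.
+    int affected = stmt.executeUpdate("DELETE FROM mytable WHERE id = 'obsolete'"); // hypothetical table and predicate
+}
+```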
+
+## Environment requirements {#v07-environment-requirements}
+
+- [OpenJDK](https://openjdk.java.net) version >= 8
+
+### Setup {#v07-setup}
+
+
+
+
+ ```xml
+
+
+ com.clickhouse
+ clickhouse-jdbc
+ 0.7.2
+
+ shaded-all
+
+ ```
+
+
+
+
+ ```kotlin
+ // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
+ // use uber jar with all dependencies included, change classifier to http for smaller jar
+ implementation("com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all")
+ ```
+
+
+
+ ```groovy
+ // https://mvnrepository.com/artifact/com.clickhouse/clickhouse-jdbc
+ // use uber jar with all dependencies included, change classifier to http for smaller jar
+ implementation 'com.clickhouse:clickhouse-jdbc:0.7.2:shaded-all'
+ ```
+
+
+
+
+Since version `0.5.0`, the driver uses the Apache HTTP Client that is packaged with the client. Since there is no shared version of the package, you need to add a logger as a dependency.
+
+
+
+
+ ```xml
+
+
+ org.slf4j
+ slf4j-api
+ 2.0.16
+
+ ```
+
+
+
+
+ ```kotlin
+ // https://mvnrepository.com/artifact/org.slf4j/slf4j-api
+ implementation("org.slf4j:slf4j-api:2.0.16")
+ ```
+
+
+
+ ```groovy
+ // https://mvnrepository.com/artifact/org.slf4j/slf4j-api
+ implementation 'org.slf4j:slf4j-api:2.0.16'
+ ```
+
+
+
+
+## Configuration {#v07-configuration}
+
+**Driver Class**: `com.clickhouse.jdbc.ClickHouseDriver`
+
+**URL Syntax**: `jdbc:(ch|clickhouse)[:]://endpoint1[,endpoint2,...][/][?param1=value1&param2=value2][#tag1,tag2,...]`, for example (a connection sketch using these URLs is shown after the list):
+
+- `jdbc:ch://localhost` is the same as `jdbc:clickhouse:http://localhost:8123`
+- `jdbc:ch:https://localhost` is the same as `jdbc:clickhouse:http://localhost:8443?ssl=true&sslmode=STRICT`
+- `jdbc:ch:grpc://localhost` is the same as `jdbc:clickhouse:grpc://localhost:9100`
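+
+As a rough sketch of the URL syntax in use (the host, database, and credentials below are placeholders), the short `jdbc:ch` form can be passed straight to `DriverManager`:
+
+```java showLineNumbers
+String url = "jdbc:ch://my-server:8443/my_db?ssl=true&sslmode=STRICT"; // placeholder endpoint and database
+Properties info = new Properties();
+info.setProperty("user", "default");
+info.setProperty("password", "password");
+
+try (Connection conn = DriverManager.getConnection(url, info);
+     Statement stmt = conn.createStatement();
+     ResultSet rs = stmt.executeQuery("SELECT 1")) {
+    while (rs.next()) {
+        // handle row
+    }
+}
+```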
+
+**Connection Properties**:
+
+| Property | Default | Description |
+| ------------------------ | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `continueBatchOnError` | `false` | Whether to continue batch processing when an error occurs |
+| `createDatabaseIfNotExist` | `false` | Whether to create the database if it does not exist |
+| `custom_http_headers` | | Comma-separated custom HTTP headers, for example: `User-Agent=client1,X-Gateway-Id=123` |
+| `custom_http_params` | | Comma-separated custom HTTP query parameters, for example: `extremes=0,max_result_rows=100` |
+| `nullAsDefault` | `0` | `0` - treat null value as is and throw exception when inserting null into non-nullable column; `1` - treat null value as is and disable null-check for inserting; `2` - replace null to default value of corresponding data type for both query and insert |
+| `jdbcCompliance` | `true` | Whether to support standard synchronous UPDATE/DELETE and fake transaction |
+| `typeMappings` | | Customize the mapping between ClickHouse data types and Java classes, which affects the result of both [`getColumnType()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSetMetaData.html#getColumnType-int-) and [`getObject(Class<?>)`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-java.lang.String-java.lang.Class-). For example: `UInt128=java.lang.String,UInt256=java.lang.String` |
+| `wrapperObject` | `false` | Whether [`getObject()`](https://docs.oracle.com/javase/8/docs/api/java/sql/ResultSet.html#getObject-int-) should return java.sql.Array / java.sql.Struct for Array / Tuple. |
+
+Note: please refer to [JDBC specific configuration](https://github.com/ClickHouse/clickhouse-java/blob/main/clickhouse-jdbc/src/main/java/com/clickhouse/jdbc/JdbcConfig.java) for more.
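+
+As a sketch of how a few of the properties above might be combined (the header value and database name are purely illustrative):
+
+```java showLineNumbers
+Properties properties = new Properties();
+properties.setProperty("createDatabaseIfNotExist", "true");        // create the target database on first use
+properties.setProperty("custom_http_headers", "X-Gateway-Id=123"); // illustrative header
+properties.setProperty("wrapperObject", "true");                   // return java.sql.Array / java.sql.Struct for Array / Tuple
+
+ClickHouseDataSource dataSource = new ClickHouseDataSource("jdbc:ch://my-server/my_db", properties);
+try (Connection conn = dataSource.getConnection("default", "password")) {
+    // ...
+}
+```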
+
+## Supported data types {#v07-supported-data-types}
+
+The JDBC driver supports the same data formats as the client library does.
+
+:::note
+- AggregateFunction - :warning: does not support `SELECT * FROM table ...`
+- Decimal - `SET output_format_decimal_trailing_zeros=1` in 21.9+ for consistency
+- Enum - can be treated as both string and integer
+- UInt64 - mapped to `long` (in client-v1)
+:::
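+
+For example, since Enum columns can be read as either a string or an integer, both accessors below return values from the same column (a sketch; the `events` table and its `status` Enum column are hypothetical, and `conn` is an open connection):
+
+```java showLineNumbers
+try (Statement stmt = conn.createStatement();
+     ResultSet rs = stmt.executeQuery("SELECT status FROM events LIMIT 10")) {
+    while (rs.next()) {
+        String name = rs.getString(1); // enum name, e.g. "ok"
+        int code = rs.getInt(1);       // corresponding numeric value, e.g. 1
+        // ...
+    }
+}
+```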
+
+## Creating Connection {#v07-creating-connection}
+
+```java
+String url = "jdbc:ch://my-server/system"; // use http protocol and port 8123 by default
+
+Properties properties = new Properties();
+
+ClickHouseDataSource dataSource = new ClickHouseDataSource(url, properties);
+try (Connection conn = dataSource.getConnection("default", "password");
+ Statement stmt = conn.createStatement()) {
+}
+```
+
+## Simple Statement {#v07-simple-statement}
+
+```java showLineNumbers
+
+try (Connection conn = dataSource.getConnection(...);
+ Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select * from numbers(50000)");
+ while(rs.next()) {
+ // ...
+ }
+}
+```
+
+## Insert {#v07-insert}
+
+:::note
+- Use `PreparedStatement` instead of `Statement`
+:::
+
+It's easier to use, but performance is slower compared to the input function (see below):
+
+```java showLineNumbers
+try (PreparedStatement ps = conn.prepareStatement("insert into mytable(* except (description))")) {
+ ps.setString(1, "test"); // id
+ ps.setObject(2, LocalDateTime.now()); // timestamp
+ ps.addBatch(); // parameters will be write into buffered stream immediately in binary format
+ ...
+ ps.executeBatch(); // stream everything on-hand into ClickHouse
+}
+```
+
+### With input table function {#with-input-table-function}
+
+An option with great performance characteristics:
+
+```java showLineNumbers
+try (PreparedStatement ps = conn.prepareStatement(
+    "insert into mytable select col1, col2 from input('col1 String, col2 DateTime64(3), col3 Int32')")) {
+    // The column definition will be parsed so the driver knows there are 3 parameters: col1, col2 and col3
+    ps.setString(1, "test"); // col1
+    ps.setObject(2, LocalDateTime.now()); // col2, setTimestamp is slow and not recommended
+    ps.setInt(3, 123); // col3
+    ps.addBatch(); // parameters are written into the buffered stream immediately, in binary format
+    ...
+    ps.executeBatch(); // stream everything on-hand into ClickHouse
+}
+```
+- Use the [input function](/sql-reference/table-functions/input/) whenever possible.
+
+### Insert with placeholders {#insert-with-placeholders}
+
+This option is recommended only for small inserts, because it requires a long SQL expression that is parsed on the client side and consumes CPU and memory:
+
+```java showLineNumbers
+try (PreparedStatement ps = conn.prepareStatement("insert into mytable values(trim(?),?,?)")) {
+ ps.setString(1, "test"); // id
+ ps.setObject(2, LocalDateTime.now()); // timestamp
+ ps.setString(3, null); // description
+ ps.addBatch(); // append parameters to the query
+ ...
+ ps.executeBatch(); // issue the composed query: insert into mytable values(...)(...)...(...)
+}
+```
+
+## Handling DateTime and time zones {#handling-datetime-and-time-zones}
+
+Please use `java.time.LocalDateTime` or `java.time.OffsetDateTime` instead of `java.sql.Timestamp`, and `java.time.LocalDate` instead of `java.sql.Date`.
+
+```java showLineNumbers
+try (PreparedStatement ps = conn.prepareStatement("select date_time from mytable where date_time > ?")) {
+ ps.setObject(2, LocalDateTime.now());
+ ResultSet rs = ps.executeQuery();
+ while(rs.next()) {
+ LocalDateTime dateTime = (LocalDateTime) rs.getObject(1);
+ }
+ ...
+}
+```
+
+## Handling `AggregateFunction` {#handling-aggregatefunction}
+
+:::note
+As of now, only `groupBitmap` is supported.
+:::
+
+```java showLineNumbers
+// batch insert using input function
+try (ClickHouseConnection conn = newConnection(props);
+ Statement s = conn.createStatement();
+ PreparedStatement stmt = conn.prepareStatement(
+ "insert into test_batch_input select id, name, value from input('id Int32, name Nullable(String), desc Nullable(String), value AggregateFunction(groupBitmap, UInt32)')")) {
+ s.execute("drop table if exists test_batch_input;"
+ + "create table test_batch_input(id Int32, name Nullable(String), value AggregateFunction(groupBitmap, UInt32))engine=Memory");
+ Object[][] objs = new Object[][] {
+ new Object[] { 1, "a", "aaaaa", ClickHouseBitmap.wrap(1, 2, 3, 4, 5) },
+ new Object[] { 2, "b", null, ClickHouseBitmap.wrap(6, 7, 8, 9, 10) },
+ new Object[] { 3, null, "33333", ClickHouseBitmap.wrap(11, 12, 13) }
+ };
+ for (Object[] v : objs) {
+ stmt.setInt(1, (int) v[0]);
+ stmt.setString(2, (String) v[1]);
+ stmt.setString(3, (String) v[2]);
+ stmt.setObject(4, v[3]);
+ stmt.addBatch();
+ }
+ int[] results = stmt.executeBatch();
+ ...
+}
+
+// use bitmap as query parameter
+try (PreparedStatement stmt = conn.prepareStatement(
+ "SELECT bitmapContains(my_bitmap, toUInt32(1)) as v1, bitmapContains(my_bitmap, toUInt32(2)) as v2 from {tt 'ext_table'}")) {
+ stmt.setObject(1, ClickHouseExternalTable.builder().name("ext_table")
+ .columns("my_bitmap AggregateFunction(groupBitmap,UInt32)").format(ClickHouseFormat.RowBinary)
+ .content(new ByteArrayInputStream(ClickHouseBitmap.wrap(1, 3, 5).toBytes()))
+ .asTempTable()
+ .build());
+ ResultSet rs = stmt.executeQuery();
+ Assert.assertTrue(rs.next());
+ Assert.assertEquals(rs.getInt(1), 1);
+ Assert.assertEquals(rs.getInt(2), 0);
+ Assert.assertFalse(rs.next());
+}
+```
+
+
+
+## Configuring HTTP library {#v07-configuring-http-library}
+
+The ClickHouse JDBC connector supports three HTTP libraries: [`HttpClient`](https://docs.oracle.com/en/java/javase/11/docs/api/java.net.http/java/net/http/HttpClient.html), [`HttpURLConnection`](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/HttpURLConnection.html), and [Apache `HttpClient`](https://hc.apache.org/httpcomponents-client-5.2.x/).
+
+:::note
+`HttpClient` is only supported in JDK 11 or above.
+:::
+
+The JDBC driver uses `HttpClient` by default. You can change the HTTP library used by the ClickHouse JDBC connector by setting the following property:
+
+```java
+properties.setProperty("http_connection_provider", "APACHE_HTTP_CLIENT");
+```
+
+Here is a full list of the corresponding values:
+
+| Property Value | HTTP Library |
+|---------------------|---------------------|
+| HTTP_CLIENT | `HttpClient` |
+| HTTP_URL_CONNECTION | `HttpURLConnection` |
+| APACHE_HTTP_CLIENT | Apache `HttpClient` |
+
+
+
+## Connect to ClickHouse with SSL {#connect-to-clickhouse-with-ssl}
+
+To establish a secure JDBC connection to ClickHouse using SSL, you need to configure your JDBC properties to include SSL parameters. This typically involves specifying SSL properties such as `sslmode` and `sslrootcert` in your JDBC URL or Properties object.
+
+## SSL Properties {#ssl-properties}
+
+| Name | Default Value | Optional Values | Description |
+| ------------------ | ------------- | --------------- |----------------------------------------------------------------------------------|
+| `ssl` | false | true, false | Whether to enable SSL/TLS for the connection |
+| `sslmode` | strict | strict, none | Whether to verify SSL/TLS certificate |
+| `sslrootcert` | | | Path to SSL/TLS root certificates |
+| `sslcert` | | | Path to SSL/TLS certificate |
+| `sslkey` | | | RSA key in PKCS#8 format |
+| `key_store_type` | | JKS, PKCS12 | Specifies the type or format of the `KeyStore`/`TrustStore` file |
+| `trust_store` | | | Path to the `TrustStore` file |
+| `key_store_password` | | | Password needed to access the `KeyStore` file specified in the `KeyStore` config |
+
+These properties ensure that your Java application communicates with the ClickHouse server over an encrypted connection, enhancing data security during transmission.
+
+```java showLineNumbers
+String url = "jdbc:ch://your-server:8443/system";
+
+Properties properties = new Properties();
+properties.setProperty("ssl", "true");
+properties.setProperty("sslmode", "strict"); // NONE to trust all servers; STRICT for trusted only
+properties.setProperty("sslrootcert", "/mine.crt");
+
+try (Connection con = DriverManager.getConnection(url, properties)) {
+    try (PreparedStatement stmt = con.prepareStatement("SELECT 1")) { // replace "SELECT 1" with your query
+        // place your code here
+    }
+}
+```
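+
+The same SSL settings can also be supplied as URL parameters instead of a `Properties` object (a sketch; verify the parameter names against your driver version):
+
+```java
+String url = "jdbc:ch://your-server:8443/system?ssl=true&sslmode=strict&sslrootcert=/mine.crt";
+try (Connection con = DriverManager.getConnection(url, new Properties())) {
+    // place your code here
+}
+```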
+
+## Resolving JDBC Timeout on Large Inserts {#v07-resolving-jdbc-timeout-on-large-inserts}
+
+When performing large inserts in ClickHouse with long execution times, you may encounter JDBC timeout errors like:
+
+```plaintext
+Caused by: java.sql.SQLException: Read timed out, server myHostname [uri=https://hostname.aws.clickhouse.cloud:8443]
+```
+
+These errors can disrupt the data insertion process and affect system stability. To address this issue, you need to adjust a few timeout settings in the client's OS.
+
+### Mac OS {#v07-mac-os}
+
+On Mac OS, the following settings can be adjusted to resolve the issue:
+
+- `net.inet.tcp.keepidle`: 60000
+- `net.inet.tcp.keepintvl`: 45000
+- `net.inet.tcp.keepinit`: 45000
+- `net.inet.tcp.keepcnt`: 8
+- `net.inet.tcp.always_keepalive`: 1
+
+### Linux {#v07-linux}
+
+On Linux, the equivalent settings alone may not resolve the issue. Additional steps are required due to the differences in how Linux handles socket keep-alive settings. Follow these steps:
+
+1. Adjust the following Linux kernel parameters in `/etc/sysctl.conf` or a related configuration file:
+
+   - `net.inet.tcp.keepidle`: 60000
+   - `net.inet.tcp.keepintvl`: 45000
+   - `net.inet.tcp.keepinit`: 45000
+   - `net.inet.tcp.keepcnt`: 8
+   - `net.inet.tcp.always_keepalive`: 1
+   - `net.ipv4.tcp_keepalive_intvl`: 75
+   - `net.ipv4.tcp_keepalive_probes`: 9
+   - `net.ipv4.tcp_keepalive_time`: 60 (You may consider lowering this value from the default 300 seconds)
+
+2. After modifying the kernel parameters, apply the changes by running the following command:
+
+```shell
+sudo sysctl -p
+```
+
+After applying these settings, ensure that your client enables the keep-alive option on the socket:
+
+```java
+properties.setProperty("socket_keepalive", "true");
+```
+
+:::note
+Currently, you must use Apache HTTP Client library when setting the socket keep-alive, as the other two HTTP client libraries supported by `clickhouse-java` do not allow setting socket options. For a detailed guide, see [Configuring HTTP library](#v07-configuring-http-library).
+:::
+
+Alternatively, you can add equivalent parameters to the JDBC URL.
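+
+For instance, the keep-alive flag and HTTP provider could be encoded in the URL itself (a sketch; the parameter names mirror the properties shown above, but verify them against your driver version):
+
+```java
+String url = "jdbc:ch://my-server:8443/my_db"   // placeholder endpoint and database
+        + "?ssl=true"
+        + "&http_connection_provider=APACHE_HTTP_CLIENT"
+        + "&socket_keepalive=true";
+try (Connection conn = DriverManager.getConnection(url, properties)) { // 'properties' as configured above
+    // run the long insert here
+}
+```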
+
+The default socket and connection timeout for the JDBC driver is 30 seconds. The timeout can be increased to support large data insert operations. Use the `option` method on the request, together with the `SOCKET_TIMEOUT` and `CONNECTION_TIMEOUT` options defined by `ClickHouseClientOption`:
+
+```java showLineNumbers
+final int MS_12H = 12 * 60 * 60 * 1000; // 12 h in ms
+final String sql = "insert into table_a (c1, c2, c3) select c1, c2, c3 from table_b;";
+
+try (ClickHouseClient client = ClickHouseClient.newInstance(ClickHouseProtocol.HTTP)) {
+ client.read(servers).write()
+ .option(ClickHouseClientOption.SOCKET_TIMEOUT, MS_12H)
+ .option(ClickHouseClientOption.CONNECTION_TIMEOUT, MS_12H)
+ .query(sql)
+ .executeAndWait();
+}
+```
+
+
+
+
diff --git a/package.json b/package.json
index 5a410f750d6..aae03c415e8 100644
--- a/package.json
+++ b/package.json
@@ -111,4 +111,4 @@
"resolutions": {
"form-data": "^4.0.4"
}
-}
+}
\ No newline at end of file
diff --git a/sidebars.js b/sidebars.js
index b299f5ea57c..46b4171eb5c 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -176,7 +176,7 @@ const sidebars = {
}
]
},
- {
+ {
type: "category",
label: "Data lake",
collapsed: true,