Hi all:

env {
  spark.app.name = "seatunnel_2023-02-01"
  spark.driver.cores = 2
  spark.rpc.message.maxSize = 2047
  spark.executor.instances = 200
  spark.driver.maxResultSize = "5g"
  spark.driver.memory = "20g"
  spark.executor.cores = 5
  spark.executor.memory = "10g"
  spark.sql.catalogImplementation = "hive"
}

source {
  hive {
    pre_sql = "select .... from A"
    parallelism = 6
    table_name = "A"
    metastore_uri = "xxxxx"
    result_table_name = "result_table_A"
  }
  hive {
    pre_sql = "select .... from B"
    parallelism = 6
    table_name = "B"
    metastore_uri = "xxxxx"
    result_table_name = "result_table_B"
  }
}

transform {}

sink {
  clickhouse {
    host = "1xxxx"
    database = "xx"
    table = "sink_table"
    fields = [....]
    username = "default"
    password = "default"
    bulk_size = 4000000
    clickhouse.socket_timeout = 300000
    clickhouse.connection_timeout = 300000
    clickhouse.dataTransferTimeout = 300000
    retry = 3
  }
}

I don't know why. Could someone help me?
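For context, this job registers two source tables (result_table_A and result_table_B) but leaves transform {} empty before a single ClickHouse sink. If the intent is to combine the two Hive results, that is typically done in the transform block with a sql transform. The following is a minimal sketch only: the join key, the selected columns, and the result table name are assumptions rather than details from the original post, and exact option names can vary between SeaTunnel versions.

transform {
  sql {
    # Illustrative only: join key and column list are placeholders, not from the original post
    sql = "select a.*, b.* from result_table_A a join result_table_B b on a.id = b.id"
    result_table_name = "result_table_joined"
  }
}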
Answered by lingtaolf, Nov 27, 2023:
Solved: