check keeper health manually because swarm doesn't support depends_on with condition: service_healthy

2024-11-28 22:15:27 -07:00
parent 3f15f225f4
commit 030659c3e1
10 changed files with 88 additions and 2 deletions

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Block until every ClickHouse Keeper node answers its /ready HTTP probe.
# Needed because Docker Swarm ignores depends_on's condition: service_healthy.
set -e

keeper_hostnames=(
  "clickhouse-keeper1"
  "clickhouse-keeper2"
  "clickhouse-keeper3"
)
keeper_healthy=(false false false)
can_proceed=false

while ! $can_proceed ; do
  # Probe every keeper and record any that answer the readiness endpoint.
  for keeper_idx in "${!keeper_hostnames[@]}"; do
    if wget -q --tries=1 --spider "http://${keeper_hostnames[$keeper_idx]}:9182/ready" ; then
      echo "${keeper_hostnames[$keeper_idx]} healthy"
      keeper_healthy[$keeper_idx]=true
    fi
  done
  # Proceed only once every keeper has reported healthy at least once;
  # otherwise wait and poll again.
  can_proceed=true
  for keeper_idx in "${!keeper_hostnames[@]}"; do
    if ! ${keeper_healthy[$keeper_idx]} ; then
      can_proceed=false
      sleep 5
      break
    fi
  done
done

View File

@@ -0,0 +1,28 @@
-- local replicated table for raw traffic records (one copy per replica)
CREATE TABLE traffic_records (
    time_stamp DateTime64(6, 'Japan') CODEC(Delta, ZSTD),
    l4_protocol Enum8('TCP' = 1, 'UDP' = 2),
    src_ip IPv4,
    dst_ip IPv4,
    src_port UInt16 CODEC(ZSTD),
    dst_port UInt16 CODEC(ZSTD),
    pkt_len UInt16 CODEC(ZSTD),
    INDEX port_idx src_port TYPE bloom_filter GRANULARITY 10
) ENGINE = ReplicatedMergeTree(
    '/clickhouse/tables/{shard}/traffic_records',
    '{replica}'
)
ORDER BY time_stamp
TTL toDateTime(time_stamp) + INTERVAL 15 DAY TO VOLUME 'cold_vol'
SETTINGS storage_policy = 'hot_cold';

-- lookup table mapping IPv4 address ranges to regions
CREATE TABLE ip_region_map (
    ip_range_start IPv4,
    ip_range_end IPv4,
    region LowCardinality(String),
    INDEX region_idx region TYPE bloom_filter GRANULARITY 1
) ENGINE = ReplicatedMergeTree(
    '/clickhouse/tables/{shard}/ip_region_map',
    '{replica}'
)
ORDER BY ip_range_start;
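
Not part of this commit: a quick sketch, with invented example values, of how these two tables are presumably meant to be queried — a port filter that the port_idx bloom filter can help prune, and a range lookup against ip_region_map.

-- sketch: packets to a given source port in the last hour
SELECT count() AS packets, sum(pkt_len) AS bytes
FROM traffic_records
WHERE src_port = 443
  AND time_stamp >= now() - INTERVAL 1 HOUR;

-- sketch: region for a single example address via the range table
SELECT region
FROM ip_region_map
WHERE toIPv4('192.0.2.10') BETWEEN ip_range_start AND ip_range_end
LIMIT 1;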

View File

@@ -0,0 +1,3 @@
-- cluster-wide entry point: fans reads and writes out over each shard's local traffic_records table
CREATE TABLE traffic_records_all
AS traffic_records
ENGINE = Distributed('{cluster}', 'default', 'traffic_records');
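
Again only a sketch of assumed usage: a cluster-wide aggregate issued through traffic_records_all, which the Distributed engine forwards to every shard's local table.

-- sketch: recent traffic broken down by protocol, across the whole cluster
SELECT l4_protocol,
       count() AS packets,
       sum(pkt_len) AS bytes
FROM traffic_records_all
WHERE time_stamp >= now() - INTERVAL 5 MINUTE
GROUP BY l4_protocol;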

View File

@@ -0,0 +1,23 @@
-- Kafka consumer table: reads JSONEachRow messages from the traffic_records_stream topic
CREATE TABLE traffic_records_kafka_queue (
    time Float64,
    l4_proto String,
    src_addr String,
    dst_addr String,
    src_port UInt16,
    dst_port UInt16,
    pkt_len UInt32
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka:9092',
         kafka_topic_list = 'traffic_records_stream',
         kafka_group_name = 'clickhouse_consumer',
         kafka_format = 'JSONEachRow',
         kafka_num_consumers = 1;

-- materialized view: renames the Kafka fields and forwards every consumed row
-- into the distributed table (types are converted implicitly on insert)
CREATE MATERIALIZED VIEW traffic_records_kafka_view TO traffic_records_all AS
SELECT time AS time_stamp,
       l4_proto AS l4_protocol,
       src_addr AS src_ip,
       dst_addr AS dst_ip,
       src_port,
       dst_port,
       pkt_len
FROM traffic_records_kafka_queue;
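
Sketch of how the pipeline could be sanity-checked once a producer is writing to the topic; the JSON field names come from the queue schema above, the values are invented examples.

-- example JSONEachRow message the queue expects:
--   {"time": 1700000000.123456, "l4_proto": "TCP", "src_addr": "192.0.2.10",
--    "dst_addr": "198.51.100.7", "src_port": 52044, "dst_port": 443, "pkt_len": 1500}
-- once messages flow, confirm rows are reaching the cluster:
SELECT count() AS rows_last_minute,
       max(time_stamp) AS newest_row
FROM traffic_records_all
WHERE time_stamp >= now() - INTERVAL 1 MINUTE;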