Integration working in Akash's system

This commit is contained in:
Akash Sivakumar 2024-11-22 20:26:57 -07:00
parent e65cf00d34
commit 5b1add9241
5 changed files with 244 additions and 2 deletions

View File

@ -0,0 +1,61 @@
<!-- This file was generated automatically.
     Do not edit it: it is likely to be discarded and generated again before it's read next time.
     Files used to generate this file:
       /etc/clickhouse-server/config.xml
       /etc/clickhouse-server/config.d/docker_related_config.xml -->
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
        <errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
        <size>1000M</size>
        <count>3</count>
    </logger>

    <path>/var/lib/clickhouse/data/</path>
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <keeper_server>
        <tcp_port>9181</tcp_port>
        <!-- Must be unique per keeper node and match this node's <id> entry
             in <raft_configuration> below (this file is for keeper 3). -->
        <server_id>3</server_id>
        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
        <coordination_settings>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <session_timeout_ms>30000</session_timeout_ms>
            <raft_logs_level>trace</raft_logs_level>
        </coordination_settings>
        <!-- All three keeper nodes must list the identical ensemble here;
             hostnames are the Compose service aliases. -->
        <raft_configuration>
            <server>
                <id>1</id>
                <hostname>clickhouse-keeper1</hostname>
                <port>9234</port>
            </server>
            <server>
                <id>2</id>
                <hostname>clickhouse-keeper2</hostname>
                <port>9234</port>
            </server>
            <server>
                <id>3</id>
                <hostname>clickhouse-keeper3</hostname>
                <port>9234</port>
            </server>
        </raft_configuration>
    </keeper_server>

    <!-- FIX: the original declared <listen_host> twice ("::" and "0.0.0.0"),
         making the server try to bind the same ports on overlapping wildcard
         addresses. A single wildcard suffices; 0.0.0.0 covers the IPv4-based
         Docker networks used here, allowing connections from other containers
         and the host network. -->
    <listen_host>0.0.0.0</listen_host>
    <listen_try>1</listen_try>
    <!--
    <logger>
        <console>1</console>
    </logger>
    -->
</yandex>

View File

@ -0,0 +1,156 @@
services:
  # --- ClickHouse Keeper ensemble (3 nodes, raft on port 9234) -------------
  clickhouse-keeper1:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-keeper1
    # Run the bundled keeper binary instead of the full server.
    command: >
      /usr/bin/clickhouse-keeper --config-file=/etc/clickhouse-server/config.xml
    volumes:
      - ./clickhouse_keeper/keeper1-config.xml:/etc/clickhouse-server/config.xml
      # NOTE(review): all three keepers bind the same host directories below.
      # The raft coordination paths are container-local so the ensemble still
      # works, but shared data/tmp dirs can collide — confirm this is intended.
      - ./clickhouse_data/data:/var/lib/clickhouse/data
      - ./clickhouse_data/tmp:/var/lib/clickhouse/tmp
      - ./clickhouse_data/user_files:/var/lib/clickhouse/user_files
      - ./clickhouse_data/format_schemas:/var/lib/clickhouse/format_schemas
    networks:
      common-network:
        aliases:
          - clickhouse-keeper1

  clickhouse-keeper2:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-keeper2
    command: >
      /usr/bin/clickhouse-keeper --config-file=/etc/clickhouse-server/config.xml
    volumes:
      - ./clickhouse_keeper/keeper2-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse_data/data:/var/lib/clickhouse/data
      - ./clickhouse_data/tmp:/var/lib/clickhouse/tmp
      - ./clickhouse_data/user_files:/var/lib/clickhouse/user_files
      - ./clickhouse_data/format_schemas:/var/lib/clickhouse/format_schemas
    networks:
      common-network:
        aliases:
          - clickhouse-keeper2

  clickhouse-keeper3:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-keeper3
    command: >
      /usr/bin/clickhouse-keeper --config-file=/etc/clickhouse-server/config.xml
    volumes:
      - ./clickhouse_keeper/keeper3-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse_data/data:/var/lib/clickhouse/data
      - ./clickhouse_data/tmp:/var/lib/clickhouse/tmp
      - ./clickhouse_data/user_files:/var/lib/clickhouse/user_files
      - ./clickhouse_data/format_schemas:/var/lib/clickhouse/format_schemas
    networks:
      common-network:
        aliases:
          - clickhouse-keeper3

  # --- ClickHouse servers ---------------------------------------------------
  clickhouse-server1:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-server1
    volumes:
      - ./node1-config/:/etc/clickhouse-server/config.d/
      - clickhouse_data1:/var/lib/clickhouse
    networks:
      common-network:
        aliases:
          - clickhouse-server1
    # NOTE(review): depends_on is ignored by swarm deployments ("docker stack
    # deploy"); it only orders startup under plain "docker compose up".
    depends_on:
      - clickhouse-keeper1
      - clickhouse-keeper2
      - clickhouse-keeper3
    ports:
      - "9001:9000" # Native client port
      - "8123:8123" # HTTP interface

  clickhouse-server2:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-server2
    volumes:
      - ./node2-config/:/etc/clickhouse-server/config.d/
      - clickhouse_data2:/var/lib/clickhouse
    networks:
      common-network:
        aliases:
          - clickhouse-server2
    depends_on:
      - clickhouse-keeper1
      - clickhouse-keeper2
      - clickhouse-keeper3
    ports:
      - "9002:9000" # Native client port
      - "8125:8123" # HTTP interface

  # --- Kafka stack ----------------------------------------------------------
  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    networks:
      common-network:
        aliases:
          - zookeeper
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
    environment:
      # Quoted so the value stays a string instead of a YAML integer.
      ZOOKEEPER_CLIENT_PORT: "2182"
    ports:
      # BUG FIX: ZooKeeper listens on 2182 inside the container (see
      # ZOOKEEPER_CLIENT_PORT above); the previous mapping "2182:2181"
      # forwarded host traffic to container port 2181, where nothing listens.
      - "2182:2182"

  kafka:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zookeeper
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2182"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT"
      KAFKA_BROKER_ID: "1"
      # 200 MB limits to accommodate large pcap-derived messages.
      KAFKA_MESSAGE_MAX_BYTES: "200000000"
      KAFKA_REPLICA_FETCH_MAX_BYTES: "200000000"
      # Single-broker cluster, so the offsets topic cannot be replicated.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
    networks:
      common-network:
        aliases:
          - kafka
    ports:
      - "9092:9092"
    volumes:
      - kafka_data_new:/var/lib/kafka/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure

  # --- pcap producer --------------------------------------------------------
  pcap_streamer:
    image: levenshtein/streamer_test4:latest
    depends_on:
      - kafka
    networks:
      common-network:
        aliases:
          - pcap_streamer
    volumes:
      - "/host_mnt/c/Users/akash/storage/Asu/sem3/dds/project:/data/pcap"
    environment:
      PCAP_FILE: /data/pcap/202310081400.pcap
    # Sleep gives the Kafka broker time to come up before streaming starts.
    command: ["sh", "-c", "sleep 30 && python /app/pcap_processor.py -f /data/pcap/202310081400.pcap -s --stream_size 1000"]
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure

networks:
  common-network:
    driver: overlay
    attachable: true

volumes:
  clickhouse_data1:
    driver: local
  clickhouse_data2:
    driver: local
  kafka_data_new:
    driver: local

View File

@ -0,0 +1,25 @@
from kafka import KafkaConsumer
import json  # kept available for switching the decoder back to JSON


# Endpoints; the topic name must agree with the producer's configuration.
KAFKA_TOPIC = 'pcap_stream_new'
KAFKA_SERVER = 'localhost:9092'


def _decode(raw):
    """Decode raw message bytes as UTF-8 text.

    Swap in ``json.loads(raw.decode('utf-8'))`` to deserialize JSON payloads.
    """
    return raw.decode('utf-8')


# Build the consumer: start from the earliest available offset and let the
# client auto-commit consumed offsets.
consumer = KafkaConsumer(
    KAFKA_TOPIC,
    bootstrap_servers=[KAFKA_SERVER],
    value_deserializer=_decode,
    auto_offset_reset='earliest',
    enable_auto_commit=True,
)

print("Consuming messages from topic:", KAFKA_TOPIC)

# Drain the topic; prints each record's type (a ConsumerRecord), not its value.
for record in consumer:
    print(type(record))

View File

@ -21,7 +21,7 @@ class KafkaClient:
self.client = KafkaProducer(
bootstrap_servers=['kafka:9092'],
max_request_size = 200000000,
api_version=(0,11,5),
#api_version=(0,11,5),
value_serializer=lambda x: json.dumps(x).encode('utf-8'))
elif mode == 'consumer' and topic_name is not None:
self.client = KafkaConsumer(
@ -33,7 +33,7 @@ class KafkaClient:
raise ValueError("Consumer mode requires a topic_name")
# Kafka Configuration
KAFKA_TOPIC = 'pcap_stream'
KAFKA_TOPIC = 'pcap_stream_new'
KAFKA_SERVER = 'kafka:9092' # Adjust to your Kafka server
#KAFKA_SERVER = 'kafka_service:9092'