Skip to content

Commit a44a31f

Browse files
author
ci bot
committed
Merge branch 'docker-compose-enterprise' into 'enterprise'
feat: Deploy using docker compose (PR 2/2) See merge request dkinternal/observability/dataops-observability!57
2 parents 948da92 + deeb35e commit a44a31f

8 files changed

Lines changed: 266 additions & 33 deletions

File tree

cli/entry_points/init.py

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,13 @@
99
from common.auth.keys.lib import hash_value
1010
from common.auth.keys.service_key import generate_key
1111
from common.entities import DB, Action, AuthProvider, Company, Organization, Project, User
12+
from common.kafka import (
13+
TOPIC_IDENTIFIED_EVENTS,
14+
TOPIC_UNIDENTIFIED_EVENTS,
15+
TOPIC_SCHEDULED_EVENTS,
16+
TOPIC_DEAD_LETTER_OFFICE,
17+
)
18+
from common.kafka.admin import create_topics
1219
from common.model import create_all_tables
1320

1421
USER_FIELDS = ["name", "email", "username", "password"]
@@ -89,10 +96,19 @@ def args(parser: ArgumentParser) -> None:
8996
action="store_true",
9097
help="Outputs the generated IDs in JSON format when successful",
9198
)
99+
parser.add_argument(
100+
"-k",
101+
"--topics",
102+
action="store_true",
103+
help="Create the Kafka Topics",
104+
)
92105

93106
def subcmd_entry_point(self) -> None:
94107
try:
108+
if not any([self.kwargs.get(arg) for arg in ("data", "demo", "tables", "topics")]):
109+
raise OperationAborted("Either --data or --demo or --tables or --topics has to be set.")
95110
self.initialize_database()
111+
self.create_kafka_topics()
96112
except OperationAborted as e:
97113
LOG.info("Operation #y<ABORTED>: %s", e)
98114
sys.exit(1)
@@ -102,10 +118,14 @@ def subcmd_entry_point(self) -> None:
102118
else:
103119
LOG.info("Operation #g<SUCCEEDED>")
104120

105-
def initialize_database(self) -> None:
106-
if not (self.kwargs.get("tables") or self.kwargs.get("data") or self.kwargs.get("demo")):
107-
raise OperationAborted("Either --data or --demo or --tables has to be set.")
121+
def create_kafka_topics(self) -> None:
122+
if self.kwargs.get("topics"):
123+
LOG.info("#c<Creating Kafka topics...>")
124+
create_topics(
125+
[TOPIC_IDENTIFIED_EVENTS, TOPIC_UNIDENTIFIED_EVENTS, TOPIC_SCHEDULED_EVENTS, TOPIC_DEAD_LETTER_OFFICE]
126+
)
108127

128+
def initialize_database(self) -> None:
109129
if self.kwargs.get("tables"):
110130
if self.kwargs.get("force"):
111131
raise OperationAborted("The --force option can not be used when creating the tables.")

cli/tests/entry_points/test_init.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,12 @@ def create_tables_mock():
1515
yield create_tables_mock
1616

1717

18+
@pytest.fixture
19+
def create_topics_mock():
20+
with patch("cli.entry_points.init.create_topics") as mock:
21+
yield mock
22+
23+
1824
@pytest.fixture
1925
def init_entry_point(test_db, create_tables_mock):
2026
yield Initialize(default=True)
@@ -124,6 +130,16 @@ def test_init_not_empty(arg, init_entry_point, create_tables_mock, mock_user_inp
124130
assert ServiceAccountKey.select().count() == demo_data_count
125131

126132

133+
@pytest.mark.integration
134+
def test_init_topics(init_entry_point, create_topics_mock):
135+
with (
136+
patch.dict(init_entry_point.kwargs, {"topics": True}),
137+
):
138+
init_entry_point.subcmd_entry_point()
139+
140+
create_topics_mock.assert_called_once()
141+
142+
127143
@pytest.mark.integration
128144
def test_init_error(init_entry_point, mock_user_input):
129145
DB.create_tables(ALL_MODELS)

common/kafka/admin.py

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
import logging
2+
3+
from confluent_kafka.admin import AdminClient, NewTopic
4+
from confluent_kafka import KafkaException, KafkaError
5+
6+
from common.kafka.topic import Topic
7+
from conf import settings
8+
9+
LOG = logging.getLogger(__name__)
10+
11+
12+
def create_topics(topics: list[Topic], num_partitions: int = 1, replication_factor: int = 1) -> None:
    """Create the given Kafka topics, treating already-existing topics as success.

    Args:
        topics: Topics to create.
        num_partitions: Partition count for each new topic.
        replication_factor: Replication factor for each new topic.

    Raises:
        Exception: If one or more topics could not be created for a reason other
            than the topic already existing.
    """
    client = AdminClient(settings.KAFKA_CONNECTION_PARAMS)
    new_topics = [
        NewTopic(topic.name, num_partitions=num_partitions, replication_factor=replication_factor) for topic in topics
    ]
    failed_topics = []
    # AdminClient.create_topics returns a dict mapping topic name -> future.
    for topic_name, future in client.create_topics(new_topics).items():
        try:
            future.result()
        except KafkaException as e:
            # A pre-existing topic is not a failure; anything else is.
            # NOTE(review): assumes e.args[0] compares equal to the KafkaError
            # code constant — confirm against confluent_kafka's KafkaError.
            if e.args[0] != KafkaError.TOPIC_ALREADY_EXISTS:
                failed_topics.append(topic_name)
        except Exception:
            LOG.exception("Error creating %s Kafka topic", topic_name)
            failed_topics.append(topic_name)

    if failed_topics:
        # Single quotes inside the f-string: nested double quotes here are a
        # SyntaxError on Python < 3.12 (PEP 701 only allows them from 3.12 on).
        raise Exception(f"Creating the topics {', '.join(failed_topics)} failed.")
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
from unittest.mock import patch, Mock
2+
3+
import pytest
4+
from confluent_kafka.cimpl import NewTopic, KafkaException, KafkaError
5+
6+
from common.kafka.admin import create_topics
7+
from common.kafka.topic import Topic
8+
9+
10+
@pytest.fixture
def client_class_mock():
    """Patch the AdminClient class referenced by common.kafka.admin."""
    with patch("common.kafka.admin.AdminClient") as admin_client_cls:
        yield admin_client_cls
14+
15+
16+
@pytest.fixture
def client_mock(client_class_mock):
    """Yield the mock AdminClient instance the code under test constructs."""
    instance = Mock()
    client_class_mock.return_value = instance
    yield instance
21+
22+
23+
@pytest.fixture
def create_topics_mock(client_mock):
    # Convenience handle to the mocked AdminClient's `create_topics` method.
    yield client_mock.create_topics
26+
27+
28+
@pytest.fixture
def result_mock(create_topics_mock):
    """Yield the mock for the per-topic future's ``result()`` call."""
    future = Mock()
    create_topics_mock.return_value = {"TEST_TOPIC": future}
    yield future.result
34+
35+
36+
@pytest.fixture
def topic():
    # Minimal Topic instance used as input for create_topics().
    return Topic(name="TEST_TOPIC")
39+
40+
41+
@pytest.mark.unit
@pytest.mark.parametrize("result_side_effect", (lambda: None, KafkaException(KafkaError.TOPIC_ALREADY_EXISTS)))
def test_create_topics(result_side_effect, client_class_mock, create_topics_mock, result_mock, topic):
    """create_topics succeeds both on a clean result and on TOPIC_ALREADY_EXISTS."""
    result_mock.side_effect = result_side_effect

    create_topics([topic], num_partitions=5, replication_factor=5)

    expected = [NewTopic("TEST_TOPIC", num_partitions=5, replication_factor=5)]
    client_class_mock.assert_called_once()
    create_topics_mock.assert_called_once_with(expected)
    result_mock.assert_called_once()
51+
52+
53+
@pytest.mark.unit
def test_create_topics_fail(client_class_mock, create_topics_mock, result_mock, topic):
    """create_topics raises when a topic fails for a non-'already exists' reason."""
    result_mock.side_effect = KafkaException(KafkaError.MEMBER_ID_REQUIRED)

    with pytest.raises(Exception):
        create_topics([topic], num_partitions=5, replication_factor=5)

    expected = [NewTopic("TEST_TOPIC", num_partitions=5, replication_factor=5)]
    client_class_mock.assert_called_once()
    create_topics_mock.assert_called_once_with(expected)
    result_mock.assert_called_once()

deploy/docker-compose/README.md

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
# DataOps Observability Docker Compose
2+
3+
## Overview
4+
5+
This document describes how to use Docker Compose to deploy Observability. The provided compose
6+
file can be used without modifications to deploy to a local, trial docker environment, or can
7+
be modified to guide a cloud-based deployment.
8+
9+
> [!IMPORTANT]
10+
> For your convenience, the provided compose file has the services (database and message broker)
11+
> connectivity configuration at their most simplistic form, which is not secure for a production deployment.
12+
13+
## Local deployment
14+
15+
The following command will start the platform, using its default configurations. All the commands
16+
mentioned in this document assume Docker is installed and the `compose.yaml` and `config.env` files are within the
17+
current folder
18+
19+
```shell
20+
docker compose up -d
21+
```
22+
23+
When the command returns successfully, it means that the platform is running in the background, and you can proceed to
24+
seeding the minimal data it needs, which includes creating a user account. The following command will prompt you
25+
for the user account information.
26+
27+
```shell
28+
docker compose run --rm -it observability_data_init init --data
29+
```
30+
31+
Once that is finished, you can access the platform using the credentials you created at http://localhost:8082/
32+
33+
The data initialization also provides some information that you must have available if you are planning to
34+
interact with the platform using its integration APIs. Store this information somewhere, if that is the case. This
35+
is an example of what this information will look like:
36+
37+
```
38+
INFO: company_id: 6a6e5aa0-76cb-45d9-b1f5-cad3ebaf1cb4
39+
INFO: organization_id: 6cfa3063-5220-4061-8004-df5b32cd54a2
40+
INFO: project_id: b85ebf07-fa4f-4cc8-9362-d78ec800e2c0
41+
INFO: user_id: f5c30e55-d2e2-4b1f-baec-9901f56060e0
42+
```
43+
44+
Your DataOps Observability platform is deployed and ready.
45+
46+
Once you are done, you can stop the platform without losing your data, by issuing the command below. You
47+
can start and stop it at any time.
48+
49+
```shell
50+
docker compose down
51+
```
52+
53+
## Configurations
54+
55+
The Docker Compose file allows you to configure certain aspects of the deployment. The `config.env` file serves as a
56+
reference of which environment variables are considered, and can be edited as needed. In that case, most of the
57+
"docker compose" commands will need an argument pointing to this file to be included, as the example below.
58+
59+
```shell
60+
docker compose --env-file=config.env up -d
61+
```
62+
63+
## Upgrading
64+
65+
Upgrading Observability to its latest version only requires the new images to be pulled, and starting fresh containers
66+
running the updated images. The following command pulls the images. Note the `--policy=always` argument. It forces
67+
pulling even when the images are already present.
68+
69+
```shell
70+
docker compose pull --policy=always
71+
```
72+
73+
Next, you will need to restart the application containers, which can be achieved with the same installation command.
74+
The database migrations, if any, will automatically run during the restart. It can be executed whether the
75+
application is running or not.
76+
77+
```
78+
docker compose up -d
79+
```

deploy/docker-compose/compose.yaml

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
1+
name: dataops-observability
12

23
x-database-config: &database_config
34
MYSQL_USER: ${MYSQL_USER:-observability}
4-
MYSQL_PASSWORD: ${MYSQL_PASSWORD}
5+
MYSQL_PASSWORD: ${MYSQL_PASSWORD:-193d036391fa7f14e3c48cabca7e47d0}
56

67
x-database-client-config: &database_client_config
78
MYSQL_SERVICE_HOST: ${MYSQL_HOST:-database}
@@ -28,10 +29,12 @@ services:
2829
# Transactions
2930
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
3031
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
32+
# Topics
33+
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
3134
# Other
3235
KAFKA_LOG4J_LOGGERS: "kafka=WARN,org.apache.kafka=WARN"
3336
volumes:
34-
- /var/lib/kafka/data
37+
- kafka_data:/var/lib/kafka/data
3538
healthcheck:
3639
test: [ "CMD", "/opt/kafka/bin/kafka-topics.sh", "--bootstrap-server", "localhost:9092", "--list" ]
3740
interval: 10s
@@ -44,18 +47,20 @@ services:
4447
restart: always
4548
expose: ["3306"]
4649
environment:
47-
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
50+
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD:-1824f41cee3033a881e1888a8fea24a3}
4851
MYSQL_DATABASE: datakitchen
4952
<<: *database_config
53+
volumes:
54+
- mysql_data:/var/lib/mysql
55+
5056
healthcheck:
5157
test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
5258
interval: 5s
5359
retries: 5
5460

5561
observability_data_init:
5662
container_name: data-init
57-
image: datakitchen/dataops-observability-be
58-
pull_policy: never
63+
image: ${DK_OBSERVABILITY_BE_IMAGE:-datakitchen/dataops-observability-be:v2}
5964
restart: on-failure
6065
depends_on:
6166
database:
@@ -65,12 +70,12 @@ services:
6570
environment:
6671
OBSERVABILITY_CONFIG: minikube
6772
<<: [*database_config, *database_client_config]
68-
entrypoint: /bin/sh -c '/dk/bin/cli migrate'
73+
entrypoint: /dk/bin/cli
74+
command: migrate
6975

7076
observability_backend:
7177
container_name: back-end
72-
image: datakitchen/dataops-observability-be
73-
pull_policy: never
78+
image: ${DK_OBSERVABILITY_BE_IMAGE:-datakitchen/dataops-observability-be:v2}
7479
restart: always
7580
depends_on:
7681
observability_data_init:
@@ -85,30 +90,31 @@ services:
8590
test: [ "CMD", "/bin/sh", "-c", "supervisorctl -c /dk/supervisord.conf status | grep -q RUNNING" ]
8691
interval: 5s
8792
retries: 10
88-
volumes:
89-
- type: bind
90-
source: ../conf/supervisord.conf
91-
target: /dk/supervisord.conf
9293

9394
observability_ui:
9495
container_name: user-interface
95-
image: datakitchen/dataops-observability-ui
96-
pull_policy: never
96+
image: ${DK_OBSERVABILITY_UI_IMAGE:-datakitchen/dataops-observability-ui:v2}
9797
restart: always
9898
depends_on:
9999
observability_backend:
100100
condition: service_healthy
101101
environment:
102-
OBSERVABILITY_API_HOSTNAME: ${OBSERVABILITY_API_HOSTNAME}
103-
OBSERVABILITY_CSP_EXTRA: ${observability_csp_extra}
102+
OBSERVABILITY_API_HOSTNAME: ${DK_OBSERVABILITY_API_HOSTNAME:-}
103+
OBSERVABILITY_CSP_EXTRA: ${DK_OBSERVABILITY_CSP_EXTRA:-}
104+
OBSERVABILITY_API_BASE_URL: ${DK_OBSERVABILITY_API_BASE_URL:-}
105+
OBSERVABILITY_AUTH_METHOD: ${DK_OBSERVABILITY_AUTH_METHOD:-basic}
104106
links:
105107
- "observability_backend:observability-api"
106108
- "observability_backend:event-api"
107109
- "observability_backend:agent-api"
108110
ports:
109-
- "8080:8082"
111+
- "${DK_OBSERVABILITY_HTTP_PORT:-8082}:8082"
110112

111113
networks:
112114
datakitchen:
113115
name: datakitchen-network
114-
external: true
116+
external: true
117+
118+
volumes:
119+
mysql_data:
120+
kafka_data:

0 commit comments

Comments
 (0)