Init docker compose

k.struzhkin 2020-07-07 17:00:01 +03:00
parent 1314172f84
commit 8b9fc7e409
10 changed files with 2054 additions and 1 deletions


@ -1 +1,21 @@
# fraudbusters-compose
### Topology
Fraudbusters is the central service: it accepts rule templates from the management service and
answers inspection requests from external systems, checking them against black, white, and named lists.
It talks to wb-list-manager over the
[wb-list-proto](https://github.com/rbkmoney/wb-list-proto/blob/master/proto/wb_list.thrift) protocol,
and to ClickHouse (via JDBC) to collect aggregates and store the required audit data.
When analyzing results, it enriches the data with locations resolved from IP addresses by the
[Columbus](https://github.com/rbkmoney/damsel/blob/master/proto/geo_ip.thrift) service.

![Fraudbusters topology](images/fb_scheme.png)
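The aggregates Fraudbusters collects can also be inspected by hand once the stack is up. A minimal sketch, assuming the `clickhouse` container name and the `fraud.events_unique` schema created by the init script in this repo:

```bash
# Count inspection results per status (illustrative query, not the service's own)
docker exec clickhouse clickhouse-client --query \
  "SELECT resultStatus, count() AS checks FROM fraud.events_unique GROUP BY resultStatus"
```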
### Install
```docker-compose up -d```
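If the stack came up, the ports mapped in `docker-compose.yml` answer locally. A quick smoke test (a sketch; expected answers noted in the comments):

```bash
docker-compose ps                          # all services should be Up
curl -s http://localhost:8123/ping         # ClickHouse answers "Ok."
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3000/login  # Grafana UI: 200
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8080        # fb-management: HTTP status
```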
### License
[Apache 2.0 License](/LICENSE)


@ -0,0 +1,222 @@
#!/bin/bash
set -e

# Create the fraud database and every table the fraudbusters services write to
clickhouse client -n <<-EOSQL
CREATE DATABASE IF NOT EXISTS fraud;
-- Inspection results for payment checks
DROP TABLE IF EXISTS fraud.events_unique;
CREATE TABLE fraud.events_unique (
timestamp Date,
eventTimeHour UInt64,
eventTime UInt64,
partyId String,
shopId String,
ip String,
email String,
bin String,
fingerprint String,
resultStatus String,
amount UInt64,
country String,
checkedRule String,
bankCountry String,
currency String,
invoiceId String,
maskedPan String,
bankName String,
cardToken String,
paymentId String,
checkedTemplate String
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (eventTimeHour, partyId, shopId, bin, resultStatus, cardToken, email, ip, fingerprint)
TTL timestamp + INTERVAL 3 MONTH;

-- Inspection results for P2P transfer checks
DROP TABLE IF EXISTS fraud.events_p_to_p;
CREATE TABLE fraud.events_p_to_p (
timestamp Date,
eventTime UInt64,
eventTimeHour UInt64,
identityId String,
transferId String,
ip String,
email String,
bin String,
fingerprint String,
amount UInt64,
currency String,
country String,
bankCountry String,
maskedPan String,
bankName String,
cardTokenFrom String,
cardTokenTo String,
resultStatus String,
checkedRule String,
checkedTemplate String
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (eventTimeHour, identityId, cardTokenFrom, cardTokenTo, bin, fingerprint, currency);
-- Payments reported as fraudulent
DROP TABLE IF EXISTS fraud.fraud_payment;
CREATE TABLE fraud.fraud_payment (
timestamp Date,
id String,
eventTime String,
partyId String,
shopId String,
amount UInt64,
currency String,
payerType String,
paymentToolType String,
cardToken String,
paymentSystem String,
maskedPan String,
issuerCountry String,
email String,
ip String,
fingerprint String,
status String,
rrn String,
providerId UInt32,
terminalId UInt32,
templateId String,
description String
) ENGINE = MergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (partyId, shopId, paymentToolType, status, currency, providerId, fingerprint, cardToken, id);

-- Refund events
DROP TABLE IF EXISTS fraud.refund;
CREATE TABLE fraud.refund
(
timestamp Date,
eventTime UInt64,
eventTimeHour UInt64,
id String,
email String,
ip String,
fingerprint String,
bin String,
maskedPan String,
cardToken String,
paymentSystem String,
paymentTool String,
terminal String,
providerId String,
bankCountry String,
partyId String,
shopId String,
amount UInt64,
currency String,
status Enum8('pending' = 1, 'succeeded' = 2, 'failed' = 3),
errorReason String,
errorCode String,
paymentId String
) ENGINE = ReplacingMergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (eventTimeHour, partyId, shopId, status, currency, providerId, fingerprint, cardToken, id, paymentId);

-- Payment events
DROP TABLE IF EXISTS fraud.payment;
CREATE TABLE fraud.payment
(
timestamp Date,
eventTime UInt64,
eventTimeHour UInt64,
id String,
email String,
ip String,
fingerprint String,
bin String,
maskedPan String,
cardToken String,
paymentSystem String,
paymentTool String,
terminal String,
providerId String,
bankCountry String,
partyId String,
shopId String,
amount UInt64,
currency String,
status Enum8('pending' = 1, 'processed' = 2, 'captured' = 3, 'cancelled' = 4, 'failed' = 5),
errorReason String,
errorCode String,
paymentCountry String
) ENGINE = ReplacingMergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (eventTimeHour, partyId, shopId, paymentTool, status, currency, providerId, fingerprint, cardToken, id);

-- Chargeback events
DROP TABLE IF EXISTS fraud.chargeback;
CREATE TABLE fraud.chargeback
(
timestamp Date,
eventTime UInt64,
eventTimeHour UInt64,
id String,
email String,
ip String,
fingerprint String,
bin String,
maskedPan String,
cardToken String,
paymentSystem String,
paymentTool String,
terminal String,
providerId String,
bankCountry String,
partyId String,
shopId String,
amount UInt64,
currency String,
status Enum8('accepted' = 1, 'rejected' = 2, 'cancelled' = 3),
category Enum8('fraud' = 1, 'dispute' = 2, 'authorisation' = 3, 'processing_error' = 4),
chargebackCode String,
paymentId String
) ENGINE = ReplacingMergeTree()
PARTITION BY toYYYYMM(timestamp)
ORDER BY (eventTimeHour, partyId, shopId, category, status, currency, providerId, fingerprint, cardToken, id, paymentId);
EOSQL
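If the init script ran inside the ClickHouse container, the resulting schema can be checked from the host (a sketch, assuming the `clickhouse` container name from docker-compose.yml):

```bash
docker exec clickhouse clickhouse-client --query "SHOW TABLES FROM fraud"
# expected: chargeback, events_p_to_p, events_unique, fraud_payment, payment, refund
```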

docker-compose.yml Normal file

@ -0,0 +1,216 @@
---
version: '2'
services:
  # Single-node ZooKeeper for the Kafka broker
  zookeeper:
    image: confluentinc/cp-zookeeper:5.0.1
    hostname: zookeeper
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    volumes:
      - /tmp/docker/zookeeper/data:/var/lib/zookeeper/data
      - /tmp/docker/zookeeper/logs:/var/lib/zookeeper/log
  # Single Kafka broker; every topic uses replication factor 1
  broker:
    image: confluentinc/cp-enterprise-kafka:5.0.1
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT_HOST://broker:29092,PLAINTEXT://broker:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:9092
      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
    volumes:
      - /tmp/docker/kafka2/data:/var/lib/kafka/data
  # One-shot container that pre-creates every Kafka topic the services expect
  kafka-setup:
    image: confluentinc/cp-kafka:5.1.1
    hostname: kafka-setup
    container_name: kafka-setup
    depends_on:
      - broker
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
              cub kafka-ready -b broker:9092 1 60 && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic wb-list-command && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic wb-list-event-sink && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic result && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic p2p_result && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic fraud_payment && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic payment_event && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic refund_event && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic chargeback_event && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic template && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic template_p2p && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic template_reference && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic template_p2p_reference && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic group_list && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic group_p2p_list && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic group_reference && \
              kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --config cleanup.policy=compact --topic group_p2p_reference && \
              echo Waiting 60 seconds for Connect to be ready... && \
              sleep 60'"
  # Riak KV backs wb-list-manager storage
  riak:
    image: basho/riak-kv
    hostname: riak
    container_name: riak
    ports:
      - "8087:8087"
      - "8098:8098"
    environment:
      - CLUSTER_NAME=riakts
    labels:
      - "com.basho.riak.cluster.name=riakts"
    volumes:
      - /tmp/docker/riak/data:/etc/riak/schemas
  # ClickHouse stores events and aggregates; schema comes from the init script above
  clickhouse:
    image: yandex/clickhouse-server:19.17
    hostname: clickhouse
    container_name: clickhouse
    ports:
      - 9000:9000
      - 8123:8123
    volumes:
      - ./clickhouse/data:/var/lib/clickhouse
      - ./clickhouse/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
  # Grafana with pre-provisioned ClickHouse datasource and dashboards
  grafana:
    image: grafana/grafana:5.1.0
    hostname: grafana
    container_name: grafana
    ports:
      - 3000:3000
    environment:
      GF_INSTALL_PLUGINS: "grafana-piechart-panel,vertamedia-clickhouse-datasource"
      GF_SECURITY_ADMIN_USER: "admin"
      GF_SECURITY_ADMIN_PASSWORD: "admin"
    volumes:
      - ./grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./grafana/datasources:/etc/grafana/provisioning/datasources
  # GeoIP data for Columbus
  postgres-columbus:
    image: dr2.rbkmoney.com/rbkmoney/postgres-geodata:0758740c8c3f350ddd7a8331b5e1cbb0374f4832
    hostname: postgres-columbus
    container_name: postgres-columbus
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: columbus
    entrypoint:
      - /docker-entrypoint.sh
      - postgres
    ports:
      - 5432:5432
  # Geo-location service used to enrich events by IP address
  columbus:
    image: dr2.rbkmoney.com/rbkmoney/columbus:79a03704da4b17afa6f6ce07dbd3522fd5a52442
    hostname: columbus
    container_name: columbus
    environment:
      spring.datasource.url: "jdbc:postgresql://postgres-columbus:5432/columbus"
      # geo.db.file.path: "file:/maxmind.mmdb"
      logging.level.com.rbkmoney.woody: "ERROR"
    depends_on:
      - postgres-columbus
    ports:
      - 8990:8022
  # Black/white/named list manager backed by Riak and Kafka
  wblist:
    image: dr2.rbkmoney.com/rbkmoney/wb-list-manager:d5ee1b86445e20f45d0271f86696446f57fc829b
    hostname: wblist
    container_name: wblist
    environment:
      kafka.bootstrap.servers: "broker:9092"
      riak.address: riak
      riak.port: 8087
      management.metrics.export.statsd.enabled: "false"
      logging.level.com.rbkmoney.woody: "ERROR"
      kafka.wblist.topic.command: "wb-list-command"
      kafka.wblist.topic.event.sink: "wb-list-event-sink"
    depends_on:
      - riak
      - broker
    ports:
      - 8991:8022
  # The antifraud engine itself
  fraudbusters:
    image: dr2.rbkmoney.com/rbkmoney/fraudbusters:e4b2f9e830890a8956d17b6a0599a4ba2ebb1c1c
    hostname: fraudbusters
    container_name: fraudbusters
    environment:
      kafka.bootstrap.servers: "broker:9092"
      geo.ip.service.url: "http://columbus:8022/repo"
      wb.list.service.url: "http://wblist:8022/v1/wb_list"
      clickhouse.db.url: "jdbc:clickhouse://clickhouse:8123/default"
      spring.profiles.active: "full-prod"
      logging.config: "tmp/logback-test.xml"
    depends_on:
      - clickhouse
      - broker
      - columbus
      - wblist
      - kafka-setup
    ports:
      - 8999:8022
    volumes:
      - ./log-java:/opt/fraudbusters/tmp
  # PostgreSQL for fb-management state
  postgres-fb:
    container_name: postgres_container
    hostname: postgres-fb
    image: postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: wblist
      PGDATA: /data/postgres
    volumes:
      - ./postgres/data/3:/data/postgres
    restart: unless-stopped
  # Management API: maintains lists and rule templates
  fb-management:
    image: dr2.rbkmoney.com/rbkmoney/fraudbusters-management:ecb5272c2c5920d4911959e3ec2b90e90c07377d
    hostname: fb-management
    container_name: fb-management
    environment:
      kafka.bootstrap.servers: "broker:9092"
      kafka.wblist.topic.command: "wb-list-command"
      kafka.wblist.topic.event.sink: "wb-list-event-sink"
      kafka.fraudbusters.template: "template"
      kafka.fraudbusters.reference: "template_reference"
      spring.datasource.url: "jdbc:postgresql://postgres-fb:5432/wblist"
      logging.config: "./tmp/logback-test.xml"
      management.metrics.binders.jvm.enabled: 'false'
      service.validate.payment.url: 'http://fraudbusters:8022/fraud_payment/v1/'
    depends_on:
      - fraudbusters
      - broker
      - wblist
      - postgres-fb
      - kafka-setup
    ports:
      - 8080:8080
    volumes:
      - ./log-java:/opt/fraudbusters-management/tmp
networks:
  default:
    driver: bridge
    driver_opts:
      com.docker.network.enable_ipv6: "true"
      com.docker.network.bridge.enable_ip_masquerade: "true"
volumes:
  mi2: {}
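Once `kafka-setup` has finished, the pre-created topics can be listed against the broker (a sketch; `kafka-topics` ships in the Confluent images used above):

```bash
docker exec broker kafka-topics --zookeeper zookeeper:2181 --list
# should include wb-list-command, wb-list-event-sink, template, template_reference,
# payment_event, refund_event, chargeback_event, and the other topics created above
```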


@ -0,0 +1,11 @@
apiVersion: 1
providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    editable: true
    options:
      folder: '/etc/grafana/provisioning/dashboards/jsons'

File diff suppressed because it is too large.


@ -0,0 +1,172 @@
{
"__inputs": [
{
"name": "ClickHouse",
"label": "ClickHouse",
"description": "",
"type": "datasource",
"pluginId": "vertamedia-clickhouse-datasource",
"pluginName": "ClickHouse"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "5.1.0"
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": "5.0.0"
},
{
"type": "datasource",
"id": "vertamedia-clickhouse-datasource",
"name": "ClickHouse",
"version": "2.0.1"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "",
"editable": true,
"gnetId": 3457,
"graphTooltip": 0,
"id": null,
"iteration": 1594122950512,
"links": [],
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 40,
"panels": [],
"title": "Clickhouse Tables",
"type": "row"
},
{
"cacheTimeout": null,
"columns": [],
"datasource": "ClickHouse",
"description": "",
"fontSize": "100%",
"gridPos": {
"h": 5,
"w": 24,
"x": 0,
"y": 1
},
"id": 44,
"links": [],
"options": {},
"pageSize": null,
"pluginVersion": "6.3.6",
"scroll": true,
"showHeader": true,
"sort": {
"col": null,
"desc": false
},
"styles": [
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 2,
"pattern": "/.*/",
"thresholds": [],
"type": "number",
"unit": "short"
}
],
"targets": [
{
"database": "analytic",
"dateTimeType": "DATETIME",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
"query": "SELECT * FROM system.events LIMIT 10 ",
"rawQuery": "SELECT * FROM system.events LIMIT 10",
"refId": "A",
"round": "0s"
}
],
"timeFrom": null,
"timeShift": null,
"title": "MAX QUERY",
"transform": "table",
"type": "table"
}
],
"refresh": "10s",
"schemaVersion": 16,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-15m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "browser",
"title": "Antifraud-metrics",
"uid": "HnyHGmHWz",
"version": 7
}
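The dashboard's single table panel issues a raw query against `system.events`; the same rows it renders can be fetched directly (a sketch):

```bash
docker exec clickhouse clickhouse-client --query "SELECT * FROM system.events LIMIT 10"
```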


@ -0,0 +1,9 @@
datasources:
  - access: 'proxy' # make grafana perform the requests
    type: vertamedia-clickhouse-datasource
    editable: true # whether it should be editable
    is_default: true # whether this should be the default DS
    name: 'ClickHouse' # name of the datasource
    org_id: 1 # id of the organization to tie this datasource to
    url: 'http://clickhouse:8123' # url of the ClickHouse HTTP interface
    version: 1 # config version
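After Grafana starts, the provisioned datasource can be verified through its HTTP API (a sketch, using the admin/admin credentials set in docker-compose.yml):

```bash
curl -s -u admin:admin http://localhost:3000/api/datasources
# should list a datasource named "ClickHouse" of type vertamedia-clickhouse-datasource
```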

BIN images/fb_scheme.png (new binary file, 110 KiB, not shown)

BIN images/fraud scheme.png (new binary file, 80 KiB, not shown)

log-java/logback-test.xml Normal file

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
    <root level="warn">
        <appender-ref ref="CONSOLE"/>
    </root>
    <logger name="com.rbkmoney" level="ALL"/>
    <logger name="org.springframework.jdbc.core" level="ALL"/>
    <logger name="org.apache.tomcat" level="ALL"/>
</configuration>
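This file is mounted into both Java services through the `./log-java` volume (see docker-compose.yml), so their logging goes to the container console; a sketch of following it:

```bash
docker logs -f fraudbusters        # or: docker logs -f fb-management
```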