Skip to main content

Application Examples

This page contains YAML payloads for various applications from our Avassa Public Repository.

It aims to be a source of example applications, secrets, etc. For more information, see the individual repositories.

Examples from application-examples.git

For more info, see https://gitlab.com/avassa-public/application-examples.git

port-map

http.app.yaml

name: http
services:
- name: http
init-containers:
- name: port-map
image: registry.gitlab.com/avassa-public/application-examples/nftables:1.0
additional-capabilities:
- net-admin
cmd:
- nft
- |
table ip port-map {
chain port-map {
type nat hook prerouting priority dstnat; policy accept;
tcp dport 80 redirect to :8000
}
}
containers:
- name: http
image: registry-1.docker.io/library/python:3.11
cmd: ["python3", "-m", "http.server", "-d", "/srv", "8000"]
probes:
liveness:
http:
port: 80
path: /
mounts:
- volume-name: index-html
mount-path: /srv
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "80"
volumes:
- name: index-html
config-map:
items:
- name: index.html
data: |
<!DOCTYPE html>
<html lang="en">
<head>
<title>Port mapping example</title>
</head>
<body>
This page is served on port 8000 inside the container
and accessible on port 80 outside the container.
</body>
</html>
mode: replicated
replicas: 1

redis

redis.app.yaml

name: redis
services:
- name: redis
init-containers:
- name: setup
image: registry-1.docker.io/redis
cmd: ["sh", "/cfg0/setup.sh"]
mounts:
- volume-name: cfg-storage
mount-path: /cfg
- volume-name: data
mount-path: /data
- volume-name: cfg0
files:
- name: setup.sh
mount-path: /cfg0/setup.sh
- name: redis.conf
mount-path: /cfg0/redis.conf
- name: sentinel.conf
mount-path: /cfg0/sentinel.conf
containers:
- name: redis
image: registry-1.docker.io/redis
cmd: ["redis-server", "/cfg/redis.conf"]
probes:
liveness:
exec:
cmd: ["redis-cli", "ping"]
mounts:
- volume-name: cfg-storage
mount-path: /cfg
- volume-name: data
mount-path: /data
- name: redis-sentinel
image: registry-1.docker.io/redis
cmd: ["redis-sentinel", "/cfg/sentinel.conf"]
mounts:
- volume-name: cfg-storage
mount-path: /cfg
- volume-name: data
mount-path: /data
volumes:
- name: cfg-storage
ephemeral-volume:
size: 3MiB
- name: data
ephemeral-volume:
# this is the size of redis' data storage volume
# it should be much larger in production
size: 5MiB
- name: cfg0
config-map:
items:
- name: setup.sh
data: |
#!/bin/sh
cp /cfg0/redis.conf /cfg/redis.conf
chown redis:redis /cfg/redis.conf
cp /cfg0/sentinel.conf /cfg/sentinel.conf
if [ ${SYS_SERVICE_INSTANCE_INDEX} -gt 1 ]; then
echo "replicaof redis-1 6379" >> /cfg/redis.conf
fi
- name: redis.conf
data: |
protected-mode no
port 6379
- name: sentinel.conf
data: |
sentinel resolve-hostnames yes
sentinel monitor mymaster redis-1 6379 2
sentinel down-after-milliseconds mymaster 60000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1

mode: replicated
replicas: 3
placement:
preferred-anti-affinity:
services: [ redis ]

hivemq-edge

hivemq-edge.vault.yaml

name: hivemq-edge
distribute:
deployments:
- hivemq-edge

hivemq-edge.app.yaml

name: hivemq-edge
# version: "1.0"
services:
- name: hivemq-edge
mode: replicated
replicas: 1
variables:
# Get Enterprise MQTT broker credentials from Strongbox
- name: MQTT_USERNAME
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: username
- name: MQTT_PASSWORD
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: password

volumes:
- name: log
ephemeral-volume:
size: 20MB
file-ownership: 1001:1001
- name: cfg
config-map:
items:
- name: default.conf
data: |
<?xml version="1.0"?>
<hivemq xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

<!-- Add mqtt listener -->
<mqtt-listeners>
<tcp-listener>
<port>1883</port>
<bind-address>0.0.0.0</bind-address>
</tcp-listener>
</mqtt-listeners>

<!-- Create a simulation that publishes on a topic called simulation -->
<protocol-adapters>
<simulation>
<id>test-simulation-server</id>
<pollingIntervalMillis>5000</pollingIntervalMillis>
<subscriptions>
<subscription>
<destination>simulation</destination>
<qos>1</qos>
</subscription>
</subscriptions>
</simulation>
</protocol-adapters>

<mqtt-bridges>
<mqtt-bridge>
<!-- Make sure each bridge uses a unique name -->
<id>${SYS_SITE}-remote-mqtt-broker</id>
<!-- forward to broker with authentication -->
<remote-broker>
<host>192.168.0.20</host>
<port>11883</port>
<authentication>
<mqtt-simple-authentication>
<username>${MQTT_USERNAME}</username>
<password>${MQTT_PASSWORD}</password>
</mqtt-simple-authentication>
</authentication>
</remote-broker>
<forwarded-topics>
<forwarded-topic>
<!-- Only forward topics from the simulation -->
<filters>
<mqtt-topic-filter>simulation/#</mqtt-topic-filter>
</filters>
<!-- prefix each topic with the name of the site -->
<destination>${SYS_SITE}/{#}</destination>
</forwarded-topic>
</forwarded-topics>
</mqtt-bridge>
</mqtt-bridges>

<admin-api>
<enabled>true</enabled>
</admin-api>
</hivemq>
network:
ingress-ip-per-instance:
protocols:
- name: tcp
# 1883: MQTT, 2442: MQTT-SN, 8080: HiveMQ Edge WebUI
port-ranges: "1883,2442,8080"
outbound-access:
allow-all: true
containers:
- name: hivemq-edge
image: registry-1.docker.io/hivemq/hivemq-edge
mounts:
# Mount the config file in the container
- volume-name: cfg
files:
- name: default.conf
mount-path: /opt/hivemq/conf/config.xml
# Mount the log volume in the container
- volume-name: log
mount-path: /opt/hivemq/log

credentials.hivemq-edge.secret.yaml

name: credentials
data:
password: password
username: test-user
allow-image-access: ["*"]

jumphost

cert-issuer.ssh-ca.ca-role.yaml

name: cert-issuer
key-type: "ca"
allowed-domains:
- "*"
allowed-users:
- "*"
ttl: 1y
allow-user-certs: true
allow-host-certs: true
allow-bare-domains: true
allow-subdomains: true
allow-user-key-ids: false

ssh-client.app.yaml

name: ssh-client
services:
- name: host
mode: replicated
replicas: 1
volumes:
# Fetch the ssh-client certificate
- name: cert
vault-secret:
vault: ssh-client
secret: client-cert
file-mode: "400"
file-ownership: 0:0
network:
outbound-access:
allow-all: true
containers:
- name: jump
image: registry.gitlab.com/avassa-public/application-examples/openssh-client:1.0
cmd: [ "sleep", "infinity" ]
mounts:
- volume-name: cert
files:
# Mount the private key
- name: ssh-private-key
mount-path: /ssh/private-key
# Mount the certificate, note the name convention <private key name>-cert.pub
- name: ssh-cert
mount-path: /ssh/private-key-cert.pub

ssh-client.vault.yaml

name: ssh-client
distribute:
deployments:
- ssh-client

server-cert.ssh-server.secret.yaml

name: server-cert
auto-ssh-cert:
cert-type: host
issuing-ca: ssh-ca
issuing-role: cert-issuer
ttl: 30d
allow-image-access:
- "*"

client-cert.ssh-client.secret.yaml

name: client-cert
auto-ssh-cert:
cert-type: user
issuing-ca: ssh-ca
issuing-role: cert-issuer
ttl: 2h
valid-principals:
- test
extensions:
- permit-pty
- permit-agent-forwarding
- permit-user-rc
allow-image-access: ["*"]

ssh-server.app.yaml

name: ssh-server
services:
- name: host
mode: replicated
replicas: 1
volumes:
- name: cert
vault-secret:
vault: ssh-server
secret: server-cert
file-mode: "400"
file-ownership: 0:0
- name: configs
config-map:
items:
- name: entrypoint.sh
file-mode: "500"
data: |
#!/bin/sh
cat /etc/ssh/avassa_sshd_config >> /etc/ssh/sshd_config
ssh-keygen -A

# Add a test user
adduser -h /home/test -s /bin/sh -D test
echo -n 'test:secret' | chpasswd

touch /etc/ssh/revoked_keys

exec /usr/sbin/sshd -D -e "$@"
- name: sshd_config
file-mode: "300"
data: |
PasswordAuthentication no
HostKey /etc/ssh/ssh_avassa_host_key
HostCertificate /etc/ssh/ssh_avassa_host_key-cert.pub
TrustedUserCAKeys /etc/ssh/ssh_avassa_user_key.pub
RevokedKeys /etc/ssh/revoked_keys

network:
outbound-access:
allow-all: true
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "22"
containers:
- name: host
image: registry.gitlab.com/avassa-public/application-examples/openssh-server:1.0
cmd:
- /entrypoint.sh
mounts:
- volume-name: cert
files:
- name: ssh-public-key
mount-path: /etc/ssh/ssh_avassa_host_key.pub
- name: ssh-private-key
mount-path: /etc/ssh/ssh_avassa_host_key
- name: ssh-cert
mount-path: /etc/ssh/ssh_avassa_host_key-cert.pub
- name: ssh-ca-public-key
mount-path: /etc/ssh/ssh_avassa_user_key.pub
- volume-name: configs
files:
- name: entrypoint.sh
mount-path: /entrypoint.sh
- name: sshd_config
mount-path: /etc/ssh/avassa_sshd_config

ssh-server.vault.yaml

name: ssh-server
distribute:
deployments:
- ssh-server

ssh-ca.ca.yaml

name: ssh-ca
distribute:
to: none

ros2

talker-listener.app.yaml

# Example of having both services in the same application
name: ros-talker-listener
services:
- name: talker
mode: replicated
replicas: 1
containers:
- name: ros
image: registry-1.docker.io/osrf/ros:jazzy-desktop
cmd:
- ros2
- run
- demo_nodes_cpp
- talker
- name: listener
mode: replicated
replicas: 1
containers:
- name: ros
image: registry-1.docker.io/osrf/ros:jazzy-desktop
cmd:
- ros2
- run
- demo_nodes_cpp
- listener

talker.app.yaml

name: ros-talker
services:
- name: talker
mode: replicated
replicas: 1
containers:
- name: ros
image: registry-1.docker.io/osrf/ros:jazzy-desktop
cmd:
- ros2
- run
- demo_nodes_cpp
- talker
network:
shared-application-network: ros

listener.app.yaml

name: ros-listener
services:
- name: listener
mode: replicated
replicas: 1
containers:
- name: ros
image: registry-1.docker.io/osrf/ros:jazzy-desktop
cmd:
- ros2
- run
- demo_nodes_cpp
- listener
network:
shared-application-network: ros

mongodb

mongodb.app.yaml

name: mongodb
version: "1.0"
services:
- name: db
mode: replicated
replicas: 1
volumes:
- name: db
ephemeral-volume:
size: 100MB
file-ownership: 999:999
- name: config
ephemeral-volume:
size: 100MB
file-ownership: 999:999
- name: credentials
vault-secret:
vault: mongodb
secret: credentials
file-ownership: 999:999
containers:
- name: db
image: registry-1.docker.io/mongo
env:
MONGO_INITDB_ROOT_PASSWORD_FILE: /creds/password
MONGO_INITDB_ROOT_USERNAME_FILE: /creds/username
mounts:
- volume-name: db
mount-path: /data/db
mode: read-write
- volume-name: config
mount-path: /data/configdb
mode: read-write
- volume-name: credentials
mount-path: /creds
mode: read-only
- name: express
mode: replicated
replicas: 1
variables:
- name: USERNAME
value-from-vault-secret:
vault: mongodb
secret: credentials
key: username
- name: PASSWORD
value-from-vault-secret:
vault: mongodb
secret: credentials
key: password
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8081"
outbound-access:
allow-all: true
containers:
- name: express
image: registry-1.docker.io/mongo-express
env:
ME_CONFIG_MONGODB_ADMINUSERNAME: ${USERNAME}
ME_CONFIG_MONGODB_ADMINPASSWORD: ${PASSWORD}
ME_CONFIG_MONGODB_URL: mongodb://${USERNAME}:${PASSWORD}@db:27017/

mongodb.vault.yaml

name: mongodb
distribute:
deployments:
- mongodb

credentials.mongodb.secret.yaml

name: credentials
allow-image-access: ["*"]
data:
username: root
password: example

influxdb

credentials.influxdb.secret.yaml

name: credentials
data:
password: password
username: test-user
admin-token: f777c965-bd42-47c7-9b2d-237bfc0d38fd
organization: test-org
allow-image-access: ["*"]

influxdb.app.yaml

name: influxdb
# version: "1.0"
services:
- name: influxdb
mode: replicated
replicas: 1
variables:
# Get credentials from Strongbox
- name: USERNAME
value-from-vault-secret:
vault: influxdb
secret: credentials
key: username
- name: PASSWORD
value-from-vault-secret:
vault: influxdb
secret: credentials
key: password
- name: ADMIN_TOKEN
value-from-vault-secret:
vault: influxdb
secret: credentials
key: admin-token
- name: ORGANIZATION
value-from-vault-secret:
vault: influxdb
secret: credentials
key: organization
volumes:
- name: data
# Note, you may want a persistent volume here. In case this application is removed from
# a site, an ephemeral volume will be removed.
ephemeral-volume:
size: 20MB
file-ownership: 0:0
- name: etc
ephemeral-volume:
size: 10MB
file-ownership: 0:0
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8086"
outbound-access:
allow-all: true
containers:
- name: influxdb
image: registry-1.docker.io/influxdb
env:
# Setup influx at start
DOCKER_INFLUXDB_INIT_MODE: setup
DOCKER_INFLUXDB_INIT_USERNAME: ${USERNAME}
DOCKER_INFLUXDB_INIT_PASSWORD: ${PASSWORD}
DOCKER_INFLUXDB_INIT_ORG: ${ORGANIZATION}
DOCKER_INFLUXDB_INIT_BUCKET: my-bucket
DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: ${ADMIN_TOKEN}
mounts:
- volume-name: data
mount-path: /var/lib/influxdb2
- volume-name: etc
mount-path: /etc/influxdb2
mode: read-write

influxdb.vault.yaml

name: influxdb
distribute:
deployments:
- influxdb

postgres

postgres.app.yaml

name: postgres
services:
- name: pg
mode: replicated
replicas: 3
volumes:
- name: pg-data
ephemeral-volume:
size: 20MB
file-ownership: 1001:1001
variables:
- name: POSTGRESQL_POSTGRES_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_POSTGRES_PASSWORD
- name: REPMGR_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: REPMGR_PASSWORD
- name: POSTGRESQL_USERNAME
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_USERNAME
- name: POSTGRESQL_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_PASSWORD
containers:
- name: db
image: registry-1.docker.io/bitnami/postgresql-repmgr:15
env:
REPMGR_PARTNER_NODES: pg-1,pg-2,pg-3
# This will resolve to pg-1, pg-2 and pg-3
REPMGR_NODE_NAME: pg-${SYS_SERVICE_INSTANCE_INDEX}
REPMGR_NODE_NETWORK_NAME: pg-${SYS_SERVICE_INSTANCE_INDEX}
# Start by assuming the first instance is the leader
REPMGR_PRIMARY_HOST: pg-1

# Password for the postgres user
POSTGRESQL_POSTGRES_PASSWORD: ${POSTGRESQL_POSTGRES_PASSWORD}

# Password for repmgr
REPMGR_PASSWORD: ${REPMGR_PASSWORD}

# Initial database
POSTGRESQL_DATABASE: my_database
POSTGRESQL_USERNAME: ${POSTGRESQL_USERNAME}
POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD}
mounts:
- volume-name: pg-data
mount-path: /bitnami/postgresql
- name: pgpool
mode: replicated
replicas: 1
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "5432"
variables:
- name: POSTGRESQL_POSTGRES_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_POSTGRES_PASSWORD
- name: REPMGR_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: REPMGR_PASSWORD
- name: POSTGRESQL_USERNAME
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_USERNAME
- name: POSTGRESQL_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: POSTGRESQL_PASSWORD
- name: PGPOOL_ADMIN_USERNAME
value-from-vault-secret:
vault: postgres
secret: credentials
key: PGPOOL_ADMIN_USERNAME
- name: PGPOOL_ADMIN_PASSWORD
value-from-vault-secret:
vault: postgres
secret: credentials
key: PGPOOL_ADMIN_PASSWORD
containers:
- name: pgpool
image: registry-1.docker.io/bitnami/pgpool:4
env:
PGPOOL_BACKEND_NODES: 0:pg-1:5432,1:pg-2:5432,2:pg-3:5432
PGPOOL_SR_CHECK_USER: repmgr
PGPOOL_SR_CHECK_PASSWORD: ${REPMGR_PASSWORD}
PGPOOL_ENABLE_LDAP: "no"
PGPOOL_ENABLE_LOAD_BALANCING: "yes"
PGPOOL_POSTGRES_USERNAME: postgres
PGPOOL_POSTGRES_PASSWORD: ${POSTGRESQL_POSTGRES_PASSWORD}
PGPOOL_ADMIN_USERNAME: ${PGPOOL_ADMIN_USERNAME}
PGPOOL_ADMIN_PASSWORD: ${PGPOOL_ADMIN_PASSWORD}
PGPOOL_POSTGRES_CUSTOM_USERS: ${POSTGRESQL_USERNAME}
PGPOOL_POSTGRES_CUSTOM_PASSWORDS: ${POSTGRESQL_PASSWORD}

credentials.postgres.secret.yaml

name: credentials
allow-image-access: ["*"]
data:
# postgres user password
POSTGRESQL_POSTGRES_PASSWORD: secretpass
REPMGR_PASSWORD: repmgrpass
POSTGRESQL_USERNAME: db-user
POSTGRESQL_PASSWORD: password123

PGPOOL_ADMIN_USERNAME: admin
PGPOOL_ADMIN_PASSWORD: verysecret

postgres.vault.yaml

name: postgres
distribute:
deployments:
- postgres

firebirdsql

firebird.vault.yaml

name: firebird
distribute:
deployments:
- firebird

credentials.firebird.secret.yaml

name: credentials
allow-image-access:
- "*"
data:
root-password: foobar!
username: alice
password: secret

firebird.app.yaml

name: firebird
version: "0.2"
services:
- name: db
mode: replicated
replicas: 1
volumes:
- name: data
# NOTE: In production, this should be a persistent-volume to prevent
# accidental deletion of the database file
ephemeral-volume:
size: 100MB
- name: credentials
vault-secret:
vault: firebird
secret: credentials
- name: config
config-map:
items:
# Some database setup sql
- name: test.sql
data: |
CREATE TABLE test (
test_id INTEGER PRIMARY KEY
);
INSERT INTO test VALUES (1);
INSERT INTO test VALUES (2);
variables:
# Just to show how to get secret values into the environment
- name: USERNAME
value-from-vault-secret:
vault: firebird
secret: credentials
key: username
# Allow access from the outside to the Firebird DB
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "3050"
outbound-access:
allow-all: true
containers:
- name: fb
image: ghcr.io/fdcastel/firebird:5.0.1-noble
env:
FIREBIRD_ROOT_PASSWORD_FILE: /tmp/credentials/root-password
FIREBIRD_USER: ${USERNAME}
FIREBIRD_PASSWORD_FILE: /tmp/credentials/password
FIREBIRD_DATABASE: mirror.fdb
FIREBIRD_DATABASE_DEFAULT_CHARSET: UTF8
mounts:
- volume-name: data
mount-path: /var/lib/firebird/data
# Mount credentials
- volume-name: credentials
mount-path: /tmp/credentials
# Mount init sql
- volume-name: config
files:
- name: test.sql
mount-path: /docker-entrypoint-initdb.d/test.sql

nginx

nginx.app.yaml

name: nginx
version: "1.0"
services:
- name: nginx-svs
mode: replicated
replicas: 1
volumes:
- name: cert
vault-secret:
vault: nginx
secret: server-cert
- name: cfg
config-map:
items:
- name: default.conf
data: |
server {
listen 443 ssl;
server_name nginx.nginx.${SYS_TENANT}.${SYS_SITE}.${SYS_GLOBAL_DOMAIN};
ssl_certificate /etc/ssl/certs/nginx.pem;
ssl_certificate_key /etc/ssl/private/nginx.key;
ssl_protocols TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;

location / {
root /usr/share/nginx/html;
index index.html index.htm;
}

error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "80,443"
containers:
- name: nginx
image: registry-1.docker.io/nginx
mounts:
- volume-name: cfg
files:
- name: default.conf
mount-path: /etc/nginx/conf.d/default.conf
- volume-name: cert
files:
- name: cert.pem
mount-path: /etc/ssl/certs/nginx.pem
- name: cert.key
mount-path: /etc/ssl/private/nginx.key

nginx.vault.yaml

name: nginx
distribute:
deployments:
- nginx

cert-issuer.nginx-ca.ca-role.yaml

name: cert-issuer
allow-client-certs: true
allow-server-certs: true
allowed-domains:
- "*"
allowed-hosts:
- "*"

nginx-ca.ca.yaml

name: nginx-ca
ttl: 30d
auto-renew:
# Auto renew the CA when there are 5 days left
renew-threshold: 5d
distribute:
deployments:
- nginx

server-cert.nginx.secret.yaml

name: server-cert
auto-cert:
cert-type: server
host: nginx
issuing-ca: nginx-ca
ttl: 5d
allow-image-access:
- "*"

Examples from edge-ai.git

For more info, see https://gitlab.com/avassa-public/edge-ai.git

torch

image-classifier.vault.yaml

name: image-classifier
distribute:
deployments:
- image-classifier

image-classifier.app.yml

name: image-classifier
version: "1.0"
network:
shared-application-network: edge-ai
services:
- name: torchserve
mode: replicated
replicas: 1
variables:
- name: JOB_QUEUE_SIZE
value-from-vault-secret:
vault: image-classifier
secret: configuration
key: job_queue_size
containers:
- name: serve
image: registry.gitlab.com/avassa-public/edge-ai/torchserve:v1.0
env:
TS_JOB_QUEUE_SIZE: ${JOB_QUEUE_SIZE}
- name: api
mode: replicated
replicas: 1
containers:
- name: api
image: registry.gitlab.com/avassa-public/edge-ai/torchapi
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8080"
outbound-access:
allow-all: true

configuration.image-classifier.secret.yaml

name: configuration
allow-image-access:
- "*"
data:
job_queue_size: "100"

jupyter

credentials.jupyter.secret.yaml

name: credentials
allow-image-access:
- "*"
data:
token: secret-token

jupyter.app.yaml

name: jupyter
network:
shared-application-network: edge-ai
services:
- name: jupyter
mode: replicated
replicas: 1
variables:
- name: TOKEN
value-from-vault-secret:
vault: jupyter
secret: credentials
key: token
volumes:
- name: work
ephemeral-volume:
size: 100MB
file-ownership: 1000:100
- name: cfg
config-map:
items:
- name: avassa.ipynb
data: |
{
"cells": [
{
"cell_type": "markdown",
"id": "5c5b8f10-8424-4b74-8dd4-5ef6b8ddbda1",
"metadata": {},
"source": [
"# List models"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9900f796-ce06-43a1-bded-8c5f3c025be5",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"requests.get(\"http://api.image-classifier:8080/models\").json()"
]
},
{
"cell_type": "markdown",
"id": "46e4551f-f3d8-4741-a025-8e8bf2b86446",
"metadata": {},
"source": [
"# Classify image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b284c22c-6ed1-4fa2-83ca-4be3c36f2cc1",
"metadata": {},
"outputs": [],
"source": [
"image_url=\"https://4.bp.blogspot.com/-TWz--38xAjc/T-m_WjhEBYI/AAAAAAAAL-8/aqwDY1QNH8g/s1600/Crocodile3.jpg\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7777ad80-ce4b-4b02-958c-107d2b68d342",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"from IPython.display import Image\n",
"Image(url= image_url, width=200,height=200)"
]
},
{
"cell_type": "markdown",
"id": "d9e8d3c2-5d06-463b-bb25-c232797b2c73",
"metadata": {},
"source": [
"### Resnet model"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79c8e711-5aba-4d82-8237-840bc6fb060b",
"metadata": {},
"outputs": [],
"source": [
"requests.post(\"http://api.image-classifier:8080/predict\", json={\"image_url\": image_url}).json()"
]
},
{
"cell_type": "markdown",
"id": "c31e6268-36c2-49fd-9ac3-517f77ad448c",
"metadata": {},
"source": [
"### Densenet model"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "da9bc256-eafe-4d08-88ce-53e9a2940c72",
"metadata": {},
"outputs": [],
"source": [
"requests.post(\"http://api.image-classifier:8080/predict?model=densenet161\", json={\"image_url\": image_url}).json()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

containers:
- name: lab
image: quay.io/jupyter/base-notebook
mounts:
- volume-name: work
mount-path: /home/jovyan/work
mode: read-write
- volume-name: cfg
files:
- name: avassa.ipynb
mount-path: /home/jovyan/work/avassa.ipynb
cmd:
- start-notebook.py
- "--IdentityProvider.token='${TOKEN}'"
env:
API_CERT: ${SYS_API_CA_CERT}
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8888"
outbound-access:
allow-all: true

jupyter.vault.yaml

name: jupyter
distribute:
deployments:
- jupyter

Examples from iot-demo.git

For more info, see https://gitlab.com/avassa-public/iot-demo.git

mosquitto

cert.mosquitto.secret.yaml

name: cert
auto-cert:
issuing-ca: mosquitto
ttl: 30d
truncate-ttl: false
host: mosquitto
cert-type: server
allow-image-access:
- "*"

mosquitto.ca.yaml

name: mosquitto
ttl: 3y
cert-key-type: rsa
auto-renew:
# Auto renew the CA when there are 30 days left
renew-threshold: 30d
distribute:
deployments:
- mosquitto

credentials.mosquitto.secret.yaml

name: credentials
data:
# username: "test-user"
# password: "test-pwd"
# test-user/test-pwd
# Generated using: mosquitto_passwd -b pwd test-user test-pwd
password: "test-user:$7$101$ctWMAD+/YScCjE9m$mzaBRxx9APbjOcsFkwr0brASw1jIjDPsEiRdmvWmYOycQ3OC4f9C9zUBjhdTCkLl8afToTeTFSTXxtztLIiO5g=="
allow-image-access: ["*"]

mosquitto.vault.yaml

name: mosquitto
distribute:
deployments:
- mosquitto

mosquitto-anon.app.yaml

name: mosquitto
# version: "1.0"
services:
- name: mqtt
mode: replicated
replicas: 1
volumes:
- name: log
ephemeral-volume:
size: 20MB
file-mode: "755"
file-ownership: 1883:1883
- name: data
ephemeral-volume:
size: 100MB
file-mode: "755"
file-ownership: 1883:1883
- name: cfg
config-map:
items:
- name: mosquitto.conf
data: |
listener 1883 0.0.0.0
allow_anonymous true
log_type debug
log_type error
log_type warning
log_type notice
log_type information

# For healthchecks
listener 1880 127.0.0.1
allow_anonymous true
file-mode: "600"
file-ownership: 1883:1883
share-pid-namespace: false
containers:
- name: mosquitto
image: registry-1.docker.io/eclipse-mosquitto
container-log-size: 100 MB
container-log-archive: false
mounts:
- volume-name: log
mount-path: /mosquitto/log
mode: read-write
- volume-name: data
mount-path: /mosquitto/data
mode: read-write
- volume-name: cfg
files:
- name: mosquitto.conf
mount-path: /mosquitto/config/mosquitto.conf
env: {}
probes:
readiness:
exec:
cmd:
- mosquitto_sub
- -p
- "1880"
- -t
- '$SYS/broker/uptime'
- -C
- "1"
- -i
- healthcheck
- -W
- "3"
initial-delay: 0s
timeout: 5s
period: 10s
success-threshold: 1
failure-threshold: 2
on-mounted-file-change:
restart: true
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "1883"
inbound-access:
allow-all: true
outbound-access:
allow-all: true
# Optionally put this on a shared application network
# network:
# shared-application-network: sensors
on-mutable-variable-change: restart-service-instance

mosquitto.app.yaml

name: mosquitto
# version: "1.0"
services:
- name: mqtt
mode: replicated
replicas: 1
volumes:
- name: log
ephemeral-volume:
size: 20MB
file-mode: "755"
file-ownership: 1883:1883
- name: data
ephemeral-volume:
size: 100MB
file-mode: "755"
file-ownership: 1883:1883
- name: cert
vault-secret:
vault: mosquitto
secret: cert
file-mode: "400"
file-ownership: 1883:1883
- name: credentials
vault-secret:
vault: mosquitto
secret: credentials
file-mode: "400"
file-ownership: 1883:1883
- name: cfg
config-map:
items:
- name: mosquitto.conf
data: |
listener 8883 0.0.0.0
log_type debug
log_type error
log_type warning
log_type notice
log_type information
password_file /mosquitto/config/password
cafile /mosquitto/cert/ca-cert.pem
certfile /mosquitto/cert/cert.pem
keyfile /mosquitto/cert/cert.key

# For healthchecks
listener 1880 127.0.0.1
allow_anonymous true
file-mode: "600"
file-ownership: 1883:1883
share-pid-namespace: false
containers:
- name: mosquitto
image: registry-1.docker.io/eclipse-mosquitto
container-log-size: 100 MB
container-log-archive: false
mounts:
- volume-name: log
mount-path: /mosquitto/log
mode: read-write
- volume-name: data
mount-path: /mosquitto/data
mode: read-write
- volume-name: cfg
files:
- name: mosquitto.conf
mount-path: /mosquitto/config/mosquitto.conf
- volume-name: cert
files:
- name: cert.pem
mount-path: /mosquitto/cert/cert.pem
- name: cert.key
mount-path: /mosquitto/cert/cert.key
- name: ca-cert.pem
mount-path: /mosquitto/cert/ca-cert.pem
- volume-name: credentials
files:
- name: password
mount-path: /mosquitto/config/password
env: {}
probes:
readiness:
exec:
cmd:
- mosquitto_sub
- -p
- "1880"
- -t
- '$SYS/broker/uptime'
- -C
- "1"
- -i
- healthcheck
- -W
- "3"
initial-delay: 0s
timeout: 5s
period: 10s
success-threshold: 1
failure-threshold: 2
on-mounted-file-change:
restart: true
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8883"
inbound-access:
allow-all: true
outbound-access:
allow-all: true
# Optionally put this on a shared application network
# network:
# shared-application-network: sensors
on-mutable-variable-change: restart-service-instance

hivemq-edge

hivemq-edge.vault.yaml

name: hivemq-edge
distribute:
deployments:
- hivemq-edge

hivemq-edge.app.yaml

name: hivemq-edge
# version: "1.0"
services:
- name: mqtt
mode: replicated
replicas: 1
variables:
# Get Enterprise MQTT broker credentials from Strongbox
- name: MQTT_USERNAME
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: mqtt-bridge-username
- name: MQTT_PASSWORD
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: mqtt-bridge-password
- name: ADMIN_USERNAME
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: admin-username
- name: ADMIN_PASSWORD
value-from-vault-secret:
vault: hivemq-edge
secret: credentials
key: admin-password
volumes:
- name: log
ephemeral-volume:
size: 20MB
file-ownership: 1001:1001
- name: data
ephemeral-volume:
size: 20MB
file-ownership: 1001:1001
- name: cfg
config-map:
items:
- name: default.conf
data: |
<?xml version="1.0"?>
<hivemq xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

<!-- Add mqtt listener -->
<mqtt-listeners>
<tcp-listener>
<port>1883</port>
<bind-address>0.0.0.0</bind-address>
</tcp-listener>
</mqtt-listeners>

<protocol-adapters>
<!-- Create a simulation that publishes on a topic called simulation -->
<simulation>
<id>temperature-simulation-fahrenheit</id>
<pollingIntervalMillis>5000</pollingIntervalMillis>
<minValue>65</minValue>
<maxValue>85</maxValue>
<subscriptions>
<subscription>
<destination>sensor/temperature/simulated</destination>
<qos>1</qos>
</subscription>
</subscriptions>
</simulation>
<modbus>
<id>modbus-connection</id>
<port>502</port>
<host>sensor.modbus-sensor.internal</host>
<publishChangedDataOnly>true</publishChangedDataOnly>
<publishingInterval>1000</publishingInterval>
<subscriptions>
<subscription>
<holding-registers>
<startIdx>0</startIdx>
<endIdx>1</endIdx>
</holding-registers>
<destination>sensor/temperature/modbus</destination>
</subscription>
</subscriptions>
</modbus>
</protocol-adapters>

<mqtt-bridges>
<mqtt-bridge>
<!-- Make sure each bridge uses a unique name -->
<id>${SYS_SITE}-remote-mqtt-broker</id>
<!-- forward to broker with authentication -->
<remote-broker>
<host>192.168.4.23</host>
<port>1883</port>
<authentication>
<mqtt-simple-authentication>
<username>${MQTT_USERNAME}</username>
<password>${MQTT_PASSWORD}</password>
</mqtt-simple-authentication>
</authentication>
</remote-broker>
<forwarded-topics>
<forwarded-topic>
<!-- Only forward topics from the simulation -->
<filters>
<mqtt-topic-filter>power</mqtt-topic-filter>
</filters>
<!-- suffix each topic with the name of the site -->
<destination>{#}/${SYS_SITE}</destination>
</forwarded-topic>
</forwarded-topics>
</mqtt-bridge>
</mqtt-bridges>

<!-- Enable admin API with username/password -->
<admin-api>
<enabled>true</enabled>
<listeners>
<http-listener>
<port>8080</port>
<bind-address>0.0.0.0</bind-address>
</http-listener>
</listeners>
<users>
<user>
<username>${ADMIN_USERNAME}</username>
<password>${ADMIN_PASSWORD}</password>
<roles>
<role>admin</role>
</roles>
</user>
</users>
</admin-api>
</hivemq>
network:
ingress-ip-per-instance:
protocols:
- name: tcp
# 1883: MQTT, 2442: MQTT-SN, 8080: HiveMQ Edge WebUI
port-ranges: "1883,2442,8080"
outbound-access:
allow-all: true
containers:
- name: hivemq-edge
image: registry-1.docker.io/hivemq/hivemq-edge
env:
HIVEMQ_BIND_ADDRESS: 0.0.0.0
mounts:
# Mount the config file in the container
- volume-name: cfg
files:
- name: default.conf
mount-path: /opt/hivemq/conf/config.xml
# Mount the log volume in the container
- volume-name: log
mount-path: /opt/hivemq/log
# Mount the data volume in the container
- volume-name: data
mount-path: /opt/hivemq/data
network:
shared-application-network: sensors

credentials.hivemq-edge.secret.yaml

name: credentials
data:
# Used when forwarding mqtt topics
mqtt-bridge-username: test-user
mqtt-bridge-password: test-pwd
# Used to login to the HiveMQ admin console
admin-username: admin
admin-password: password
allow-image-access: ["*"]

transformation

transformation.app.yaml

name: transformation
services:
- name: transformation
mode: replicated
replicas: 1
containers:
- name: transformation
image: registry.gitlab.com/avassa-public/iot-demo/transformation:v1.0
approle: transformation
env:
APPROLE_SECRET_ID: "${SYS_APPROLE_SECRET_ID}"
API_CA_CERT: "${SYS_API_CA_CERT}"
MQTT_SERVER: mqtt.hivemq-edge.internal
network:
outbound-access:
allow-all: true
network:
shared-application-network: sensors

transformation.approle.yaml

name: transformation
weak-secret-id: true
token-policies:
- transformation

transformation.policy.yaml

name: transformation
rest-api:
rules:
- path: /v1/alert
operations:
execute: allow
- path: /v1/clear
operations:
execute: allow

influxdb

credentials.influxdb.secret.yaml

name: credentials
data:
token: "NjA3QTExNDMtRDJFQi00RTU0LTlDOTctRUU4NTI0NzU2MEFECg=="
username: "admin"
password: "password"
mqtt-username: "test-user"
mqtt-password: "test-pwd"
allow-image-access: ["*"]

influxdb.app.yaml

name: influxdb
services:
- name: influxdb
mode: replicated
replicas: 1
variables:
- name: TOKEN
value-from-vault-secret:
vault: influxdb
secret: credentials
key: token
- name: USERNAME
value-from-vault-secret:
vault: influxdb
secret: credentials
key: username
- name: PASSWORD
value-from-vault-secret:
vault: influxdb
secret: credentials
key: password
volumes:
- name: data
ephemeral-volume:
size: 100MB
file-mode: "700"
file-ownership: 0:0
- name: etc
ephemeral-volume:
size: 10MB
file-mode: "700"
file-ownership: 0:0
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "8086"
outbound-access:
allow-all: true
containers:
- name: debug
image: registry-1.docker.io/alpine
cmd: ["sleep", "infinity" ]
- name: db
image: registry-1.docker.io/influxdb
env:
DOCKER_INFLUXDB_INIT_MODE: setup
DOCKER_INFLUXDB_INIT_USERNAME: ${USERNAME}
DOCKER_INFLUXDB_INIT_PASSWORD: ${PASSWORD}
# NOTE, this must match the telegraf config below
DOCKER_INFLUXDB_INIT_ORG: iot-demo
DOCKER_INFLUXDB_INIT_BUCKET: mqtt
DOCKER_INFLUXDB_INIT_RETENTION: 30d
DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: ${TOKEN}
mounts:
- volume-name: data
mount-path: /var/lib/influxdb2
mode: read-write
- volume-name: etc
mount-path: /etc/influxdb2
mode: read-write
- name: telegraf
mode: replicated
replicas: 1
variables:
- name: TOKEN
value-from-vault-secret:
vault: influxdb
secret: credentials
key: token
# Get Enterprise MQTT broker credentials from Strongbox
- name: MQTT_USERNAME
value-from-vault-secret:
vault: influxdb
secret: credentials
key: mqtt-username
- name: MQTT_PASSWORD
value-from-vault-secret:
vault: influxdb
secret: credentials
key: mqtt-password
volumes:
- name: cfg
config-map:
items:
- name: telegraf.conf
file-ownership: 999:999
data: |
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"

## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"

## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""

## Log at debug level.
debug = true
## Log only error level messages.
# quiet = false

## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"

## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""

## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"

## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"

## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5

## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""

## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = true

[[outputs.influxdb_v2]]
urls = ["http://influxdb:8086"]
token = "${TOKEN}"
organization = "iot-demo"
bucket = "mqtt"

# Read metrics from MQTT topic(s)
[[inputs.mqtt_consumer]]
servers = ["tcp://mqtt.mosquitto.${SYS_TENANT}.${SYS_SITE}.${SYS_GLOBAL_DOMAIN}:1883"]

topics = [
"power/#"
]

## Username and password to connect to the MQTT server.
username = "${MQTT_USERNAME}"
password = "${MQTT_PASSWORD}"

## Data format to consume.
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json_v2"

## Enable extracting tag values from MQTT topics
## _ denotes an ignored entry in the topic path
[[inputs.mqtt_consumer.topic_parsing]]
topic = "power/+"
measurement = "power"
tags = "_/site"
fields=""

[[inputs.mqtt_consumer.json_v2]]
measurement_name = "power"
[[inputs.mqtt_consumer.json_v2.field]]
path = "power"
type = "float"
[[inputs.mqtt_consumer.json_v2.tag]]
path = "room"
rename = "sensor-room"
type = "string"
network:
outbound-access:
allow-all: true
containers:
- name: telegraf
image: registry-1.docker.io/telegraf
user: 999
mounts:
# Mount the config file in the container
- volume-name: cfg
files:
- name: telegraf.conf
mount-path: /etc/telegraf/telegraf.conf

influxdb.vault.yaml

name: influxdb
distribute:
deployments:
- influxdb

node-red

node-red.app.yaml

name: node-red
services:
- name: node
mode: replicated
replicas: 1
volumes:
- name: data
ephemeral-volume:
size: 100MB
file-mode: "755"
file-ownership: 1000:1000
- name: cfg
config-map:
items:
- name: start.sh
data: |
#!/bin/sh
echo "Copying flows.json"
cp /tmp/flows.json /data/flows.json
chmod +rw /data/flows.json

cd /usr/src/node-red
echo "Installing modbus"
npm install node-red-contrib-modbus
./entrypoint.sh
file-mode: "700"
file-ownership: 1000:1000
- name: flows.json
data: |
[
{
"id": "a9d3f95e5cd592a8",
"type": "tab",
"label": "MQTT-demo",
"disabled": false,
"info": "",
"env": []
},
{
"id": "2509b4ced61ad420",
"type": "mqtt-broker",
"name": "",
"broker": "mqtt.hivemq-edge.internal",
"port": "1883",
"clientid": "",
"autoConnect": true,
"usetls": false,
"protocolVersion": "5",
"keepalive": "60",
"cleansession": true,
"autoUnsubscribe": true,
"birthTopic": "",
"birthQos": "0",
"birthRetain": "false",
"birthPayload": "",
"birthMsg": {},
"closeTopic": "",
"closeQos": "0",
"closeRetain": "false",
"closePayload": "",
"closeMsg": {},
"willTopic": "",
"willQos": "0",
"willRetain": "false",
"willPayload": "",
"willMsg": {},
"userProps": "",
"sessionExpiry": ""
},
{
"id": "75366a13439a6c1b",
"type": "mqtt in",
"z": "a9d3f95e5cd592a8",
"name": "",
"topic": "sensor/temperature/simulated",
"qos": "2",
"datatype": "auto-detect",
"broker": "2509b4ced61ad420",
"nl": false,
"rap": true,
"rh": 0,
"inputs": 0,
"x": 190,
"y": 220,
"wires": [
[
"cdf819cd934aadde"
]
]
},
{
"id": "cdf819cd934aadde",
"type": "function",
"z": "a9d3f95e5cd592a8",
"name": "fahrenheit-2-celcius",
"func": "const temp_c = (msg.payload.value - 32) / 1.8;\nvar dt = new Date(msg.payload.timestamp);\nconst new_msg = {\n \"payload\": {\n \"timestamp\": dt.toISOString(),\n \"temperature\": temp_c,\n \"source\": \"simulated\",\n }\n};\nreturn new_msg;",
"outputs": 1,
"timeout": 0,
"noerr": 0,
"initialize": "",
"finalize": "",
"libs": [],
"x": 440,
"y": 220,
"wires": [
[
"9df417bbf0d0d6ea"
]
]
},
{
"id": "9df417bbf0d0d6ea",
"type": "mqtt out",
"z": "a9d3f95e5cd592a8",
"name": "",
"topic": "sensor/temperature/normalized",
"qos": "2",
"retain": "",
"respTopic": "",
"contentType": "application/json",
"userProps": "",
"correl": "",
"expiry": "",
"broker": "2509b4ced61ad420",
"x": 660,
"y": 220,
"wires": []
}
]
file-ownership: 1000:1000
containers:
- name: red
image: registry-1.docker.io/nodered/node-red
entrypoint: ["/start.sh"]
# env:
# FLOWS: /flows.json
mounts:
- volume-name: data
mount-path: /data
mode: read-write
- volume-name: cfg
files:
- name: flows.json
mount-path: /tmp/flows.json
- name: start.sh
mount-path: /start.sh
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "1880"
access:
allow-all: true
outbound-access:
allow-all: true
network:
shared-application-network: sensors

virtual-sensor

modbus-sensor.app.yaml

name: modbus-sensor
services:
- name: sensor
mode: replicated
replicas: 1
containers:
- name: sensor
image: registry.gitlab.com/avassa-public/iot-demo/virtual-sensor:v1.0
cmd:
- modbus
- "--port=502"
network:
ingress-ip-per-instance:
protocols:
- name: tcp
port-ranges: "502"
outbound-access:
allow-all: true
network:
shared-application-network: sensors

credentials.sensors.yaml

name: credentials
data:
password: password
username: test-user
allow-image-access:
- "*"

mqtt-sensor.app.yaml

name: mqtt-sensor
services:
- name: sensor
mode: replicated
replicas: 1
containers:
- name: sensor
image: registry.gitlab.com/avassa-public/iot-demo/virtual-sensor:v1.0
cmd:
- mqtt
- mqtt.hivemq-edge.internal
network:
outbound-access:
allow-all: true
network:
shared-application-network: sensors