Initial Adding of Compose Files

This commit is contained in:
mindesbunister
2025-06-24 13:31:31 +02:00
parent 2a146b67fb
commit 1f2b05c937
87 changed files with 30542 additions and 0 deletions

59
compose_files/Dockerfile Executable file

@@ -0,0 +1,59 @@
FROM nextcloud:26
RUN set -ex; \
\
apt-get update; \
apt-get install -y --no-install-recommends \
ffmpeg \
libmagickcore-6.q16-6-extra \
procps \
smbclient \
supervisor \
; \
rm -rf /var/lib/apt/lists/*
RUN set -ex; \
\
savedAptMark="$(apt-mark showmanual)"; \
\
apt-get update; \
apt-get install -y --no-install-recommends \
libbz2-dev \
libc-client-dev \
libkrb5-dev \
libsmbclient-dev \
; \
\
docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \
docker-php-ext-install \
bz2 \
imap \
; \
pecl install smbclient; \
docker-php-ext-enable smbclient; \
\
# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
apt-mark auto '.*' > /dev/null; \
apt-mark manual $savedAptMark; \
ldd "$(php -r 'echo ini_get("extension_dir");')"/*.so \
| awk '/=>/ { print $3 }' \
| sort -u \
| xargs -r dpkg-query -S \
| cut -d: -f1 \
| sort -u \
| xargs -rt apt-mark manual; \
\
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p \
/var/log/supervisord \
/var/run/supervisord \
;
COPY supervisord.conf /
ENV NEXTCLOUD_UPDATE=1
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]

33
compose_files/bitwarden.yml Executable file

@@ -0,0 +1,33 @@
#docker-compose.yml
version: "3.3"
services:
bitwarden:
image: vaultwarden/server
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: bitwarden
volumes:
- /home/icke/bw-data:/data
environment:
WEBSOCKET_ENABLED: "true"
ADMIN_TOKEN: "gpwPpd1A60oWu6uSCCc6hIv8CtrPhuVC1rhqSBP3DmVGCUA9Q/vzKDOY8q+xxE1F"
YUBICO_CLIENT_ID: "51442"
YUBICO_SECRET_KEY: "RKirOdvbVwWMoY9V0FRHGLdGfjY="
ports:
- "81:80"
- "3012:3012"
restart: unless-stopped
networks:
bitwarden:
ipv4_address: 172.22.0.2
networks:
bitwarden:
driver: bridge
ipam:
config:
- subnet: 172.22.0.0/30
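The compose files in this commit log through driver: loki, which is not shipped with Docker itself. A sketch of installing the Grafana Loki Docker logging-driver plugin on the host before starting these containers (the command follows Grafana's documentation):
# Install the Loki log driver plugin and alias it as "loki"
docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
# Verify the plugin shows up and is enabled
docker plugin ls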

55
compose_files/blog.yml Executable file

@@ -0,0 +1,55 @@
#docker-compose.yml
version: "3.3"
services:
blog:
image: dalareo/wordpress-ldap
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: blog
restart: unless-stopped
ports:
- "8083:80"
volumes:
- /home/icke/blog/:/var/www/html
- /home/icke/blog/uploads.ini:/usr/local/etc/php/conf.d/uploads.ini
links:
- mysql-blog
environment:
WORDPRESS_DB_HOST: mysql-blog
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_NAME: blog
WORDPRESS_DB_PASSWORD: eccmts42*
networks:
blog:
ipv4_address: 172.23.0.2
mysql-blog:
image: mysql:5.7
container_name: mysql-blog
environment:
# - default-authentication-plugin=mysql_native_password
- MYSQL_ROOT_PASSWORD=eccmts42*
- MYSQL_DATABASE=blog
- MYSQL_USER=wordpress
- MYSQL_PASSWORD=eccmts42*
volumes:
- /home/icke/mysql-blog/var:/var/lib/mysql
restart: unless-stopped
networks:
blog:
ipv4_address: 172.23.0.3
networks:
blog:
driver: bridge
ipam:
config:
- subnet: 172.23.0.0/29
volumes:
blog:
mysql-blog:

19
compose_files/collabora.yml Executable file

@@ -0,0 +1,19 @@
#docker-compose.yml
version: "3.3"
services:
Collabora:
image: collabora/code
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: collabora
ports:
- 9980:9980
cap_add:
- MKNOD
environment:
- domain=nextcloud.egonetix.de

38
compose_files/ecodms.yml Executable file

@@ -0,0 +1,38 @@
#docker-compose.yml
version: "3.3"
services:
ecodms:
#image: ecodms/allinone-18.09
image: ecodms/ecodms
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: ecodms
volumes:
- /home/icke/ecodms/data:/srv/data
# - /mnt/scaninput:/srv/scaninput
#- /mnt/nextcloud/robert.wiegand/files/Scaninput:/srv/scaninput
- /mnt/ecodms/backup:/srv/backup
- /home/icke/ecodms/restore:/srv/restore
#labels:
#- com.centurylinklabs.watchtower.enable=true
ports:
- "17001:17001"
# - "17002:17002"
- "17004:8097"
- "17005:8180"
- "8086:8086"
restart: unless-stopped
networks:
ecodms:
ipv4_address: 172.20.0.2
networks:
ecodms:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/30

4
compose_files/entrypoint.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/sh
chown -R monero:monero /home/monero/.bitmonero
#exec lchaia/monerod "$@"
exec /usr/local/bin/monerod "$@"

40
compose_files/firefox_sync.yml Executable file

@@ -0,0 +1,40 @@
version: "3.2"
services:
firefox-syncserver:
image: crazymax/firefox-syncserver:latest
container_name: firefox_syncserver
ports:
- "5000:5000"
# - target: 5000
# published: 5000
# protocol: tcp
volumes:
- /home/icke/firefox_sync:/data
environment:
TZ: CEST
FF_SYNCSERVER_ACCESSLOG: "true"
# FF_SYNCSERVER_LOGLEVEL:
FF_SYNCSERVER_PUBLIC_URL: "https://sync.egonetix.de"
FF_SYNCSERVER_SECRET: "D3NTEnkCLwBMcaZ&Z*p*W9xvfBgDuFQ2XgHGSNhSELv"
FF_SYNCSERVER_ALLOW_NEW_USERS: "true"
FF_SYNCSERVER_FORCE_WSGI_ENVIRON: "true"
FF_SYNCSERVER_FORWARDED_ALLOW_IPS: "*"
# env_file:
# - "./firefox-syncserver.env"
restart: unless-stopped
networks:
firefox_sync:
ipv4_address: 172.27.0.2
networks:
firefox_sync:
driver: bridge
ipam:
config:
- subnet: 172.27.0.0/30
volumes:
firefox-syncserver:

20
compose_files/gitea.yml Normal file

@@ -0,0 +1,20 @@
services:
gitea:
image: gitea/gitea:latest
container_name: gitea
restart: unless-stopped
environment:
USER_UID: 1000
USER_GID: 1000
volumes:
- /home/icke/gitea/data:/data
ports:
- "4000:3000" # Web interface
- "222:22" # SSH access
networks:
- gitea
networks:
gitea:
driver: bridge
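Because SSH is published on host port 222 rather than 22, clones over SSH need the explicit port. A usage sketch with placeholder host and repository names:
# Clone over the remapped SSH port (host 222 -> container 22)
git clone ssh://git@git.example.com:222/owner/repo.git
# The web UI is published on host port 4000
curl -I http://git.example.com:4000/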


@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,200 @@
## grafana-loki-syslog-aio
<center><img src="https://github.com/lux4rd0/grafana-loki-syslog-aio/blob/main/loki_syslog_aio.png"></center>
## About The Project
This Loki Syslog All-In-One example is geared to help you get up and running quickly with a Syslog ingestor and visualization of logs. It uses [Grafana Loki](https://grafana.com/oss/loki/) and Promtail as a receiver for forwarded syslog-ng logs. I also wrote an [introductory blog post](https://labs.lux4rd0.com/2021/01/oldskool-syslog-meets-newskool-loki/) about how this AIO project came about (pesky intermittent network issues!).
<center><img src="https://github.com/lux4rd0/grafana-loki-syslog-aio/blob/main/loki_syslog_aio_overview_sized.png"></center>
*Note that this All In One is geared towards getting network traffic from legacy syslog (RFC3164 UDP port 514) into Loki via [syslog-ng](https://www.syslog-ng.com/) and [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/).*
Essentially:
> RFC3164 Network/Compute Devices -> syslog-ng (UDP port 514) ->
> Promtail (port 1514) -> Loki (port 3100) <- Grafana (port 3000)
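A quick way to exercise this pipeline end to end is to hand-send an RFC3164 message at the syslog-ng UDP listener. A sketch assuming util-linux logger is available and docker-host is the machine running the stack:
# Send a test message over UDP to syslog-ng on port 514
logger --rfc3164 --server docker-host --port 514 --udp "hello from the syslog pipeline test"
# Equivalent raw datagram if logger is unavailable
echo "<13>$(date '+%b %d %H:%M:%S') testhost myapp: hello from the syslog pipeline test" | nc -u -w1 docker-host 514
The message should then appear in the "Loki Syslog AIO - Overview" dashboard within a few seconds.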
## Getting Started
The project is built around a pre-configured Docker stack of the following:
- [Grafana](https://grafana.com/oss/grafana/)
- [Grafana Loki](https://grafana.com/oss/loki/) (configured for [MinIO](https://min.io/))
- [Grafana Promtail](https://grafana.com/docs/loki/latest/clients/promtail/)
- [syslog-ng](https://www.syslog-ng.com/)
The stack has been extended to include pre-configured monitoring with:
- [Prometheus](https://grafana.com/oss/prometheus/)
- [Node-Exporter](https://github.com/prometheus/node_exporter)
- [cAdvisor](https://github.com/google/cadvisor)
A simple Syslog generator is included based on Vicente Zepeda Mas's [random-logger](https://github.com/chentex/random-logger) project.
## Prerequisites
- [Docker](https://docs.docker.com/install)
- [Docker Compose](https://docs.docker.com/compose/install)
## Using
This project is built and tested on Linux CentOS 7. To get started, download the code from this repository and extract it into an empty directory. For example:
wget https://github.com/lux4rd0/grafana-loki-syslog-aio/archive/main.zip
unzip main.zip
cd grafana-loki-syslog-aio-main
From that directory, run the docker-compose command:
**Full Example Stack:** Grafana, Loki with s3/MinIO, Promtail, syslog-ng, Prometheus, cAdvisor, node-exporter
docker-compose -f ./docker-compose.yml up -d
This will download all of the needed application containers and start them up.
*(Optional docker-compose configurations are listed under **Options** below)*
**Grafana Dashboards**
Once all of the Docker containers have started, point your web browser to the Grafana page, typically http://hostname:3000/ - with hostname being the name of the server on which you ran the docker-compose up -d command. The "Loki Syslog AIO - Overview" dashboard is set as the default, and no login is required.
*Note: this docker-compose stack is designed to be as easy as possible to deploy and run. Logins have been disabled, and the default user has an admin role. This can be changed to an Editor or Viewer role by changing the Grafana environment variable in the docker-compose.yml file to:*
GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer
**Getting Started With Loki**
Here are some additional resources you might find helpful if you're just getting started with Loki:
- [Getting started with Grafana and Loki in under 4
minutes](https://grafana.com/go/webinar/loki-getting-started/)
- [An (only slightly technical) introduction to Loki](https://grafana.com/blog/2020/05/12/an-only-slightly-technical-introduction-to-loki-the-prometheus-inspired-open-source-logging-system/)
- [Video tutorial: Effective troubleshooting queries with Grafana
Loki](https://grafana.com/blog/2021/01/07/video-tutorial-effective-troubleshooting-queries-with-grafana-loki/)
## Stack Options:
A few other docker-compose files are also available:
**Full Example Stack with Syslog Generator:** Grafana, Loki with s3/MinIO, Promtail, syslog-ng, Prometheus, cAdvisor, node-exporter, Syslog Generator
docker-compose -f ./docker-compose-with-generator.yml up -d
**Example Stack without monitoring or Syslog generator**: Grafana, Loki with s3/MinIO, Promtail, syslog-ng
docker-compose -f ./docker-compose-without-monitoring.yml up -d
**Example Stack without MinIO, monitoring, or Syslog generator:** Grafana, Loki with the filesystem, Promtail, syslog-ng
docker-compose -f ./docker-compose-filesystem.yml up -d
The *Syslog Generator* configuration needs Internet access to do a local Docker build from its location in ./generator. It provides a few named hosts and sends random INFO, WARN, DEBUG, and ERROR logs to syslog-ng/Loki.
<center><img src="https://github.com/lux4rd0/grafana-loki-syslog-aio/blob/main/loki_syslog_aio_overview_generator_sized.png"></center>
## Configuration Review:
The default Loki storage configuration in docker-compose.yml uses S3 storage with MinIO. If you want to use the filesystem instead, use one of the other docker-compose configurations listed above or change the configuration directly. An example would be:
volumes:
- ./config/loki-config-filesystem.yml:/etc/loki/loki-config.yml:ro
**Changing MinIO Keys**
The MinIO configuration sets a default Access Key and Secret Key at startup. If you want to change them, you'll need to update two files:
./docker-compose.yml
MINIO_ACCESS_KEY: minio123
MINIO_SECRET_KEY: minio456
./config/loki-config-s3.yml
aws:
s3: s3://minio123:minio456@minio.:9000/loki
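After updating both files with matching keys, a quick check that MinIO is healthy (this mirrors the healthcheck already defined in the compose files):
curl -sf http://localhost:9000/minio/health/live && echo "MinIO is up"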
## Changed Default Configurations In syslog-ng and Promtail
To set up this example All-In-One project, the following configurations have been added to docker-compose.yml. If you already have syslog-ng running on your deployment server, make similar changes below and comment out the Docker container stanza.
#### SYSLOG-NG CONFIGURATION (docker container listens on port 514)
**# syslog-ng.conf**
source s_local {
internal();
};
source s_network {
default-network-drivers(
);
};
destination d_loki {
syslog("promtail" transport("tcp") port("1514"));
};
log {
source(s_local);
source(s_network);
destination(d_loki);
};
> Note: the above "`promtail`" configuration for `destination d_loki` is
> the *hostname* where Promtail is running. In this example, it happens
> to be the Promtail *docker container* name that I configured for the
> All-In-One example.
#### PROMTAIL CONFIGURATION (docker container listens on port 1514)
**# promtail-config.yml**
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://loki:3100/loki/api/v1/push
scrape_configs:
- job_name: syslog
syslog:
listen_address: 0.0.0.0:1514
idle_timeout: 60s
label_structured_data: yes
labels:
job: "syslog"
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: 'host'
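Once messages are flowing, the Loki HTTP API can confirm that the syslog job and its host labels are being ingested. A sketch assuming Loki is reachable on port 3100 of the Docker host:
# List the host label values Loki has seen
curl -s http://localhost:3100/loki/api/v1/label/host/values
# Pull a few recent syslog entries (query_range defaults to the last hour)
curl -G -s http://localhost:3100/loki/api/v1/query_range --data-urlencode 'query={job="syslog"}' --data-urlencode 'limit=5'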
## Contributing
Contributions make the open source community such a fantastic place to learn, inspire, and create. Any contributions you make are greatly appreciated.
- Fork the Project
- Create your Feature Branch (git checkout -b feature/AmazingFeature)
- Commit your Changes (git commit -m 'Add some AmazingFeature')
- Push to the Branch (git push origin feature/AmazingFeature)
- Open a Pull Request
## Contact
Dave Schmid - [@lux4rd0](https://twitter.com/lux4rd0) - dave@pulpfree.org
Project Link: https://github.com/lux4rd0/grafana-loki-syslog-aio
## Acknowledgements
- Grafana Labs - https://grafana.com/
- Grafana Loki - https://grafana.com/oss/loki/
- Grafana - https://grafana.com/oss/grafana/
- syslog-ng - https://www.syslog-ng.com/
- Random Logger - https://github.com/chentex/random-logger
- Grafana Dashboard Community (Performance Overviews) - https://grafana.com/grafana/dashboards


@@ -0,0 +1,436 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
},
{
"datasource": "Loki",
"enable": false,
"expr": "{job=\"syslog\"} |=\"DNS request timed out\"",
"hide": false,
"iconColor": "#C4162A",
"limit": 100,
"name": "DNS Timeout",
"showIn": 0,
"tags": [],
"target": {},
"type": "tags"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 9,
"iteration": 1629593809615,
"links": [
{
"asDropdown": true,
"icon": "external link",
"keepTime": true,
"tags": [
"performance_overview"
],
"title": "Performance Overview",
"type": "dashboards"
}
],
"panels": [
{
"datasource": "Loki",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "opacity",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 24,
"x": 0,
"y": 0
},
"id": 3,
"interval": "$smooth",
"options": {
"legend": {
"calcs": [
"mean",
"sum"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"pluginVersion": "8.1.2",
"targets": [
{
"expr": "count_over_time({job=\"syslog\"}[$__interval])",
"legendFormat": "{{host}}",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Log Line Counts By Host (Unfiltered)",
"transformations": [],
"type": "timeseries"
},
{
"datasource": "Loki",
"description": "",
"fieldConfig": {
"defaults": {
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "semi-dark-orange",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 24,
"x": 0,
"y": 9
},
"id": 4,
"interval": "$smooth",
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "8.1.2",
"targets": [
{
"expr": "count_over_time({host=~\"$host\", job=\"syslog\"} [$__interval] |~ \"$free_form_filter\" |~ \"$filter\")",
"legendFormat": "{{host}}",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Counts By Host ($host) - \"$filter\" - \"$free_form_filter\" (Filtered)",
"transformations": [],
"type": "stat"
},
{
"datasource": "Loki",
"description": "",
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 21
},
"id": 5,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": true,
"showTime": true,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"pluginVersion": "7.3.6",
"targets": [
{
"expr": "{job=\"syslog\", host=~\"$host\"} |~ \"$free_form_filter\" |~ \"$filter\"",
"legendFormat": "",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Logs By Host - \"$filter\" - \"$free_form_filter\" (Filtered)",
"type": "logs"
}
],
"refresh": "30s",
"schemaVersion": 30,
"style": "dark",
"tags": [
"syslog"
],
"templating": {
"list": [
{
"allValue": null,
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": "Loki",
"definition": "label_values({job=\"syslog\"}, host)",
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": "Host",
"multi": true,
"name": "host",
"options": [],
"query": "label_values({job=\"syslog\"}, host)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 5,
"tagValuesQuery": "",
"type": "query"
},
{
"allValue": null,
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"description": null,
"error": null,
"hide": 0,
"includeAll": true,
"label": "Filter",
"multi": true,
"name": "filter",
"options": [
{
"selected": true,
"text": "All",
"value": "$__all"
},
{
"selected": false,
"text": "info",
"value": "info"
},
{
"selected": false,
"text": "debug",
"value": "debug"
},
{
"selected": false,
"text": "warn",
"value": "warn"
},
{
"selected": false,
"text": "error",
"value": "error"
},
{
"selected": false,
"text": "ERR",
"value": "ERR"
},
{
"selected": false,
"text": "DNS",
"value": "DNS"
},
{
"selected": false,
"text": "auth_failures",
"value": "auth_failures"
},
{
"selected": false,
"text": "wpa_auth_failures",
"value": "wpa_auth_failures"
}
],
"query": "info,debug,warn,error,ERR,DNS,auth_failures,wpa_auth_failures",
"queryValue": "",
"skipUrlSync": false,
"type": "custom"
},
{
"current": {
"selected": true,
"text": "",
"value": ""
},
"description": null,
"error": null,
"hide": 0,
"label": "Free Form Filter",
"name": "free_form_filter",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"allValue": null,
"current": {
"selected": false,
"text": "1m",
"value": "1m"
},
"description": null,
"error": null,
"hide": 0,
"includeAll": false,
"label": "Smooth",
"multi": false,
"name": "smooth",
"options": [
{
"selected": false,
"text": "30s",
"value": "30s"
},
{
"selected": true,
"text": "1m",
"value": "1m"
},
{
"selected": false,
"text": "2m",
"value": "2m"
},
{
"selected": false,
"text": "5m",
"value": "5m"
},
{
"selected": false,
"text": "10m",
"value": "10m"
},
{
"selected": false,
"text": "15m",
"value": "15m"
},
{
"selected": false,
"text": "30m",
"value": "30m"
},
{
"selected": false,
"text": "1h",
"value": "1h"
},
{
"selected": false,
"text": "2h",
"value": "2h"
}
],
"query": "30s,1m,2m,5m,10m,15m,30m,1h,2h",
"queryValue": "",
"skipUrlSync": false,
"type": "custom"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Loki Syslog AIO - Overview",
"uid": "lux4rd0labs_loki_syslog_aio_01",
"version": 4
}


@@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: 'NoFolderDashboards'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
updateIntervalSeconds: 10
options:
path: /var/lib/grafana/dashboards/no_folder


@@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: 'PerformanceOverviewDashboards'
orgId: 1
folder: 'Performance Overview'
type: file
disableDeletion: false
editable: true
updateIntervalSeconds: 10
options:
path: /var/lib/grafana/dashboards/performance_overview


@@ -0,0 +1,10 @@
apiVersion: 1
datasources:
-
access: proxy
basicAuth: false
jsonData:
maxLines: 1000
name: Loki
type: loki
url: "http://loki:3100/"


@@ -0,0 +1,8 @@
apiVersion: 1
datasources:
-
access: proxy
basicAuth: false
name: Prometheus
type: prometheus
url: "http://prometheus:9090/"


@@ -0,0 +1,64 @@
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed
max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h
chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /tmp/loki/boltdb-shipper-active
cache_location: /tmp/loki/boltdb-shipper-cache
cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
shared_store: filesystem
filesystem:
directory: /tmp/loki/chunks
compactor:
working_directory: /tmp/loki/boltdb-shipper-compactor
shared_store: filesystem
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
ruler:
storage:
type: local
local:
directory: /tmp/loki/rules
rule_path: /tmp/loki/rules-temp
alertmanager_url: http://localhost:9093
ring:
kvstore:
store: inmemory
enable_api: true


@@ -0,0 +1,79 @@
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m # Any chunk not receiving new logs in this time will be flushed (set very low just to push data into s3 example)
max_chunk_age: 10m # All chunks will be flushed when they hit this age, default is 1h
chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
shared_store: s3
active_index_directory: /tmp/loki/index
cache_location: /tmp/loki/boltdb-cache
aws:
s3: s3://minio123:minio456@minio.:9000/loki
s3forcepathstyle: true
compactor:
working_directory: /tmp/loki/compactor
shared_store: s3
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 48h
table_manager:
retention_deletes_enabled: true
retention_period: 48h
query_range:
align_queries_with_step: true
max_retries: 5
split_queries_by_interval: 15m
parallelise_shardable_queries: true
cache_results: true
results_cache:
cache:
enable_fifocache: true
fifocache:
size: 1024
validity: 24h
ruler:
storage:
type: local
local:
directory: /tmp/loki/rules
rule_path: /tmp/loki/rules-temp
alertmanager_url: http://localhost:9093
ring:
kvstore:
store: inmemory
enable_api: true


@@ -0,0 +1,47 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
external_labels:
origin_prometheus: aio
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['prometheus:9090']
- job_name: 'grafana'
static_configs:
- targets: ['grafana:3000']
- job_name: 'loki'
static_configs:
- targets: ['loki:3100']
- job_name: 'promtail'
static_configs:
- targets: ['promtail:9080']
- job_name: 'minio'
metrics_path: /minio/prometheus/metrics
static_configs:
- targets: ['minio:9000']
- job_name: 'node'
static_configs:
- targets: ['node-exporter:9100']
- job_name: 'cadvisor'
static_configs:
- targets: ['cadvisor:8080']


@@ -0,0 +1,22 @@
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://loki:3100/loki/api/v1/push
scrape_configs:
- job_name: syslog
syslog:
listen_address: 0.0.0.0:1514
idle_timeout: 60s
label_structured_data: yes
labels:
job: "syslog"
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: 'host'


@@ -0,0 +1,42 @@
#############################################################################
# Default syslog-ng.conf file which collects all local logs into a
# single file called /var/log/messages tailored to container usage.
#
# The changes from the stock, default syslog-ng.conf file are that we've
# dropped the system() source that is not needed and that we enabled network
# connections using default-network-drivers(). Customize as needed and
# override using the -v option to docker, such as:
#
# docker run ... -v "$PWD/syslog-ng.conf":/etc/syslog-ng/syslog-ng.conf
#
@version: 3.29
@include "scl.conf"
source s_local {
internal();
};
source s_network {
default-network-drivers(
# NOTE: TLS support
#
# the default-network-drivers() source driver opens the TLS
# enabled ports as well, however without an actual key/cert
# pair they will not operate and syslog-ng would display a
# warning at startup.
#
#tls(key-file("/path/to/ssl-private-key") cert-file("/path/to/ssl-cert"))
);
};
destination d_loki {
syslog("promtail" transport("tcp") port("1514"));
};
log {
source(s_local);
source(s_network);
destination(d_loki);
};


@@ -0,0 +1,71 @@
networks:
loki: {}
services:
grafana:
container_name: grafana
environment:
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_DISABLE_LOGIN_FORM: "true"
GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/no_folder/loki_syslog_aio_overview.json
image: grafana/grafana:8.1.2
networks:
loki: null
ports:
- protocol: tcp
published: 3000
target: 3000
restart: always
volumes:
- ./config/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
- ./config/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./config/grafana/dashboards:/var/lib/grafana/dashboards:ro
loki:
command: -config.file=/etc/loki/loki-config.yml
container_name: loki
image: grafana/loki:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 3100
target: 3100
restart: always
volumes:
- ./config/loki-config-filesystem.yml:/etc/loki/loki-config.yml:ro
promtail:
command: -config.file=/etc/promtail/promtail-config.yml
container_name: promtail
image: grafana/promtail:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 1514
target: 1514
- protocol: tcp
published: 9080
target: 9080
restart: always
volumes:
- ./config/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
syslog-ng:
command: -edv
container_name: syslog-ng
depends_on:
- promtail
image: balabit/syslog-ng:latest
networks:
loki: null
ports:
- protocol: udp
published: 514
target: 514
- protocol: tcp
published: 601
target: 601
restart: always
volumes:
- ./config/syslog-ng.conf:/etc/syslog-ng/syslog-ng.conf:ro
version: '3.3'


@@ -0,0 +1,148 @@
networks:
loki: {}
services:
cadvisor:
container_name: cadvisor
image: gcr.io/cadvisor/cadvisor:latest
networks:
loki: null
ports:
- published: 8080
target: 8080
restart: always
volumes:
- /:/rootfs:ro
- /var/run:/var/run:rw
- /sys:/sys:ro
- /var/lib/docker:/var/lib/docker:ro
generator:
build:
context: ./generator
container_name: generator
depends_on:
- syslog-ng
networks:
loki: null
grafana:
container_name: grafana
environment:
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_DISABLE_LOGIN_FORM: "true"
GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/no_folder/loki_syslog_aio_overview.json
image: grafana/grafana:8.1.2
networks:
loki: null
ports:
- protocol: tcp
published: 3000
target: 3000
restart: always
volumes:
- ./config/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
- ./config/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./config/grafana/dashboards:/var/lib/grafana/dashboards:ro
loki:
command: -config.file=/etc/loki/loki-config.yml
container_name: loki
depends_on:
- minio
image: grafana/loki:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 3100
target: 3100
restart: always
volumes:
- ./config/loki-config-s3.yml:/etc/loki/loki-config.yml:ro
minio:
command: -c 'mkdir -p /data/loki && /usr/bin/docker-entrypoint.sh minio server /data'
container_name: minio
entrypoint: sh
environment:
MINIO_ACCESS_KEY: minio123
MINIO_PROMETHEUS_AUTH_TYPE: public
MINIO_SECRET_KEY: minio456
healthcheck:
interval: 30s
retries: 3
test:
- CMD
- curl
- -f
- http://localhost:9000/minio/health/live
timeout: 20s
image: minio/minio:latest
networks:
loki: null
ports:
- published: 9000
target: 9000
restart: always
node-exporter:
command: --path.rootfs=/host
container_name: node-exporter
image: prom/node-exporter:latest
networks:
loki: null
pid: host
ports:
- published: 9100
target: 9100
restart: always
volumes:
- /:/host:ro,rslave
prometheus:
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --web.enable-admin-api
- --web.enable-lifecycle
container_name: prometheus
image: prom/prometheus:latest
networks:
loki: null
ports:
- published: 9090
target: 9090
restart: always
volumes:
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro
promtail:
command: -config.file=/etc/promtail/promtail-config.yml
container_name: promtail
image: grafana/promtail:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 1514
target: 1514
- protocol: tcp
published: 9080
target: 9080
restart: always
volumes:
- ./config/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
syslog-ng:
command: -edv
container_name: syslog-ng
depends_on:
- promtail
image: balabit/syslog-ng:latest
networks:
loki: null
ports:
- protocol: udp
published: 514
target: 514
- protocol: tcp
published: 601
target: 601
restart: always
volumes:
- ./config/syslog-ng.conf:/etc/syslog-ng/syslog-ng.conf:ro
version: '3.3'


@@ -0,0 +1,97 @@
networks:
loki: {}
services:
grafana:
container_name: grafana
environment:
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_DISABLE_LOGIN_FORM: "true"
GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/no_folder/loki_syslog_aio_overview.json
image: grafana/grafana:8.1.2
networks:
loki: null
ports:
- protocol: tcp
published: 3000
target: 3000
restart: always
volumes:
- ./config/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
- ./config/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./config/grafana/dashboards:/var/lib/grafana/dashboards:ro
loki:
command: -config.file=/etc/loki/loki-config.yml
container_name: loki
depends_on:
- minio
image: grafana/loki:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 3100
target: 3100
restart: always
volumes:
- ./config/loki-config-s3.yml:/etc/loki/loki-config.yml:ro
minio:
command: -c 'mkdir -p /data/loki && /usr/bin/docker-entrypoint.sh minio server /data'
container_name: minio
entrypoint: sh
environment:
MINIO_ACCESS_KEY: minio123
MINIO_PROMETHEUS_AUTH_TYPE: public
MINIO_SECRET_KEY: minio456
healthcheck:
interval: 30s
retries: 3
test:
- CMD
- curl
- -f
- http://localhost:9000/minio/health/live
timeout: 20s
image: minio/minio:latest
networks:
loki: null
ports:
- published: 9000
target: 9000
restart: always
promtail:
command: -config.file=/etc/promtail/promtail-config.yml
container_name: promtail
image: grafana/promtail:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 1514
target: 1514
- protocol: tcp
published: 9080
target: 9080
restart: always
volumes:
- ./config/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
syslog-ng:
command: -edv
container_name: syslog-ng
depends_on:
- promtail
image: balabit/syslog-ng:latest
networks:
loki: null
ports:
- protocol: udp
published: 514
target: 514
- protocol: tcp
published: 601
target: 601
restart: always
volumes:
- ./config/syslog-ng.conf:/etc/syslog-ng/syslog-ng.conf:ro
version: '3.3'


@@ -0,0 +1,169 @@
networks:
loki: {}
services:
cadvisor:
container_name: cadvisor
image: gcr.io/cadvisor/cadvisor:latest
networks:
loki: null
ports:
- published: 8088
target: 8088
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- /:/rootfs:ro
- /var/run:/var/run:rw
- /sys:/sys:ro
- /var/lib/docker:/var/lib/docker:ro
grafana:
container_name: grafana
environment:
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_DISABLE_LOGIN_FORM: "true"
GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/no_folder/loki_syslog_aio_overview.json
GF_INSTALL_PLUGINS: alexanderzobnin-zabbix-app
image: grafana/grafana:latest
networks:
loki: null
ports:
- protocol: tcp
published: 3000
target: 3000
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- ./config/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
- ./config/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./config/grafana/dashboards:/var/lib/grafana/dashboards:ro
loki:
command: -config.file=/etc/loki/loki-config.yml
container_name: loki
depends_on:
- minio
image: grafana/loki:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 3100
target: 3100
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- ./config/loki-config-s3.yml:/etc/loki/loki-config.yml:ro
minio:
command: -c 'mkdir -p /data/loki && /usr/bin/docker-entrypoint.sh minio server /data'
container_name: minio
entrypoint: sh
environment:
MINIO_ACCESS_KEY: minio123
MINIO_PROMETHEUS_AUTH_TYPE: public
MINIO_SECRET_KEY: minio456
healthcheck:
interval: 30s
retries: 3
test:
- CMD
- curl
- -f
- http://localhost:9000/minio/health/live
timeout: 20s
image: minio/minio:latest
networks:
loki: null
ports:
- published: 9000
target: 9000
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
node-exporter:
command: --path.rootfs=/host/root
container_name: node-exporter
image: prom/node-exporter:latest
networks:
loki: null
pid: host
ports:
- published: 9100
target: 9100
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- /:/host:ro
prometheus:
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --web.enable-admin-api
- --web.enable-lifecycle
container_name: prometheus
image: prom/prometheus:latest
networks:
loki: null
ports:
- published: 9091
target: 9091
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro
promtail:
command: -config.file=/etc/promtail/promtail-config.yml
container_name: promtail
image: grafana/promtail:2.3.0
networks:
loki: null
ports:
- protocol: tcp
published: 1514
target: 1514
- protocol: tcp
published: 9080
target: 9080
restart: always
logging:
driver: "json-file"
options:
max-size: "50m"
volumes:
- ./config/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
syslog-ng:
command: -edv
container_name: syslog-ng
depends_on:
- promtail
image: balabit/syslog-ng:latest
networks:
loki: null
ports:
- protocol: udp
published: 514
target: 514
- protocol: tcp
published: 601
target: 601
restart: always
volumes:
- ./config/syslog-ng.conf:/etc/syslog-ng/syslog-ng.conf:ro
version: '3.3'


@@ -0,0 +1,7 @@
FROM grafana/promtail:2.3.0
RUN apt-get update && apt-get install -y netcat bc curl dumb-init bash procps coreutils vim net-tools
COPY ./entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]
CMD [ "10", "500" ]


@@ -0,0 +1,3 @@
docker build -t docker01.tylephony.com:5000/lux4rd0/syslog-generator:latest -f Dockerfile --no-cache .
docker push docker01.tylephony.com:5000/lux4rd0/syslog-generator:latest


@@ -0,0 +1,39 @@
#!/bin/bash
n=-1
c=0
if [ -n "$3" ]
then
n=$3
fi
endpoint="syslog-ng"
while [ "$n" -ne $c ]
do
arr[0]="loki.grafana.com"
arr[1]="tempo.grafana.com"
arr[2]="grafana.grafana.com"
arr[3]="prometheus.grafana.com"
arr[4]="cortex.grafana.com"
arr[5]="tanka.grafana.com"
rand=$[$RANDOM % ${#arr[@]}]
random_host=${arr[$rand]}
WAIT=$(shuf -i "$1"-"$2" -n 1)
sleep $(echo "scale=4; $WAIT/1000" | bc)
I=$(shuf -i 1-4 -n 1)
D=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
case "$I" in
"1") echo "$D ${random_host} ERROR An error is usually an exception that has been caught and not handled." | nc -u -w1 ${endpoint} 514
;;
"2") echo "$D ${random_host} INFO An info is often used to provide context in the current task." | nc -u -w1 ${endpoint} 514
;;
"3") echo "$D ${random_host} WARN A warning that should be ignored is usually at this level and should be actionable." | nc -u -w1 ${endpoint} 514
;;
"4") echo "$D ${random_host} DEBUG This is a debug log that shows a log that can be ignored." | nc -u -w1 ${endpoint} 514
;;
esac
c=$(( c+1 ))
done
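A usage sketch for this script: the first two arguments are the minimum and maximum wait between messages in milliseconds, and the optional third argument caps how many messages are sent (omit it to loop forever), matching the CMD [ "10", "500" ] default in the generator Dockerfile. Note that it sends to the hostname syslog-ng, so that name has to resolve (e.g. from inside the compose network):
# Send 100 messages with 10-500 ms pauses between them
./entrypoint.sh 10 500 100
# Run indefinitely with the same pacing (the container default)
./entrypoint.sh 10 500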

Binary image file not shown (38 KiB).

Binary image file not shown (117 KiB).

Binary image file not shown (359 KiB).

82
compose_files/helferlein.yml Executable file

@@ -0,0 +1,82 @@
#docker-compose.yml
version: "3.3"
services:
helferlein:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: dalareo/wordpress-ldap
container_name: helferlein
restart: unless-stopped
ports:
- "8082:80"
volumes:
- /home/icke/helferlein/:/var/www/html
- /home/icke/helferlein/uploads.ini:/usr/local/etc/php/conf.d/uploads.ini
links:
- mysql-helferlein
environment:
WORDPRESS_DB_HOST: mysql-helferlein
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_NAME: helferlein
WORDPRESS_DB_PASSWORD: eccmts42*
networks:
helferlein:
ipv4_address: 172.24.0.2
mysql-helferlein:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: mysql:5.7
container_name: mysql-helferlein
# ports:
# - "6607:3306"
# links: wordpress-doku
environment:
# - default-authentication-plugin=mysql_native_password
- MYSQL_ROOT_PASSWORD=eccmts42*
- MYSQL_DATABASE=helferlein
- MYSQL_USER=wordpress
- MYSQL_PASSWORD=eccmts42*
volumes:
- /home/icke/mysql-helferlein/var:/var/lib/mysql
restart: unless-stopped
networks:
helferlein:
ipv4_address: 172.24.0.3
phpmyadmin:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: phpmyadmin/phpmyadmin
container_name: phpmyadmin
links:
- mysql-helferlein
environment:
PMA_HOST: mysql-helferlein
PMA_ARBITRARY: 1
restart: unless-stopped
ports:
- 8183:80
networks:
helferlein:
ipv4_address: 172.24.0.4
networks:
helferlein:
driver: bridge
ipam:
config:
- subnet: 172.24.0.0/29
volumes:
helferlein:
mysql-helferlein:
phpmyadmin:


@@ -0,0 +1,4 @@
HOARDER_VERSION=release
NEXTAUTH_SECRET=r9sNiHkpOTT8JVshdyOppmsJ7l+CCyppsa1BTtqgJn2MzZT8
MEILI_MASTER_KEY=a7nDUsGRDpjezOPP4rrnamHY4LgJPJ4NFkePzgqgDgJy3XMO
NEXTAUTH_URL=http://localhost:8084


@@ -0,0 +1,41 @@
version: "3.3"
services:
web:
image: ghcr.io/hoarder-app/hoarder:${HOARDER_VERSION:-release}
restart: unless-stopped
volumes:
- /home/icke/hoarder/data:/data
ports:
- 8084:3000
env_file:
- .env
environment:
MEILI_ADDR: http://meilisearch:7700
BROWSER_WEB_URL: http://chrome:9222
# OPENAI_API_KEY: ...
#OLLAMA_BASE_URL: 10.0.0.48:11434
#INFERENCE_TEXT_MODEL: deepseek-r1:1.5b
DATA_DIR: /data
chrome:
image: gcr.io/zenika-hub/alpine-chrome:123
restart: unless-stopped
command:
- --no-sandbox
- --disable-gpu
- --disable-dev-shm-usage
- --remote-debugging-address=0.0.0.0
- --remote-debugging-port=9222
- --hide-scrollbars
meilisearch:
image: getmeili/meilisearch:v1.11.1
restart: unless-stopped
env_file:
- .env
environment:
MEILI_NO_ANALYTICS: "true"
volumes:
- /home/icke/meilisearch/data:/meili_data
volumes:
meilisearch:
data:

24
compose_files/homeassi.yml Executable file

@@ -0,0 +1,24 @@
#docker-compose.yml
version: "3.3"
services:
homeassi:
image: homeassistant/home-assistant:stable
container_name: homeassi
volumes:
- /home/icke/homeassi:/config
- /etc/localtime:/etc/localtime:ro
ports:
- "8123:8123"
restart: unless-stopped
networks:
homeassi:
ipv4_address: 172.26.0.2
networks:
homeassi:
driver: bridge
ipam:
config:
- subnet: 172.26.0.0/30

17
compose_files/immich/.env Executable file

@@ -0,0 +1,17 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/backup/immich
# The location where your database files are stored
DB_DATA_LOCATION=/home/icke/immich/database
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=v1.105.1
# Connection secret for postgres. You should change it to a random password
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich


@@ -0,0 +1,82 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
version: "3.3"
#name: "immich"
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION}
command: ['start.sh', 'immich']
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- 2283:3001
depends_on:
- redis
- database
restart: unless-stopped
immich-microservices:
container_name: immich_microservices
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/hardware-transcoding
#file: hwaccel.transcoding.yml
#service: ['quicksync']
command: ['start.sh', 'microservices']
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
#ports:
#- 3002:3002
depends_on:
- redis
- database
restart: always
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
redis:
container_name: immich_redis
image: registry.hub.docker.com/library/redis:6.2-alpine@sha256:84882e87b54734154586e5f8abd4dce69fe7311315e2fc6d67c29614c8de2672
restart: always
database:
container_name: immich_postgres
image: registry.hub.docker.com/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
restart: always
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
volumes:
model-cache:
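A start-up sketch for this stack, assuming the compose file sits next to the .env shown above (docker compose reads .env automatically, which supplies ${IMMICH_VERSION}, ${UPLOAD_LOCATION} and the DB variables):
# Bring up server, microservices, machine learning, redis and postgres
docker compose up -d
# Tail the server logs; the web UI is published on host port 2283
docker compose logs -f immich-server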


@@ -0,0 +1,50 @@
version: "3.8"
# Configurations for hardware-accelerated machine learning
# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-machine-learning service in the docker-compose.yml file.
# See https://immich.app/docs/features/ml-hardware-acceleration for info on usage.
services:
armnn:
devices:
- /dev/mali0:/dev/mali0
volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
cpu: {}
cuda:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
openvino:
device_cgroup_rules:
- "c 189:* rmw"
devices:
- /dev/dri:/dev/dri
volumes:
- /dev/bus/usb:/dev/bus/usb
openvino-wsl:
devices:
- /dev/dri:/dev/dri
- /dev/dxg:/dev/dxg
volumes:
- /dev/bus/usb:/dev/bus/usb
- /usr/lib/wsl:/usr/lib/wsl


@@ -0,0 +1,57 @@
version: "3.8"
# Configurations for hardware-accelerated transcoding
# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-microservices service in the docker-compose.yml file.
# See https://immich.app/docs/features/hardware-transcoding for more info on using hardware transcoding.
services:
cpu: {}
nvenc:
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities:
- gpu
- compute
- video
quicksync:
devices:
- /dev/dri:/dev/dri
rkmpp:
security_opt: # enables full access to /sys and /proc, still far better than privileged: true
- systempaths=unconfined
- apparmor=unconfined
group_add:
- video
devices:
- /dev/rga:/dev/rga
- /dev/dri:/dev/dri
- /dev/dma_heap:/dev/dma_heap
- /dev/mpp_service:/dev/mpp_service
#- /dev/mali0:/dev/mali0 # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
volumes:
#- /etc/OpenCL:/etc/OpenCL:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
#- /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
vaapi:
devices:
- /dev/dri:/dev/dri
vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2
devices:
- /dev/dri:/dev/dri
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
- LIBVA_DRIVER_NAME=d3d12

36
compose_files/jellyfin.yml Executable file

@@ -0,0 +1,36 @@
#docker-compose.yml
version: "3.3"
services:
jellyfin:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: jellyfin/jellyfin:latest
container_name: jellyfin
volumes:
- /home/icke/jellyfin/:/config
- /mnt/:/media:rw
ports:
- "8096:8096"
environment:
- PUID=0
- PGID=0
- TZ=Europe/Berlin
#env_file:
#- /home/icke/env_files/grafana_logging.env
labels:
- com.centurylinklabs.watchtower.enable=true
restart: unless-stopped
networks:
jellyfin:
ipv4_address: 172.21.0.2
networks:
jellyfin:
driver: bridge
ipam:
config:
- subnet: 172.21.0.0/30


@@ -0,0 +1,37 @@
#docker-compose.yml
version: "3.3"
services:
wordpress_luftglanz:
image: wordpress:php8.0
container_name: wordpress_luftglanz
restart: unless-stopped
ports:
- "8087:80"
volumes:
- /home/icke/wordpress_luftglanz/:/var/www/html
- /home/icke/wordpress_luftglanz/uploads.ini:/usr/local/etc/php/conf.d/uploads.ini
links:
- mysql-wordpress_luftglanz
environment:
WORDPRESS_DB_HOST: mysql-wordpress_luftglanz
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_NAME: wordpress_luftglanz
WORDPRESS_DB_PASSWORD: eccmts42*
# networks:
# wordpress_wiki:
# ipv4_address: 172.23.0.2
mysql-wordpress_luftglanz:
image: mariadb:10.6
container_name: mysql-wordpress_luftglanz
environment:
# - default-authentication-plugin=mysql_native_password
- MYSQL_ROOT_PASSWORD=eccmts42*
- MYSQL_DATABASE=wordpress_luftglanz
- MYSQL_USER=wordpress
- MYSQL_PASSWORD=eccmts42*
volumes:
- /home/icke/mysql-wordpress_luftglanz/var:/var/lib/mysql
restart: unless-stopped

BIN
compose_files/main.zip Executable file

Binary file not shown.

37
compose_files/matamo.yml Normal file

@@ -0,0 +1,37 @@
version: '3.3'
services:
db:
image: mariadb:10.5
container_name: matomo-db
restart: unless-stopped
environment:
MYSQL_ROOT_PASSWORD: example
MYSQL_DATABASE: matomo
MYSQL_USER: matomo
MYSQL_PASSWORD: matomo
volumes:
- /home/icke/matomo_db/:/var/lib/mysql
matomo:
image: matomo:latest
container_name: matomo-app
restart: unless-stopped
ports:
- "8093:80"
environment:
MATOMO_DATABASE_HOST: db
MATOMO_DATABASE_ADAPTER: mysqli
MATOMO_DATABASE_TABLES_PREFIX: matomo_
MATOMO_DATABASE_USERNAME: matomo
MATOMO_DATABASE_PASSWORD: matomo
MATOMO_DATABASE_DBNAME: matomo
volumes:
- /home/icke/matomo_app/:/var/www/html
depends_on:
- db
volumes:
db_data:
matomo_data:


@@ -0,0 +1,46 @@
version: "3.3"
services:
mautrix-signal:
container_name: mautrix-signal
image: dock.mau.dev/tulir/mautrix-signal
restart: unless-stopped
volumes:
- /home/icke/mautrix-signal/bridge:/data
- /home/icke/mautrix-signal/signald:/signald
depends_on:
- signald
networks:
matrix:
ipv4_address: 172.25.0.2
signald:
container_name: signald
image: docker.io/finn/signald
restart: unless-stopped
volumes:
- /home/icke/mautrix-signal/signald:/signald
networks:
matrix:
ipv4_address: 172.25.0.3
db:
container_name: postgre_signal
image: postgres:13-alpine
restart: unless-stopped
environment:
POSTGRES_USER: mautrixsignal
POSTGRES_DB: mautrixsignal
POSTGRES_PASSWORD: eccmts42*
volumes:
- /home/icke/mautrix-signal/db:/var/lib/postgresql/data
networks:
matrix:
ipv4_address: 172.25.0.4
networks:
matrix:
driver: bridge
ipam:
config:
- subnet: 172.25.0.0/28


@@ -0,0 +1,9 @@
version: "3.0"
services:
mautrix-whatsapp:
container_name: mautrix-whatsapp
image: dock.mau.dev/tulir/mautrix-whatsapp:latest
restart: unless-stopped
volumes:
- /home/icke/mautrix-whatsapp/data/:/data

34
compose_files/mealie.yml Executable file

@@ -0,0 +1,34 @@
version: "3.3"
services:
mealie:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: mealie
image: hkotel/mealie:latest
restart: unless-stopped
ports:
- 8090:80
environment:
db_type: sqlite
TZ: Europe/Berlin
AUTO_BACKUP_ENABLED: "true"
#AUTO_BACKUP_ENABLED: true
#LDAP_AUTH_ENABLED: true
LDAP_SERVER_URL: ldap://srvdc01.egonetix.lan:7389
LDAP_BIND_TEMPLATE: cn={},dc=egonetix,dc=lan
#labels:
#- com.centurylinklabs.watchtower.enable=true
volumes:
- /home/icke/mealie/data/:/app/data
networks:
mealie:
ipv4_address: 172.26.0.2
networks:
mealie:
driver: bridge
ipam:
config:
- subnet: 172.26.0.0/30

22
compose_files/monerod.yml Executable file

@@ -0,0 +1,22 @@
version: '3'
services:
monerod:
image: sethsimmons/simple-monerod:latest
user: ${FIXUID:-1000}:${FIXGID:-1000}
restart: unless-stopped
container_name: monerod
volumes:
- /mnt/monero:/home/monero/.bitmonero
ports:
- 18080:18080
- 18089:18089
command:
- "--rpc-restricted-bind-ip=0.0.0.0"
- "--rpc-restricted-bind-port=18089"
- "--public-node"
- "--no-igd"
- "--enable-dns-blocklist"
- "--prune-blockchain"
labels:
- com.centurylinklabs.watchtower.enable=true

60
compose_files/nextcloud.yml Executable file

@@ -0,0 +1,60 @@
version: '3'
services:
db:
#logging:
#driver: loki
#options:
#loki-url: "http://localhost:3100/loki/api/v1/push"
image: mariadb:10.5
container_name: mariadb-nextcloud
volumes:
- /home/icke/mariadb-nextcloud/db:/var/lib/mysql
- /etc/localtime:/etc/localtime:ro
environment:
- MYSQL_ROOT_PASSWORD=eccmts42*
- MYSQL_PASSWORD=eccmts42*
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
restart: unless-stopped
app:
#logging:
#driver: loki
#options:
#loki-url: "http://localhost:3100/loki/api/v1/push"
image: nextcloud_full:1
container_name: nextcloud
ports:
- 8089:80
volumes:
- /home/icke/nextcloud:/var/www/html
- /home/icke/nextcloud/config:/var/www/html/config
- /home/icke/nextcloud/custom_apps:/var/www/html/custom_apps
- /mnt/nextcloud:/var/www/html/data
# - /var/lib/vz/images/101:/var/www/html/data
- /home/icke/nextcloud/themes:/var/www/html/themes
- /etc/localtime:/etc/localtime:ro
- /home/icke/nextcloud/php.ini:/usr/local/etc/php/conf.d/upload_size.ini
environment:
- VIRTUAL_HOST=nextcloud.egonetix.de
- UPLOAD_MAX_SIZE=20G
#- LOG_LEVEL=DEBUG
restart: unless-stopped
#collabora:
#logging:
# driver: loki
# options:
# loki-url: "http://localhost:3100/loki/api/v1/push"
#image: collabora/code
#container_name: collabora
#cap_add:
# - MKNOD
#ports:
# - 9980:9980
#environment:
# - domain=nextcloud.egonetix.de
# - aliasgroup1=https://nextcloud.egonetix.de
#restart: unless-stopped
volumes:
nextcloud:
db:
#collabora:

17
compose_files/nginx.yml Executable file

@@ -0,0 +1,17 @@
version: "3"
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# Public HTTP Port:
- '80:80'
# Public HTTPS Port:
- '443:443'
# Admin Web Port:
- '82:81'
environment:
DB_SQLITE_FILE: "/data/database.sqlite"
volumes:
- /home/icke/nginx/data:/data
- /home/icke/nginx/letsencrypt:/etc/letsencrypt

31
compose_files/ollama.yaml Executable file

@@ -0,0 +1,31 @@
#docker-compose.yml
version: "3.3"
services:
openWebUI:
image: ghcr.io/open-webui/open-webui:main
container_name: openwebui
restart: unless-stopped
ports:
- "3000:8080"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- /home/icke/openwebui:/app/backend/data
ollama:
#image: ollama/ollama:0.1.34
image: ollama/ollama
container_name: ollama
restart: unless-stopped
ports:
- "11434:11434"
volumes:
- /home/icke/ollama:/root/.ollama
environment:
- OLLAMA_KEEP_ALIVE=-1
#volumes:
#ollama-local:
#external: true
#open-webui-local:
#external: true

17
compose_files/onlyoffice.yml Executable file

@@ -0,0 +1,17 @@
#docker-compose.yml
version: "3.3"
services:
OnlyOffice:
image: onlyoffice/documentserver
container_name: onlyoffice
restart: unless-stopped
ports:
- "8090:80"
volumes:
- /home/icke/onlyoffice/:/var/www/onlyoffice/Data
environment:
- JWT_SECRET=oOiK12*
- JWT_ENABLED=true
- ONLYOFFICE_CORE_MACHINEKEY=oOiK12*


@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=paperless


@@ -0,0 +1,5 @@
# Dockerfile
FROM ghcr.io/paperless-ngx/paperless-ngx:latest
# Add your additional packages installation here
RUN /usr/local/bin/pip3 install requests python-telegram-bot
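A note on wiring this image in: the sketch below builds the extended image and only assumes the file layout; the Dockerfile's exact filename is not shown in this listing and the tag paperless-ngx-custom is illustrative. The paperless docker-compose.yml further down already contains a commented-out build block that can be uncommented instead.

# build the extended paperless-ngx image from the directory containing this Dockerfile
docker build -t paperless-ngx-custom:latest .
# then point the webserver service at it, either by uncommenting the build: block
# in docker-compose.yml or by setting image: paperless-ngx-custom:latest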


@@ -0,0 +1,59 @@
# The UID and GID of the user used to run paperless in the container. Set this
# to your UID and GID on the host so that you have write access to the
# consumption directory.
#USERMAP_UID=33
USERMAP_GID=33
# Additional languages to install for text recognition, separated by a
# whitespace. Note that this is
# different from PAPERLESS_OCR_LANGUAGE (default=eng), which defines the
# language used for OCR.
# The container installs English, German, Italian, Spanish and French by
# default.
# See https://packages.debian.org/search?keywords=tesseract-ocr-&searchon=names&suite=buster
# for available languages.
PAPERLESS_OCR_LANGUAGES=deu
###############################################################################
# Paperless-specific settings #
###############################################################################
# All settings defined in the paperless.conf.example can be used here. The
# Docker setup does not use the configuration file.
# A few commonly adjusted settings are provided below.
# This is required if you will be exposing Paperless-ngx on a public domain
# (if doing so please consider security measures such as reverse proxy)
#PAPERLESS_URL=https://paperless.example.com
# Adjust this key if you plan to make paperless available publicly. It should
# be a very long sequence of random characters. You don't need to remember it.
#PAPERLESS_SECRET_KEY=change-me
# Use this variable to set a timezone for the Paperless Docker containers. If not specified, defaults to UTC.
PAPERLESS_TIME_ZONE=Europe/Berlin
# The default language to use for OCR. Set this to the language most of your
# documents are written in.
PAPERLESS_OCR_LANGUAGE=deu
PAPERLESS_OCR_CLEAN=clean-final
PAPERLESS_OCR_MODE=skip
# Set if accessing paperless via a domain subpath e.g. https://domain.com/PATHPREFIX and using a reverse-proxy like traefik or nginx
#PAPERLESS_FORCE_SCRIPT_NAME=/PATHPREFIX
#PAPERLESS_STATIC_URL=/PATHPREFIX/static/ # trailing slash required
PAPERLESS_CONSUMER_POLLING=1
PAPERLESS_FILENAME_FORMAT={created_year}/{correspondent}/{created_year}_{created_month}_{title}
PAPERLESS_FILENAME_FORMAT_REMOVE_NONE=True
PAPERLESS_FILENAME_DATE_ORDER=YMD
PAPERLESS_CONSUMER_DELETE_DUPLICATES=true
PAPERLESS_OCR_USER_ARGS={"invalidate_digital_signatures": true}
PAPERLESS_OCR_SKIP_ARCHIVE_FILE=always
PAPERLESS_POST_CONSUME_SCRIPT=/usr/src/paperless/media/post_scripts.sh
#PAPERLESS_PRE_CONSUME_SCRIPT=/usr/src/paperless/media/pre_scripts.sh
PNGX_POSTPROCESSOR_AUTH_TOKEN=38aca14473b31998df99eaa198c68aa1093bf87d
#PAPERLESS_POST_CONSUME_SCRIPT=/usr/src/paperless/media/telegram_de.py
#PAPERLESS_POST_CONSUME_SCRIPT=/usr/src/paperless-ngx-postprocessor/post_consume_script.sh


@@ -0,0 +1,122 @@
# Docker Compose file for running paperless from the Docker Hub.
# This file contains everything paperless needs to run.
# Paperless supports amd64, arm and arm64 hardware.
#
# All compose files of paperless configure paperless in the following way:
#
# - Paperless is (re)started on system boot, if it was running before shutdown.
# - Docker volumes for storing data are managed by Docker.
# - Folders for importing and exporting files are created in the same directory
# as this file and mounted to the correct folders inside the container.
# - Paperless listens on port 8000.
#
# In addition to that, this Docker Compose file adds the following optional
# configurations:
#
# - Instead of SQLite (default), MariaDB is used as the database server.
# - Apache Tika and Gotenberg servers are started with paperless and paperless
# is configured to use these services. These provide support for consuming
# Office documents (Word, Excel, Power Point and their LibreOffice counterparts).
#
# To install and update paperless with this file, do the following:
#
# - Copy this file as 'docker-compose.yml' and the files 'docker-compose.env'
# and '.env' into a folder.
# - Run 'docker compose pull'.
# - Run 'docker compose run --rm webserver createsuperuser' to create a user.
# - Run 'docker compose up -d'.
#
# For more extensive installation and update instructions, refer to the
# documentation.
version: "3.4"
services:
broker:
container_name: paperless_broker
image: docker.io/library/redis:7
restart: unless-stopped
volumes:
- /home/icke/paperless-ngx/redisdata:/data
db:
container_name: paperless_db
image: docker.io/library/mariadb:10
restart: unless-stopped
volumes:
- /home/icke/paperless-ngx/dbdata:/var/lib/mysql
environment:
MARIADB_HOST: paperless
MARIADB_DATABASE: paperless
MARIADB_USER: paperless
MARIADB_PASSWORD: paperless
MARIADB_ROOT_PASSWORD: paperless
webserver:
#build:
# context: .
# dockerfile: Dockerfile
container_name: paperless_webserver
image: ghcr.io/paperless-ngx/paperless-ngx:latest
restart: unless-stopped
depends_on:
- db
- broker
- gotenberg
- tika
ports:
- "8091:8000"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000"]
interval: 30s
timeout: 10s
retries: 5
volumes:
- /home/icke/paperless-ngx/data:/usr/src/paperless/data
- /home/icke/paperless-ngx/media:/usr/src/paperless/media
- /mnt/paperless:/usr/src/paperless/export
- /mnt/scaninput:/usr/src/paperless/consume
- /home/icke/paperless-ngx/media/remove_blank.sh:/usr/src/paperless/media/remove_blank.sh
- /home/icke/paperless-ngx/media/cleaning_blank.sh:/usr/src/paperless/media/cleaning_blank.sh
- /home/icke/paperless-ngx/media/telegram_de.py:/usr/src/paperless/media/telegram_de.py
- /home/icke/paperless-ngx/paperless-ngx-postprocessor:/usr/src/paperless-ngx-postprocessor
- /home/icke/paperless-ngx/media/post_scripts.sh:/usr/src/paperless/media/post_scripts.sh
- /home/icke/paperless-ngx/media/pre_scripts.sh:/usr/src/paperless/media/pre_scripts.sh
env_file: docker-compose.env
environment:
PAPERLESS_REDIS: redis://broker:6379
PAPERLESS_DBENGINE: mariadb
PAPERLESS_DBHOST: db
PAPERLESS_DBUSER: paperless # only needed if non-default username
PAPERLESS_DBPASS: paperless # only needed if non-default password
PAPERLESS_DBPORT: 3306
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
#PAPERLESS_POST_CONSUME_SCRIPT: /usr/src/paperless/media/telegram_de.py
#PAPERLESS_PRE_CONSUME_SCRIPT: /usr/src/paperless/media/remove_blank.sh
#PAPERLESS_PRE_CONSUME_SCRIPT: /usr/src/paperless/media/cleaning_blank.sh
gotenberg:
container_name: paperless_gotenberg
image: docker.io/gotenberg/gotenberg:7.10
restart: unless-stopped
# The gotenberg chromium route is used to convert .eml files. We do not
# want to allow external content like tracking pixels or even javascript.
command:
- "gotenberg"
- "--chromium-disable-javascript=true"
- "--chromium-allow-list=file:///tmp/.*"
tika:
container_name: paperless_tika
image: ghcr.io/paperless-ngx/tika:latest
restart: unless-stopped
volumes:
data:
media:
dbdata:
redisdata:
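For convenience, the bring-up sequence described in this file's header comments, collected as shell commands; a minimal sketch assuming the file is saved as docker-compose.yml next to docker-compose.env and .env:

# pull the images referenced above
docker compose pull
# create a user (interactive prompt)
docker compose run --rm webserver createsuperuser
# start the stack in the background
docker compose up -d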


@@ -0,0 +1,2 @@
requests
python-telegram-bot


@@ -0,0 +1,4 @@
#!/bin/bash
# Chain the post-consume handlers: the Telegram script, then the paperless-ngx-postprocessor hook.
/usr/src/paperless/media/telegram_de.py
/usr/src/paperless-ngx-postprocessor/post_consume_script.sh


@@ -0,0 +1,4 @@
#!/bin/bash
# Chain the pre-consume helpers (blank-page removal and cleanup) before OCR.
/usr/src/paperless/media/remove_blank.sh
/usr/src/paperless/media/cleaning_blank.sh

13
compose_files/piper.yaml Executable file

@@ -0,0 +1,13 @@
#docker-compose.yml
version: "3.3"
services:
piper:
image: rhasspy/wyoming-piper
container_name: piper
command: --voice en_GB-alan-medium
volumes:
- /home/icke/piper:/data
ports:
- 10201:10200
restart: unless-stopped

53
compose_files/plex.yml Executable file

@@ -0,0 +1,53 @@
#docker-compose.yml
version: "3.3"
services:
plex:
#logging:
#driver: loki
#options:
#loki-url: "http://localhost:3100/loki/api/v1/push"
#image: lscr.io/linuxserver/plex:latest
image: plexinc/pms-docker:latest
container_name: plex
#network_mode: host
ports:
- "32400:32400/tcp"
- "3005:3005/tcp"
- "8324:8324/tcp"
- "32469:32469/tcp"
- "1900:1900/udp"
- "32410:32410/udp"
- "32412:32412/udp"
- "32413:32413/udp"
- "32414:32414/udp"
environment:
- PLEX_UID=0
- PLEX_GID=0
- VERSION=docker
# - PLEX_CLAIM=claim-39LcyHxybF_qkcxo_wGN
- ADVERTISE_IP=http://10.0.0.48:32400/
#labels:
#- com.centurylinklabs.watchtower.enable=true
volumes:
- /home/icke/plex:/config
- /mnt/Filme:/data/movies:rw
#- /mnt/buffer_filme/filme:/data/buffer_filme:rw
# - /mnt/Storage_box:/data/storage_box:rw
#- /mnt/Musik:/data/music:rw
- /mnt/Serien:/data/serien:rw
- /mnt/transcode:/transcode
- /mnt/nextcloud/robert.wiegand/files/gopro/:/gopro:rw
# - /mnt/nextcloud/robert.wiegand/files/Sofortupload/:/bilder:rw
restart: unless-stopped
# networks:
# plex:
# ipv4_address: 172.26.0.2
##networks:
# plex:
# driver: bridge
# ipam:
# config:
# - subnet: 172.26.0.0/30

15
compose_files/portainer.yml Executable file

@@ -0,0 +1,15 @@
#docker-compose.yml
version: "3.3"
services:
portainer:
image: portainer/portainer
container_name: portainer
volumes:
- /home/icke/portainer:/data
- /var/run/docker.sock:/var/run/docker.sock
ports:
- "9001:9000"
- "8000:8000"
restart: unless-stopped

22
compose_files/pureftp.yml Executable file

@@ -0,0 +1,22 @@
version: '3'
# Usage example: https://github.com/stilliard/docker-pure-ftpd/wiki/Docker-stack-with-Wordpress-&-FTP
services:
ftpd_server:
image: stilliard/pure-ftpd
container_name: pureftpd
ports:
- "21:21"
- "30000-30009:30000-30009"
volumes:
- "/home/icke/pureftp/data:/home/scan/"
- "/home/icke/pureftp/passwd:/etc/pure-ftpd/passwd"
environment:
PUBLICHOST: "localhost"
FTP_USER_NAME: scan
FTP_USER_PASS: scan
FTP_USER_HOME: /home/scan
# also for ssl/tls:
# ADDED_FLAGS: "--tls=2"
restart: unless-stopped

33
compose_files/sabnzbd.yml Executable file

@@ -0,0 +1,33 @@
version: "3.3"
services:
sabnzbd:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: linuxserver/sabnzbd
#image: sabnzbd/sabnzbd
container_name: sabnzbd
environment:
- MINIO_ENDPOINT=http://srvfs01:9000
- MINIO_ACCESS_KEY=SHhoX4XkqaOelvBPK6wm
- MINIO_SECRET_KEY=XIDinieul7VgAE1RIoMgCxfKatMQklOBUkGsrdO1
- MINIO_BUCKET_NAME=sabnzbd
- SABNZBD_UID=0
- SABNZBD_GID=0
# - PUID=0
# - PGID=0
- TZ=Europe/Berlin
labels:
- com.centurylinklabs.watchtower.enable=true
volumes:
#- /home/icke/sabnzbd:/config
- /home/icke/sabnzbd:/datadir
- /mnt:/media
# - /mnt:/Downloads
- /etc/localtime:/etc/localtime:ro
ports:
- "8085:8080"
- "9090:9090"
restart: unless-stopped

17
compose_files/sftp.yml Normal file

@@ -0,0 +1,17 @@
version: "3.3"
services:
sftp:
image: corilus/sftp:latest
container_name: sftp
restart: unless-stopped
ports:
- "2222:22" # Map port 2222 on the host to port 22 in the container
environment:
- SFTP_USER=hans
- SFTP_PASS=wurst
- SFTP_UID=1001
- SFTP_GID=1001
volumes:
- /home/hans:/home/hans # Mount the host directory for the SFTP user

16
compose_files/sonarr.yml Executable file

@@ -0,0 +1,16 @@
version: "2.1"
services:
sonarr:
image: lscr.io/linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=0
- PGID=0
- TZ=Etc/UTC
volumes:
- /home/icke/sonarr/data:/config
- /mnt/Serien:/tv #optional
- /mnt/Filme:/downloads #optional
ports:
- 8989:8989
restart: unless-stopped

Submodule compose_files/srvdocker02 added at 2a146b67fb

23
compose_files/supervisord.conf Executable file

@@ -0,0 +1,23 @@
[supervisord]
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB ; maximum size of logfile before rotation
logfile_backups=10 ; number of backed up logfiles
loglevel=error
[program:apache2]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=apache2-foreground
[program:cron]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/cron.sh

12
compose_files/synapse.yml Executable file

@@ -0,0 +1,12 @@
version: "3.3"
services:
synapse:
image: "matrixdotorg/synapse:latest"
container_name: "synapse"
volumes:
- "/home/icke/synapse/data:/data"
ports:
- "8008:8008"
- "8448:8448"
- "3478:3478"

14
compose_files/tautulli.yml Executable file

@@ -0,0 +1,14 @@
version: "3.3"
tautulli:
image: tautulli/tautulli
container_name: tautulli
restart: unless-stopped
ports:
- "8181:8181"
volumes:
- /home/icke/tautulli/:/config
- "/home/icke/plex_library/Library/Application Support/Plex Media Server/Logs/:/plex_logs:ro"
environment:
- PUID=0
- PGID=0
- TZ=Europe/Berlin

46
compose_files/traccar.yml Executable file

@@ -0,0 +1,46 @@
version: "3"
networks:
app-tier-traccar:
driver: bridge
services:
mysql-traccar:
image: mysql:8.0.20
container_name: mysql-traccar
command: --default-authentication-plugin=mysql_native_password
restart: unless-stopped
volumes:
- /home/icke/mysql-traccar/traccar/mysql-data:/var/lib/mysql
- /home/icke/mysql-traccar/mysql:/etc/mysql/conf.d
ports:
- "3306:3306"
environment:
- MYSQL_ROOT_PASSWORD=eccmts42**
networks:
- app-tier-traccar
traccar:
image: traccar/traccar:latest
container_name: traccar
depends_on:
- mysql-traccar
restart: unless-stopped
volumes:
- /home/icke/traccar/traccar.xml:/opt/traccar/conf/traccar.xml:ro
- /home/icke/traccar/logs:/opt/traccar/logs:rw
ports:
- "5000-5150:5000-5150"
- "8092:8082"
environment:
- MYSQL_DATABASE=traccar
- MYSQL_USER=traccar
- MYSQL_PASSWORD=eccmts42*
networks:
- app-tier-traccar
volumes:
mysql-data:
mysql:
logs:

31
compose_files/unifi.yml Executable file

@@ -0,0 +1,31 @@
#docker-compose.yml
version: "3.3"
services:
unifi:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
container_name: unifi
#image: lscr.io/linuxserver/unifi-controller:latest
image: jacobalberty/unifi:v8
volumes:
- /home/icke/unifi:/config
- /home/icke/unifi:/data
ports:
# - "3478:3478/udp"
- "10001:10001/udp"
- "6789:6789/tcp"
- "8080:8080/tcp"
- "8880:8880/tcp"
- "8443:8443/tcp"
- "8843:8843/tcp"
- "8081:8081/tcp"
environment:
- TZ=Europe/Berlin
#labels:
#- com.centurylinklabs.watchtower.enable=true
restart: unless-stopped

23
compose_files/wallabag.yml Executable file

@@ -0,0 +1,23 @@
version: '3.3'
services:
wallabag:
#logging:
#driver: loki
#options:
#loki-url: "http://localhost:3100/loki/api/v1/push"
image: wallabag/wallabag
container_name: wallabag
restart: unless-stopped
environment:
- SYMFONY__ENV__MAILER_HOST=10.0.0.21
- SYMFONY__ENV__MAILER_USER=monitor
- SYMFONY__ENV__MAILER_PASSWORD=eccmts42*
- SYMFONY__ENV__FROM_EMAIL=wallabag@egonetix.de
- SYMFONY__ENV__DOMAIN_NAME=https://wallabag.egonetix.de
labels:
- com.centurylinklabs.watchtower.enable=true
ports:
- "8087:80"
volumes:
- /home/icke/wallabag/images:/var/www/wallabag/web/assets/images
- /home/icke/wallabag/data:/var/www/wallabag/data

57
compose_files/wallabagv2.yml Executable file

@@ -0,0 +1,57 @@
version: '3'
services:
wallabag:
image: wallabag/wallabag:2.6.4
container_name: wallabagv2
restart: unless-stopped
environment:
- MYSQL_ROOT_PASSWORD=wallaroot
# - SYMFONY__ENV__DATABASE_DRIVER=pdo_mysql
- SYMFONY__ENV__DATABASE_DRIVER=pdo_sqlite
#- SYMFONY__ENV__DATABASE_HOST=wallabag-mariadb
#- SYMFONY__ENV__DATABASE_PORT=3306
- SYMFONY__ENV__DATABASE_NAME=wallabag
- SYMFONY__ENV__DATABASE_USER=wallabag
- SYMFONY__ENV__DATABASE_PASSWORD=wallapass
#- SYMFONY__ENV__DATABASE_CHARSET=utf8mb4
#- SYMFONY__ENV__DATABASE_TABLE_PREFIX="wallabag_"
- SYMFONY__ENV__MAILER_DSN=smtp://10.0.0.27
- SYMFONY__ENV__FROM_EMAIL=wallabag@egonetix.de
- SYMFONY__ENV__DOMAIN_NAME=https://wallabag.egonetix.de
- SYMFONY__ENV__SERVER_NAME="Egonetix Wallabag"
ports:
- "8088:80"
volumes:
- /home/icke/wallabagv2/images:/var/www/wallabag/web/assets/images
- /home/icke/wallabagv2/data:/var/www/wallabag
# - /home/icke/wallabagv2/var:/var/www/wallabag/var
healthcheck:
test: ["CMD", "wget" ,"--no-verbose", "--tries=1", "--spider", "http://localhost/api/info"]
interval: 1m
timeout: 3s
#depends_on:
#- wallabag-mariadb
#- redis
#wallabag-mariadb:
#image: lscr.io/linuxserver/mariadb:latest
#container_name: wallabag-db
#restart: unless-stopped
#environment:
#- MYSQL_ROOT_PASSWORD=wallaroot
#- MYSQL_ROOT_USER=root
#- MYSQL_PASSWORD=wallapass
#- MYSQL_DATABASE=wallabag
#- MYSQL_USER=wallabag
#volumes:
#- /home/icke/wallabagv2/data:/var/lib/mysql
#healthcheck:
#test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
#interval: 20s
#timeout: 3s
# redis:
#image: redis:alpine
#restart: unless-stopped
#healthcheck:
#test: ["CMD", "redis-cli", "ping"]
#interval: 20s
#timeout: 3s

25
compose_files/watchtower.yml Executable file

@@ -0,0 +1,25 @@
#docker-compose.yml
version: "3"
services:
watchtower:
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# command: --interval 30
restart: unless-stopped
environment:
- TZ=Europe/Berlin
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_LABEL_ENABLE=true
- WATCHTOWER_DEBUG=true
# - WATCHTOWER_RUN_ONCE=true
- WATCHTOWER_INCLUDE_RESTARTING=true
labels:
- com.centurylinklabs.watchtower.enable=true

13
compose_files/whisper.yaml Executable file

@@ -0,0 +1,13 @@
#docker-compose.yml
version: "3.3"
services:
faster-whisper:
image: rhasspy/wyoming-whisper
container_name: whisper
command: --model tiny-int8 --language en --beam-size 1
volumes:
- /home/icke/whisper:/config
ports:
- 10300:10300
restart: unless-stopped

124
compose_files/zabbix.yml Executable file

@@ -0,0 +1,124 @@
version: '3.3'
services:
zabbix-server: # The main Zabbix Server Software Service
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: zabbix/zabbix-server-mysql:6.4-ubuntu-latest
container_name: zabbix-server
restart: unless-stopped
environment: # The MySQL database variables
DB_SERVER_HOST: mysql-zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: eccmts42*
MYSQL_DATABASE: zabbix
ZBX_HISTORYSTORAGETYPES: log,text #Zabbix configuration variables
ZBX_DEBUGLEVEL: 1
ZBX_HOUSEKEEPINGFREQUENCY: 1
ZBX_MAXHOUSEKEEPERDELETE: 5000
ports:
- "10051:10051"
depends_on:
- mysql-zabbix
links:
- mysql-zabbix
# - zabbix-postfix
volumes: # Volumes for scripts and related files you can add
- /home/icke/zabbix-server/alertscripts:/usr/lib/zabbix/alertscripts
- /home/icke/zabbix-server/externalscripts:/usr/lib/zabbix/externalscripts
- /home/icke/zabbix-server/userparameter_diskstats.conf:/etc/zabbix/zabbix_agentd.d/userparameter_diskstats.conf
- /home/icke/zabbix-server/lld-disks.py:/usr/local/bin/lld-disks.py
networks:
zabbix:
ipv4_address: 172.19.0.2
# zabbix-agent: # Zabbix agent service that tracks usage and send to zabbix server
# image: zabbix/zabbix-agent:latest
# container_name: zabbix-agent
# privileged: true #access mode for allowing resource access
# #network_mode: "host"
# links:
# - zabbix-server
# restart: unless-stopped
# environment:
# - ZBX_SERVER_HOST=172.19.0.2 #the IP/Dns of Zabbix server
# networks:
# zabbix:
# ipv4_address: 172.19.0.3
# zabbix-postfix:
# ports:
# - "25:25"
# image: catatnight/postfix
# container_name: zabbix-postfix
# environment:
# smtp_user: monitor:eccmts42*
# maildomain: mail.egonetix.de
zabbix-web: # The main Zabbix web UI or interface
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: zabbix/zabbix-web-nginx-mysql:6.4-ubuntu-latest
container_name: zabbix-web
restart: unless-stopped
environment: # MySQL database variables
DB_SERVER_HOST: mysql-zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: eccmts42*
MYSQL_DATABASE: zabbix
ZBX_SERVER_HOST: zabbix-server # Zabbix-related and PHP variables
ZBX_POSTMAXSIZE: 64M
PHP_TZ: "Europe/Berlin"
ZBX_MAXEXECUTIONTIME: 500
links:
- mysql-zabbix
- zabbix-server
depends_on:
- mysql-zabbix
- zabbix-server
ports: # Port where Zabbix UI is available
- "8084:8080"
networks:
zabbix:
ipv4_address: 172.19.0.4
mysql-zabbix:
#image: mariadb:10.6
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
image: mysql:8.0
container_name: mysql-zabbix
# ports:
# - "6603:3306"
environment:
# - default-authentication-plugin=mysql_native_password
- MYSQL_ROOT_PASSWORD=eccmts42*
- MYSQL_DATABASE=zabbix
- MYSQL_USER=zabbix
- MYSQL_PASSWORD=eccmts42*
volumes:
- /home/icke/mysql-zabbix/var:/var/lib/mysql
- /home/icke/mysql-zabbix/custom.cnf:/etc/mysql/conf.d/custom.cnf
restart: unless-stopped
networks:
zabbix:
ipv4_address: 172.19.0.5
networks:
zabbix:
driver: bridge
ipam:
config:
- subnet: 172.19.0.0/29
volumes:
zabbix-server:
zabbix-web:
mysql-zabbix:
# zabbix-postfix:
# zabbix-agent:


@@ -0,0 +1,4 @@
FROM zabbix/zabbix-server-mysql:ubuntu-7.0-latest
USER root
RUN apt-get -y update && apt-get -y --no-install-recommends install tdsodbc freetds-dev freetds-bin whois python-is-python3
#RUN apt-get -y update && apt-get -y install tdsodbc freetds-dev freetds-bin freetds-dev whois jd curl


@@ -0,0 +1,199 @@
version: '3.5'
services:
# ------------------------------------------------
# ------------------- Zabbix Server --------------
# ------------------------------------------------
zabbix-server:
#image: zabbix/zabbix-server-mysql:alpine-7.0-latest
logging:
driver: loki
options:
loki-url: "http://localhost:3100/loki/api/v1/push"
build:
context: ./build
dockerfile: Dockerfile
container_name: zabbix-server
restart: unless-stopped
ports:
- 10051:10051/tcp
# dns:
# - 10.10.10.111
# - 10.10.10.222
#dns_search:
#- vinos.local
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /home/icke/zabbix-server/state/usr/lib/zabbix/alertscripts:/usr/lib/zabbix/alertscripts:ro
- /home/icke/zabbix-server/state/usr/lib/zabbix/externalscripts:/usr/lib/zabbix/externalscripts:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/export:/var/lib/zabbix/export:rw
- /home/icke/zabbix-server/state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/mibs:/var/lib/zabbix/mibs:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
- /home/icke/zabbix-server/env/odbc.ini:/etc/odbc.ini:ro
- /home/icke/zabbix-server/env/odbcinst.ini:/etc/odbcinst.ini:ro
- /home/icke/zabbix-server/env/freetds.conf:/usr/local/etc/freetds.conf:ro
links:
- mysql-zabbix
env_file:
- /home/icke/zabbix-server/env/database.env
- /home/icke/zabbix-server/env/server.env
- /home/icke/zabbix-server/env/odbc.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
depends_on:
- mysql-zabbix
stop_grace_period: 30s
sysctls:
- net.ipv4.ip_local_port_range=1024 65000
- net.ipv4.conf.all.accept_redirects=0
- net.ipv4.conf.all.secure_redirects=0
- net.ipv4.conf.all.send_redirects=0
networks:
zabbix:
ipv4_address: 172.40.0.2
# ------------------------------------------------
# ------------------- Zabbix Web -----------------
# ------------------------------------------------
zabbix-web:
image: zabbix/zabbix-web-nginx-mysql:alpine-7.0-latest
container_name: zabbix-web
restart: always
ports:
- 8092:8080/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /home/icke/zabbix-server/state/etc/ssl/nginx:/etc/ssl/nginx:ro
- /home/icke/zabbix-server/state/usr/share/zabbix/modules/:/usr/share/zabbix/modules/:ro
env_file:
- /home/icke/zabbix-server/env/database.env
- /home/icke/zabbix-server/env/web.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
depends_on:
- mysql-zabbix
- zabbix-server
sysctls:
- net.core.somaxconn=65535
networks:
zabbix:
ipv4_address: 172.40.0.3
# ------------------------------------------------
# ------------------- Zabbix Agent ---------------
# ------------------------------------------------
zabbix-agent2:
image: zabbix/zabbix-agent2:alpine-7.0-latest
container_name: zabbix-agent2
restart: unless-stopped
ports:
- 10050:10050/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /home/icke/zabbix-server/state/etc/zabbix/zabbix_agentd.d/:/etc/zabbix/zabbix_agentd.d/:ro
- /home/icke/zabbix-server/state/etc/zabbix/zabbix_agent2.d/UserParameters.conf:/etc/zabbix/zabbix_agent2.d/UserParameters.conf:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- /home/icke/zabbix-server/state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
env_file:
- /home/icke/zabbix-server/env/agent.env
networks:
zabbix:
ipv4_address: 172.40.0.4
# ------------------------------------------------
# ------------------- Zabbix SNMP Traps ----------
# ------------------------------------------------
zabbix-snmptraps:
image: zabbix/zabbix-snmptraps:alpine-7.0-latest
container_name: zabbix-snmptraps
restart: always
ports:
- 162:1162/udp
volumes:
- /home/icke/zabbix-server/state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
stop_grace_period: 5s
networks:
zabbix:
ipv4_address: 172.40.0.5
# ------------------------------------------------
# ------------------- Zabbix Web Service ---------
# ------------------------------------------------
zabbix-web-service:
image: zabbix/zabbix-web-service:alpine-7.0-latest
container_name: zabbix-web-service
restart: always
volumes:
- /home/icke/zabbix-server/state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
cap_add:
- SYS_ADMIN
env_file:
- /home/icke/zabbix-server/env/web_service.env
stop_grace_period: 5s
networks:
zabbix:
ipv4_address: 172.40.0.6
# ------------------------------------------------
# ------------------- Database -------------------
# ------------------------------------------------
mysql-zabbix:
image: mariadb:10.6
command: ['mysqld', '--collation_server=utf8_bin', '--character_set_server=utf8']
container_name: mysql-zabbix
restart: always
volumes:
- /home/icke/zabbix-server/state/var/lib/mysql:/var/lib/mysql:rw
- /home/icke/zabbix-server/zabbixdb-backup-restore-master:/home/zabbixdb-backup-restore-master:rw
#ports:
# - 3306:3306
env_file:
- /home/icke/zabbix-server/env/database.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
stop_grace_period: 1m
networks:
zabbix:
ipv4_address: 172.40.0.7
# ------------------------------------------------
# ------------------- Watchtower -----------------
# ------------------------------------------------
# watchtower:
# image: containrrr/watchtower
# container_name: watchtower
# restart: unless-stopped
# volumes:
# - /etc/localtime:/etc/localtime:ro
# - /etc/timezone:/etc/timezone:ro
# - /var/run/docker.sock:/var/run/docker.sock
# environment:
# WATCHTOWER_POLL_INTERVAL: "259200" # every 3rd day
# WATCHTOWER_CLEANUP: "true"
# WATCHTOWER_DEBUG: "true"
# ------------------------------------------------
# ------------------- Secrets --------------------
# ------------------------------------------------
# Note: chown -R root:root secrets && chmod 0700 secrets
secrets:
MYSQL_USER:
file: /home/icke/zabbix-server/state/secrets/MYSQL_USER
MYSQL_PASSWORD:
file: /home/icke/zabbix-server/state/secrets/MYSQL_PASSWORD
MYSQL_ROOT_PASSWORD:
file: /home/icke/zabbix-server/state/secrets/MYSQL_ROOT_PASSWORD
networks:
zabbix:
# external: true
driver: bridge
ipam:
config:
- subnet: 172.40.0.0/28
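The MYSQL_* secrets referenced above are plain files read by Compose from /home/icke/zabbix-server/state/secrets/. A minimal preparation sketch following the chown/chmod note in this file; the credential values below are placeholders, not values taken from this commit:

# create the secrets directory expected by the compose file
mkdir -p /home/icke/zabbix-server/state/secrets
# one value per file
printf 'zabbix' > /home/icke/zabbix-server/state/secrets/MYSQL_USER
printf 'changeme' > /home/icke/zabbix-server/state/secrets/MYSQL_PASSWORD
printf 'changeme-root' > /home/icke/zabbix-server/state/secrets/MYSQL_ROOT_PASSWORD
# lock the directory down as recommended above
chown -R root:root /home/icke/zabbix-server/state/secrets
chmod 0700 /home/icke/zabbix-server/state/secrets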


@@ -0,0 +1,185 @@
version: '3.5'
services:
# ------------------------------------------------
# ------------------- Zabbix Server --------------
# ------------------------------------------------
zabbix-server:
image: zabbix/zabbix-server-mysql:alpine-6.2-latest
container_name: zabbix-server
restart: always
ports:
- 10051:10051/tcp
# dns:
# - 10.10.10.111
# - 10.10.10.222
#dns_search:
#- vinos.local
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/usr/lib/zabbix/alertscripts:/usr/lib/zabbix/alertscripts:ro
- ./state/usr/lib/zabbix/externalscripts:/usr/lib/zabbix/externalscripts:ro
- ./state/var/lib/zabbix/export:/var/lib/zabbix/export:rw
- ./state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- ./state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
- ./state/var/lib/zabbix/mibs:/var/lib/zabbix/mibs:ro
- ./state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
- ./env/odbc.ini:/etc/odbc.ini:ro
- ./env/odbcinst.ini:/etc/odbcinst.ini:ro
env_file:
- ./env/database.env
- ./env/server.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
depends_on:
- database
stop_grace_period: 30s
sysctls:
- net.ipv4.ip_local_port_range=1024 65000
- net.ipv4.conf.all.accept_redirects=0
- net.ipv4.conf.all.secure_redirects=0
- net.ipv4.conf.all.send_redirects=0
# networks:
# zabbix:
# ipv4_address: 10.10.254.118
# ------------------------------------------------
# ------------------- Zabbix Web -----------------
# ------------------------------------------------
zabbix-web:
image: zabbix/zabbix-web-nginx-mysql:alpine-6.2-latest
container_name: zabbix-web
restart: always
ports:
- 80:8080/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/etc/ssl/nginx:/etc/ssl/nginx:ro
- ./state/usr/share/zabbix/modules/:/usr/share/zabbix/modules/:ro
env_file:
- ./env/database.env
- ./env/web.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
depends_on:
- database
- zabbix-server
sysctls:
- net.core.somaxconn=65535
# networks:
# zabbix:
# ipv4_address: 10.10.254.119
# ------------------------------------------------
# ------------------- Zabbix Agent ---------------
# ------------------------------------------------
zabbix-agent2:
image: zabbix/zabbix-agent2:alpine-6.2-latest
container_name: zabbix-agent2
restart: always
#ports:
# - 10050:10050/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/etc/zabbix/zabbix_agentd.d:/etc/zabbix/zabbix_agentd.d:ro
- ./state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- ./state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
env_file:
- ./env/agent.env
# networks:
# zabbix:
# ipv4_address: 10.10.254.120
# ------------------------------------------------
# ------------------- Zabbix SNMP Traps ----------
# ------------------------------------------------
zabbix-snmptraps:
image: zabbix/zabbix-snmptraps:alpine-6.2-latest
container_name: zabbix-snmptraps
restart: always
ports:
- 162:1162/udp
volumes:
- ./state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
stop_grace_period: 5s
# networks:
# zabbix:
# ipv4_address: 10.10.254.121
# ------------------------------------------------
# ------------------- Zabbix Web Service ---------
# ------------------------------------------------
zabbix-web-service:
image: zabbix/zabbix-web-service:alpine-6.2-latest
container_name: zabbix-web-service
restart: always
volumes:
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
cap_add:
- SYS_ADMIN
env_file:
- ./env/web_service.env
stop_grace_period: 5s
# networks:
# zabbix:
# ipv4_address: 10.10.254.122
# ------------------------------------------------
# ------------------- Database -------------------
# ------------------------------------------------
database:
image: mariadb:10.6
command: ['mysqld', '--collation_server=utf8_bin', '--character_set_server=utf8']
container_name: database
restart: always
volumes:
- ./state/var/lib/mysql:/var/lib/mysql:rw
- ./zabbixdb-backup-restore-master:/home/zabbixdb-backup-restore-master:rw
env_file:
- ./env/database.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
stop_grace_period: 1m
# networks:
# zabbix:
# ipv4_address: 10.10.254.123
# ------------------------------------------------
# ------------------- Watchtower -----------------
# ------------------------------------------------
watchtower:
image: containrrr/watchtower
container_name: watchtower
restart: unless-stopped
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /var/run/docker.sock:/var/run/docker.sock
environment:
WATCHTOWER_POLL_INTERVAL: "259200" # every 3rd day
WATCHTOWER_CLEANUP: "true"
WATCHTOWER_DEBUG: "true"
# ------------------------------------------------
# ------------------- Secrets --------------------
# ------------------------------------------------
# Note: chown -R root:root secrets && chmod 0700 secrets
secrets:
MYSQL_USER:
file: ./secrets/MYSQL_USER
MYSQL_PASSWORD:
file: ./secrets/MYSQL_PASSWORD
MYSQL_ROOT_PASSWORD:
file: ./secrets/MYSQL_ROOT_PASSWORD
#networks:
# zabbix:
# external: true
# driver: bridge
#ipam:
#config:
# - subnet: 10.10.254.0/24


@@ -0,0 +1,192 @@
version: '3.5'
services:
# ------------------------------------------------
# ------------------- Zabbix Server --------------
# ------------------------------------------------
zabbix-server:
#image: zabbix/zabbix-server-mysql:alpine-6.2-latest
build:
context: ./build
dockerfile: Dockerfile
container_name: zabbix-server
restart: always
ports:
- 10051:10051/tcp
# dns:
# - 10.10.10.111
# - 10.10.10.222
#dns_search:
#- vinos.local
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/usr/lib/zabbix/alertscripts:/usr/lib/zabbix/alertscripts:ro
- ./state/usr/lib/zabbix/externalscripts:/usr/lib/zabbix/externalscripts:ro
- ./state/var/lib/zabbix/export:/var/lib/zabbix/export:rw
- ./state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- ./state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
- ./state/var/lib/zabbix/mibs:/var/lib/zabbix/mibs:ro
- ./state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
- ./env/odbc.ini:/etc/odbc.ini:ro
- ./env/odbcinst.ini:/etc/odbcinst.ini:ro
- ./env/freetds.conf:/usr/local/etc/freetds.conf:ro
env_file:
- ./env/database.env
- ./env/server.env
- ./env/odbc.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
depends_on:
- database
stop_grace_period: 30s
sysctls:
- net.ipv4.ip_local_port_range=1024 65000
- net.ipv4.conf.all.accept_redirects=0
- net.ipv4.conf.all.secure_redirects=0
- net.ipv4.conf.all.send_redirects=0
# networks:
# zabbix:
# ipv4_address: 172.19.0.7
# ------------------------------------------------
# ------------------- Zabbix Web -----------------
# ------------------------------------------------
zabbix-web:
image: zabbix/zabbix-web-nginx-mysql:alpine-6.2-latest
container_name: zabbix-web
restart: always
ports:
- 80:8080/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/etc/ssl/nginx:/etc/ssl/nginx:ro
- ./state/usr/share/zabbix/modules/:/usr/share/zabbix/modules/:ro
env_file:
- ./env/database.env
- ./env/web.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
depends_on:
- database
- zabbix-server
sysctls:
- net.core.somaxconn=65535
# networks:
# zabbix:
# ipv4_address: 172.19.0.6
# ------------------------------------------------
# ------------------- Zabbix Agent ---------------
# ------------------------------------------------
zabbix-agent2:
image: zabbix/zabbix-agent2:alpine-6.2-latest
container_name: zabbix-agent2
restart: always
#ports:
# - 10050:10050/tcp
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- ./state/etc/zabbix/zabbix_agentd.d:/etc/zabbix/zabbix_agentd.d:ro
- ./state/var/lib/zabbix/modules:/var/lib/zabbix/modules:ro
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
- ./state/var/lib/zabbix/ssh_keys:/var/lib/zabbix/ssh_keys:ro
env_file:
- ./env/agent.env
# networks:
# zabbix:
# ipv4_address: 172.19.0.5
# ------------------------------------------------
# ------------------- Zabbix SNMP Traps ----------
# ------------------------------------------------
zabbix-snmptraps:
image: zabbix/zabbix-snmptraps:alpine-6.2-latest
container_name: zabbix-snmptraps
restart: always
ports:
- 162:1162/udp
volumes:
- ./state/var/lib/zabbix/snmptraps:/var/lib/zabbix/snmptraps:rw
stop_grace_period: 5s
networks:
zabbix:
ipv4_address: 172.19.0.2
# ------------------------------------------------
# ------------------- Zabbix Web Service ---------
# ------------------------------------------------
zabbix-web-service:
image: zabbix/zabbix-web-service:alpine-6.2-latest
container_name: zabbix-web-service
restart: always
volumes:
- ./state/var/lib/zabbix/enc:/var/lib/zabbix/enc:ro
cap_add:
- SYS_ADMIN
env_file:
- ./env/web_service.env
stop_grace_period: 5s
# networks:
# zabbix:
# ipv4_address: 172.19.0.3
# ------------------------------------------------
# ------------------- Database -------------------
# ------------------------------------------------
database:
image: mariadb:10.6
command: ['mysqld', '--collation_server=utf8_bin', '--character_set_server=utf8']
container_name: database
restart: always
volumes:
- ./state/var/lib/mysql:/var/lib/mysql:rw
- ./zabbixdb-backup-restore-master:/home/zabbixdb-backup-restore-master:rw
ports:
- 3306:3306
env_file:
- ./env/database.env
secrets:
- MYSQL_USER
- MYSQL_PASSWORD
- MYSQL_ROOT_PASSWORD
stop_grace_period: 1m
# networks:
# zabbix:
# ipv4_address: 172.19.0.4
# ------------------------------------------------
# ------------------- Watchtower -----------------
# ------------------------------------------------
# watchtower:
# image: containrrr/watchtower
# container_name: watchtower
# restart: unless-stopped
# volumes:
# - /etc/localtime:/etc/localtime:ro
# - /etc/timezone:/etc/timezone:ro
# - /var/run/docker.sock:/var/run/docker.sock
# environment:
# WATCHTOWER_POLL_INTERVAL: "259200" # every 3rd day
# WATCHTOWER_CLEANUP: "true"
# WATCHTOWER_DEBUG: "true"
# ------------------------------------------------
# ------------------- Secrets --------------------
# ------------------------------------------------
# Note: chown -R root:root secrets && chmod 0700 secrets
secrets:
MYSQL_USER:
file: ./secrets/MYSQL_USER
MYSQL_PASSWORD:
file: ./secrets/MYSQL_PASSWORD
MYSQL_ROOT_PASSWORD:
file: ./secrets/MYSQL_ROOT_PASSWORD
#networks:
# zabbix:
# external: true
# driver: bridge
# ipam:
# config:
# - subnet: 172.19.0.0/24

Binary file not shown.

45
compose_files/zammad.yml Normal file

@@ -0,0 +1,45 @@
version: "3.3"
services:
zammad:
image: zammad/zammad:latest
container_name: zammad-app
restart: always
ports:
- "8100:3000"
environment:
- RAILS_ENV=production
volumes:
- /home/icke/zammad/app:/opt/zammad
depends_on:
- zammad-db
- zammad-es
zammad-db:
image: postgres:13
container_name: zammad-db
restart: always
environment:
POSTGRES_USER: zammad
POSTGRES_PASSWORD: zammad
POSTGRES_DB: zammad
volumes:
- /home/icke/zammad/postgres:/var/lib/postgresql/data
zammad-es:
image: docker.elastic.co/elasticsearch/elasticsearch:7.17.3
container_name: zammad-es
restart: always
environment:
- discovery.type=single-node
- xpack.security.enabled=false
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /home/icke/zammad/elasticsearch:/usr/share/elasticsearch/data