branch: master, commit 892ed70a1f

@ -0,0 +1,18 @@
# http://editorconfig.org
root = true
# 空格替代Tab缩进在各种编辑工具下效果一致
[*]
indent_style = space
indent_size = 4
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
[*.{json,yml,yaml}]
indent_size = 2
[*.md]
insert_final_newline = false
trim_trailing_whitespace = false

@ -0,0 +1,57 @@
name: Bug report
description: You found a bug that crashes the application or throws an exception, a component misbehaves, or something just looks wrong.
title: "[Bug]: "
labels: ["bug"]
body:
- type: textarea
id: version
attributes:
label: Version
description: Which version of our software are you using (the version number in the pom file)?
value: |
Note: issues without a version number will not be handled and will be closed or deleted directly
JDK version (include the full patch number):
Framework version (the version printed when the project starts):
Other dependency versions (whatever you consider relevant):
validations:
required: true
- type: checkboxes
attributes:
label: If a feature does not work well or you do not know how to use it, have you read the project documentation?
options:
- label: https://plus-doc.dromara.org
required: true
- type: checkboxes
attributes:
label: Does this issue already exist?
options:
- label: I have searched the existing issues (https://gitee.com/dromara/RuoYi-Cloud-Plus/issues)
required: true
- type: textarea
attributes:
label: Affected modules
description: Which system modules does this error involve?
validations:
required: true
- type: textarea
attributes:
label: Expected result
description: Tell us what behavior you consider normal or reasonable.
validations:
required: true
- type: textarea
attributes:
label: How to reproduce
description: Please tell us in detail how to reproduce the problem you encountered
value: |
If code is involved, please provide a minimal example and wrap it in ``` or attach screenshots; the more detail the better<br>
Most reported problems turn out to be coding mistakes, logic errors, or incorrect usage
validations:
required: true
- type: textarea
attributes:
label: Relevant code and error messages (please keep the formatting readable)
description: If possible, upload any screenshots related to the bug.
value: |
[Upload images here]

@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: RuoYi-Vue-Plus documentation center
url: https://plus-doc.dromara.org
about: Setup and usage guide for RuoYi-Vue-Plus, basic platform development workflow, introduction, fundamentals, and FAQ

@ -0,0 +1,43 @@
name: Feature request
description: Propose a new feature for this project
title: "[Feature request]: "
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
Thanks for the suggestion; we will consider it carefully. Please keep an eye on this issue: once it is added to the plan, a contributor will be assigned as its owner and the status will change to in progress.
- type: textarea
id: related-problem
attributes:
label: Is your feature request related to a problem?
description: Clearly and concisely describe the problem, e.g. I am always frustrated when ...
validations:
required: false
- type: textarea
id: desired-solution
attributes:
label: What solution would you like to see?
description: Clearly and concisely describe what you want to happen.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: What alternatives have you considered?
description: Clearly and concisely describe any alternative solutions or features you have considered.
validations:
required: false
- type: textarea
id: additional-context
attributes:
label: Any other context or screenshots?
description: Add any other context or screenshots about the feature request here.
validations:
required: false
- type: checkboxes
attributes:
label: Willingness to contribute
options:
- label: I am willing to implement this feature myself and contribute the code back to the upstream community
required: false

@ -0,0 +1,7 @@
### Purpose of the change: what problem does it solve? (please submit against the dev branch)
### Implementation approach: the reasoning behind the change (helps the author understand your intent)
### Testing: what tests did you run? (untested changes will not be accepted)

.gitignore

@ -0,0 +1,47 @@
######################################################################
# Build Tools
.gradle
/build/
!gradle/wrapper/gradle-wrapper.jar
target/
!.mvn/wrapper/maven-wrapper.jar
######################################################################
# IDE
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### JRebel ###
rebel.xml
### NetBeans ###
nbproject/private/
build/*
nbbuild/
nbdist/
.nb-gradle/
######################################################################
# Others
*.log
*.xml.versionsBackup
*.swp
!*/build/*.java
!*/build/*.html
!*/build/*.xml
.flattened-pom.xml

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-auth" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-auth:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-auth/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>
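<!-- Note (illustration, not part of the original file): this IntelliJ docker-deploy run configuration only builds the image (buildOnly=true); it is roughly equivalent to running `docker build -t ruoyi/ruoyi-auth:2.2.2 -f ruoyi-auth/Dockerfile .` from the repository root. The other ruoyi-* run configurations below follow the same pattern with their own image tag and Dockerfile path. -->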

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-gateway" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-gateway:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-gateway/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-gen" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-gen:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-modules/ruoyi-gen/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-job" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-job:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-modules/ruoyi-job/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-monitor" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-monitor:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-visual/ruoyi-monitor/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-nacos" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-nacos:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-visual/ruoyi-nacos/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-resource" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-resource:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-modules/ruoyi-resource/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-seata-server" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-seata-server:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-visual/ruoyi-seata-server/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-sentinel-dashboard" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-sentinel-dashboard:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-visual/ruoyi-sentinel-dashboard/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-snailjob-server" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-snailjob-server:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-visual/ruoyi-snailjob-server/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-system" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-system:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-modules/ruoyi-system/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="ruoyi-workflow" type="docker-deploy" factoryName="dockerfile" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="ruoyi/ruoyi-workflow:2.2.2" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="ruoyi-modules/ruoyi-workflow/Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
</component>

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 RuoYi-Cloud-Plus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,182 @@
<img src="https://foruda.gitee.com/images/1679673780944866919/d908a86f_1766278.png" width="56%" height="56%">
<div style="height: 10px; clear: both;"></div>
- - -
## Platform Overview
[![码云Gitee](https://gitee.com/dromara/RuoYi-Cloud-Plus/badge/star.svg?theme=blue)](https://gitee.com/dromara/RuoYi-Cloud-Plus)
[![GitHub](https://img.shields.io/github/stars/dromara/RuoYi-Cloud-Plus.svg?style=social&label=Stars)](https://github.com/dromara/RuoYi-Cloud-Plus)
[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://gitee.com/dromara/RuoYi-Cloud-Plus/blob/master/LICENSE)
[![使用IntelliJ IDEA开发维护](https://img.shields.io/badge/IntelliJ%20IDEA-提供支持-blue.svg)](https://www.jetbrains.com/?from=RuoYi-Cloud-Plus)
<br>
[![RuoYi-Cloud-Plus](https://img.shields.io/badge/RuoYi_Cloud_Plus-2.2.2-success.svg)](https://gitee.com/dromara/RuoYi-Cloud-Plus)
[![Spring Boot](https://img.shields.io/badge/Spring%20Boot-3.1-blue.svg)]()
[![JDK-17](https://img.shields.io/badge/JDK-17-green.svg)]()
[![JDK-21](https://img.shields.io/badge/JDK-21-green.svg)]()
> RuoYi-Cloud-Plus, a `general-purpose microservice permission management system`, is a rewrite of RuoYi-Cloud upgraded across the board (not compatible with the original framework)
> Both the code and the documentation are open source, free, and commercially usable; simply comply with the open-source license by keeping the license file in your project<br>
Open sourced for the long haul: out of interest, for learning, and so that everyone can genuinely learn the technology
> Live demo: [link](https://plus-doc.dromara.org/#/common/demo_system)
> Front-end project: [plus-ui](https://gitee.com/JavaLionLi/plus-ui)
> Documentation: [plus-doc](https://plus-doc.dromara.org)
## Sponsors
MaxKey, an industry-leading single sign-on product - https://gitee.com/dromara/MaxKey <br>
CCFlow, a low-code workflow and form platform - https://gitee.com/opencc/RuoYi-JFlow <br>
Shuduo Technology, custom software development (apps, mini programs, etc.) - http://www.shuduokeji.com/ <br>
Yinmai Information (JNPF), a software development platform - https://www.jnpfsoft.com/index.html?from=plus-doc <br>
[How to become a sponsor: join the group and contact the author](https://plus-doc.dromara.org/#/common/add_group)
## Feature differences between this framework and RuoYi
| Feature | This framework | RuoYi |
|------|------|------|
| Front-end project | Rewritten with Vue3 + TS + ElementPlus | Based on Vue2/Vue3 + JS |
| Back-end project structure | Plugin and extension-package layout, decoupled and easy to extend | Modules inject each other, tightly coupled and hard to extend |
| Back-end code style | Strictly follows the Alibaba coding guidelines and the project-wide formatter settings | Code layout differs from common conventions and is hard to read |
| Distributed registry | Alibaba Nacos integrated at the source level, easy to debug, extend, and customize; the framework adds extra monitoring | Alibaba Nacos set up separately as the plain official build, less reliable |
| Distributed config center | Alibaba Nacos integrated at the source level, easy to debug, extend, and customize; the framework adds extra monitoring | Alibaba Nacos set up separately as the plain official build, less reliable |
| Service gateway | SpringCloud Gateway extended with many features,<br/>e.g. internal-network authentication, request-body caching, CORS configuration, request/response logging | SpringCloud Gateway with only basic features |
| Load balancing | SpringCloud Loadbalancer extended with per-team routing for multi-team development and debugging | SpringCloud Loadbalancer with only basic features |
| RPC calls | The brand-new Apache Dubbo 3.x, with a long and proven history | Feign: limited features, verbose to write, unstable under network jitter |
| Distributed rate limiting and circuit breaking | Alibaba Sentinel integrated at the source level, easy to debug, extend, and customize; the framework adds extra monitoring | Alibaba Sentinel set up separately as the plain official build, less reliable |
| Distributed transactions | Alibaba Seata integrated at the source level, wired into Nacos and the monitoring stack, with a simplified setup and deployment process | Alibaba Seata set up separately as the plain official build: tedious to set up, not tied to Nacos, awkward usage in code |
| Web container | Undertow, a high-performance container built on XNIO | Tomcat |
| Authentication | Sa-Token and JWT, static utility style, full-featured, loosely coupled, highly extensible | Spring Security, tedious to configure and very hard to extend |
| Permission annotations | Sa-Token annotations for login, role, permission, second-level authentication, HttpBasic, and ignore checks<br/>role and permission checks support complex expressions such as `AND`, `OR`, and `permission OR role` | Only supports simple existence matching |
| Relational databases | Native support for MySQL, Oracle, PostgreSQL, SQLServer<br/>heterogeneous databases can be used and switched at the same time (any database supported by mybatis-plus works by adding the JDBC dependency; Dameng, Kingbase, and others have production cases) | MySQL and Oracle, no simultaneous use, no heterogeneous switching |
| Cache database | Redis 5-7 with most of the newer features, such as distributed rate limiting and distributed queues | Redis with simple get/set only |
| Redis client | Redisson, the Netty-based client recommended by Redis, covering over 90% of Redis commands,<br/>with low-level safeguards against incorrect usage (e.g. keys is rewritten to scan),<br/>supporting standalone, sentinel, single-master cluster, and multi-master cluster modes | Lettuce + RedisTemplate: fewer modes, clumsy to use,<br/>commons-pool connection pool with many bugs and frequent problems |
| Cache annotations | Spring Cache annotations with an extended implementation adding expiration time, max idle time, max group size, and more; one annotation caches data automatically | Redis logic has to be written by hand |
| ORM framework | Mybatis-Plus: object-based, almost no hand-written SQL, all in Java, with a rich plugin ecosystem<br/>e.g. multi-tenant, pagination, and optimistic-lock plugins | Mybatis: XML based, SQL written by hand |
| SQL monitoring | p6spy prints the complete SQL together with execution time | Plain log output; SQL and parameters have to be stitched together manually, making debugging slow |
| Pagination | Mybatis-Plus pagination plugin, extended with a page object, multiple ways to pass parameters, and multi-column and complex sorting from the front end | PageHelper: single-query pagination only, parameters only via params, single sort, poor extensibility and experience |
| Data permissions | Mybatis-Plus plugin that analyzes and appends SQL transparently; just annotate the Mapper with conditions, customizable beyond departments and roles | Annotation + AOP based on department and role; the generated SQL has poor compatibility, cannot be extended to other business cases,<br/>must be spliced into the business SQL manually, and does not work across multiple Mappers |
| Data masking | Annotation + Jackson masking during serialization, different masking rules per module,<br/>multiple strategies (ID card, phone number, address, email, bank card, etc.), extensible | None |
| Data encryption | Annotation + Mybatis interceptor, automatic encryption and decryption on read and write,<br/>multiple algorithms such as BASE64, AES, RSA, SM2, SM4 | None |
| Data translation | Annotation + Jackson, values are rewritten and translated during serialization<br/>multiple modes: `mapping translation`, `direct translation`, and other extensible conditions; custom translators take two interface steps, with many built-in implementations | None |
| Multi-datasource framework | dynamic-datasource, supports most databases on the market,<br/>heterogeneous data sources managed dynamically via YAML or added from the front end,<br/>with SpEL-based switching on request headers, parameters, and other conditions | Druid-based, data sources configured in hand-written code, tedious and poorly supported |
| Multi-datasource transactions | dynamic-datasource supports transaction rollback across heterogeneous data sources | Not supported |
| Database connection pool | HikariCP, the Spring default, simple to configure and renowned for performance and stability | Druid: many bugs, weak community maintenance, low activity, lots of configuration, average performance |
| Database primary keys | Snowflake IDs: timestamp-based, ordered, unique; no more worrying about key conflicts when sharding or merging data | Database auto-increment IDs: limited data volume, not unique across data sources |
| WebSocket | Spring's WebSocket support extended with token authentication and distributed session synchronization, no longer single-node only | None |
| SSE push | Spring SSE extended with token authentication and distributed session synchronization | None |
| Serialization | Jackson, the Spring default, dependable | fastjson, whose bugs are famous far and wide |
| Distributed idempotency | Simplified implementation inspired by Meituan's GTIS anti-duplication system (see the docs for details) | Hand-written annotation implemented with AOP |
| Distributed job scheduling | SnailJob: distributed by design, unified admin console, multiple databases, sharding, retries, DAG task flows, and more | Quartz: database-lock based with poor performance, clustering requires heavy configuration and rework |
| Distributed log center | ELK, the mature industry solution, collects the runtime logs of all services in real time for fast troubleshooting | None |
| Distributed search engine | ElasticSearch with Easy-Es, operated in the Mybatis-Plus style | None |
| Distributed message queues | Kafka, RocketMQ, and RabbitMQ with delayed, transactional, and stream messages | None |
| Distributed message bus | SpringCloud Bus as an event bus for cross-service notifications over Kafka, RocketMQ, or RabbitMQ | None |
| Sharding | Apache Sharding-Proxy: proxy-based, non-intrusive sharding, just write the sharding rules | None |
| File storage | Minio distributed storage with built-in multi-node, multi-disk, multi-shard, multi-replica support,<br/>access control, and optional encrypted storage | Local file storage: files are exposed, easy to lose or leak, no clustering, a single point of failure |
| Cloud storage | AWS S3 protocol client supporting Qiniu, Alibaba, Tencent, and any other S3-compatible vendor | Not supported |
| SMS | Alibaba and Tencent supported by configuring the vendor keys in YAML; interface-based so other vendors can be added | Not supported |
| Email | mail-api, a standard protocol that works with most mail providers | Not supported |
| API documentation | SpringDoc + javadoc: zero intrusion, no documentation annotations, docs generated from ordinary Java comments | Springfox: no longer maintained, requires writing lots of annotations |
| Validation | Validation with both annotations and utility classes, annotations support i18n | Annotations only, without i18n |
| Excel framework | Alibaba EasyExcel, plugin based, extended with auto-merging of identical cells, automatic layout, dictionary translation, and more | Hand-written POI code: limited, complex, hard to extend |
| Workflow | Complex approvals: transfer, delegation, adding/removing signers, countersign, or-sign, vote-sign, and more | None |
| Utility libraries | Hutool and Lombok: hundreds of utilities covering 90% of everyday needs, annotation-generated getters/setters and other boilerplate savings | Hand-written utilities: unstable, limited in number, bloated code with hand-written getters and setters |
| Service monitoring | Spring Boot Admin built on the official actuator endpoints, real-time service status, extended with online log viewing | None |
| Full monitoring and alerting | Prometheus and Grafana: varied collectors, dashboard templates, real-time alerting, with detailed setup documentation | None |
| Distributed tracing | Apache SkyWalking: see every node a request passes through in real time instead of guessing where it went wrong | None |
| Code generator | Design the tables and generate all CRUD code and pages with one click, cutting roughly 80% of the work so effort goes into business design;<br/>generated code follows the Mybatis-Plus and SpringDoc conventions and supports dynamic multi-datasource generation | Generates the original structure, single data source only |
| Deployment | Docker orchestration builds the whole environment in one step, so developers no longer worry about environment setup | Plain jar deployment; other middleware must be downloaded, installed, and set up by hand |
| Changing the project package path | Detailed migration guide plus supporting changes; very easy to rename to your own package | Requires a lot of rework with limited documentation |
| Internationalization | Text is returned per request-header locale, low development effort, dedicated utility classes, i18n for most annotation messages | Basic support only; everything else has to be written yourself |
| Unit tests | Ships with unit tests, usage examples, and a Maven multi-profile test plugin setup | Basic support only; everything else has to be written yourself |
| Demo cases | A dedicated module with many complete, real-world usage examples of framework features | None |
## Business differences between this framework and RuoYi
| Feature | Description | This framework | RuoYi |
|------|------|------|------|
| Tenant management | Manage the tenants in the system: tenant packages, expiration time, user quota, company information, etc. | Supported | None |
| Tenant package management | Manage the packages available to tenants, e.g. the menus each package contains | Supported | None |
| User management | Configure users: create users and assign their department, roles, posts, etc. | Supported | Supported |
| Department management | Configure the organization structure (company, department, team), shown as a tree with data permissions | Supported | Supported |
| Post management | Configure the positions held by system users | Supported | Supported |
| Menu management | Configure menus, operation permissions, button permission keys, etc. | Supported | Supported |
| Role management | Assign menu permissions to roles and set data-scope permissions per organization | Supported | Supported |
| Dictionary management | Maintain the relatively fixed data that is used frequently throughout the system | Supported | Supported |
| Parameter management | Dynamically configure commonly used system parameters | Supported | Supported |
| Notices and announcements | Publish and maintain system notices and announcements | Supported | Supported |
| Operation log | Record and query normal operation logs and system exception logs | Supported | Supported |
| Login log | Record and query login logs, including failed logins | Supported | Supported |
| File management | List, upload, download, and delete system files | Supported | None |
| File configuration management | Dynamically add, modify, and delete the configurations used for file upload and download | Supported | None |
| Online user management | Monitor the users currently logged in and force them out when needed | Supported | Supported |
| Scheduled jobs | Run reports, manage jobs (add, modify, delete), logs, and executors | Supported | Only job and log management |
| Code generation | Generate front-end and back-end code (java, html, xml, sql) for multiple data sources, with CRUD download | Supported | Single data source only |
| API documentation | Automatically generate API documentation from the business code | Supported | Supported |
| Service monitoring | Monitor cluster CPU, memory, disk, stack, online logs, Spring configuration, etc. | Supported | Only single-node CPU, memory, and disk monitoring |
| Cache monitoring | Query cache information, command statistics, etc. | Supported | Supported |
| Online form builder | Drag form elements to generate the corresponding HTML code | Supported | Supported |
| Usage demos | Example cases for system features | Supported | Not supported |
## Reference Documentation
Please read the documentation carefully before using the framework and pay close attention to the highlighted notes
<br>
>[Project initialization (must read)](https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/init)
>>[https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/init](https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/init)
>
>[Articles and videos (must read for getting started)](https://plus-doc.dromara.org/#/common/column)
>>[https://plus-doc.dromara.org/#/common/column](https://plus-doc.dromara.org/#/common/column)
>
>[Deploying the project (must read)](https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/deploy)
>>[https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/deploy](https://plus-doc.dromara.org/#/ruoyi-cloud-plus/quickstart/deploy)
>
>[How to join the chat group](https://plus-doc.dromara.org/#/common/add_group)
>>[https://plus-doc.dromara.org/#/common/add_group](https://plus-doc.dromara.org/#/common/add_group)
>
>[Reference documentation (wiki)](https://plus-doc.dromara.org)
>>[https://plus-doc.dromara.org](https://plus-doc.dromara.org)
## Architecture Diagram
![Plus deployment architecture](https://foruda.gitee.com/images/1678980131147747524/5c2d5a5c_1766278.png "Plus deployment architecture")
## Contributing
[How to contribute: https://plus-doc.dromara.org/#/common/contribution](https://plus-doc.dromara.org/#/common/contribution)
## Donate
The author maintains this project in his spare time alongside a day job; if it has helped you, you can buy him a boxed meal
<img src="https://foruda.gitee.com/images/1678975784848381069/d8661ed9_1766278.png" width="300px" height="450px" />
<img src="https://foruda.gitee.com/images/1678975801230205215/6f96229d_1766278.png" width="300px" height="450px" />
## Screenshots
| | |
|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
| ![输入图片说明](https://foruda.gitee.com/images/1680077524361362822/270bb429_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680077619939771291/989bf9b6_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680077681751513929/1c27c5bd_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680077721559267315/74d63e23_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680077765638904515/1b75d4a6_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078026375951297/eded7a4b_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078237104531207/0eb1b6a7_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078254306078709/5931e22f_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078287971528493/0b9af60a_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078308138770249/8d3b6696_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078352553634393/db5ef880_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078378238393374/601e4357_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078414983206024/2aae27c1_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078446738419874/ecce7d59_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078475971341775/149e8634_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078491666717143/3fadece7_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078558863188826/fb8ced2a_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078574561685461/ae68a0b2_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078594932772013/9d8bfec6_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078626493093532/fcfe4ff6_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078643608812515/0295bd4f_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078685196286463/d7612c81_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078703877318597/56fce0bc_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078716586545643/b6dbd68f_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078734103217688/eb1e6aa6_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078759131415480/73c525d8_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078779416197879/75e3ed02_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078802329118061/77e10915_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078893627848351/34a1c342_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078928175016986/f126ec4a_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078941718318363/b68a0f72_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680078963175518631/3bb769a1_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680078982294090567/b31c343d_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680079000642440444/77ca82a9_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680079020995074177/03b7d52e_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680079039367822173/76811806_1766278.png "屏幕截图") |
| ![输入图片说明](https://foruda.gitee.com/images/1680079274333484664/4dfdc7c0_1766278.png "屏幕截图") | ![输入图片说明](https://foruda.gitee.com/images/1680079290467458224/d6715fcf_1766278.png "屏幕截图") |

Binary file not shown.

File diff suppressed because it is too large.

@ -0,0 +1 @@
# Import all configuration files in this folder into `grafana` to generate the dashboards (these templates are customized for this project and differ from the official downloads; for dashboards with the same name, prefer the ones bundled with the framework)

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -0,0 +1 @@
# Copy the contents of all configuration files in this folder into the corresponding `nacos` configurations

@ -0,0 +1,284 @@
server:
# undertow 配置
undertow:
# HTTP post内容的最大大小。当值为-1时默认值为大小是无限的
max-http-post-size: -1
# 以下的配置会影响buffer,这些buffer会用于服务器连接的IO操作,有点类似netty的池化内存管理
# 每块buffer的空间大小,越小的空间被利用越充分
buffer-size: 512
# 是否分配的直接内存
direct-buffers: true
threads:
# 设置IO线程数, 它主要执行非阻塞的任务,它们会负责多个连接, 默认设置每个CPU核心一个线程
io: 8
# 阻塞任务线程池, 当执行类似servlet请求阻塞操作, undertow会从这个线程池中取得线程,它的值设置取决于系统的负载
worker: 256
dubbo:
application:
# 关闭qos端口避免单机多生产者端口冲突 如需使用自行开启
qos-enable: false
protocol:
# 如需使用 Triple 3.0 新协议 可查看官方文档
# 使用 dubbo 协议通信
name: dubbo
# dubbo 协议端口(-1表示自增端口,从20880开始)
port: -1
# 指定dubbo协议注册ip
# host: 192.168.0.100
# 消费者相关配置
consumer:
# 超时时间
timeout: 3000
# 自定义配置
custom:
# 全局请求log
request-log: true
# info 基础信息 param 参数信息 full 全部
log-level: info
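# Illustrative usage (assumption, not part of this file): with Dubbo 3 annotations, a provider exposes an
# interface implementation with @DubboService and a consumer injects it with @DubboReference; calls then go
# over the dubbo protocol on the auto-assigned port and use the 3000 ms consumer timeout configured above.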
spring:
threads:
# 开启虚拟线程 仅jdk21可用
virtual:
enabled: false
# 资源信息
messages:
# 国际化资源文件路径
basename: i18n/messages
servlet:
multipart:
# 整个请求大小限制
max-request-size: 20MB
# 上传单个文件大小限制
max-file-size: 10MB
mvc:
# 设置静态资源路径 防止所有请求都去查静态资源
static-path-pattern: /static/**
format:
date-time: yyyy-MM-dd HH:mm:ss
#jackson配置
jackson:
# 日期格式化
date-format: yyyy-MM-dd HH:mm:ss
serialization:
# 格式化输出
INDENT_OUTPUT: false
# 忽略无法转换的对象
fail_on_empty_beans: false
deserialization:
# 允许对象忽略json中不存在的属性
fail_on_unknown_properties: false
cloud:
nacos:
discovery:
metadata:
# admin 监控账号密码
username: ruoyi
userpassword: 123456
# sentinel 配置
sentinel:
# sentinel 开关
enabled: true
transport:
# dashboard控制台服务名 用于服务发现
# 如无此配置将默认使用下方 dashboard 配置直接注册
server-name: ruoyi-sentinel-dashboard
# 客户端指定注册的ip 用于多网卡ip不稳点使用
# client-ip:
# 控制台地址 从1.3.0开始使用 server-name 注册
# dashboard: localhost:8718
bus:
id: ${spring.application.name}
base-packages: org.dromara.**.event
# 消息总线 也可以使用 kafka 参考 spring-cloud-bus 用法
rabbitmq:
host: localhost
port: 5672
username: ruoyi
password: ruoyi123
# redis通用配置 子服务可以自行配置进行覆盖
data:
redis:
host: localhost
port: 6379
# redis 密码必须配置
password: ruoyi123
database: 0
# 需要使用数字
timeout: 10000
ssl.enabled: false
# redisson 配置
redisson:
# redis key前缀
keyPrefix:
# 线程池数量
threads: 4
# Netty线程池数量
nettyThreads: 8
# 单节点配置
singleServerConfig:
# 客户端名称
clientName: ${spring.application.name}
# 最小空闲连接数
connectionMinimumIdleSize: 8
# 连接池大小
connectionPoolSize: 32
# 连接空闲超时,单位:毫秒
idleConnectionTimeout: 10000
# 命令等待超时,单位:毫秒
timeout: 3000
# 发布和订阅连接池大小
subscriptionConnectionPoolSize: 50
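# Illustrative usage (assumption, not part of this file): with this setup a RedissonClient is available, so
# code can call redissonClient.getBucket("key") or redissonClient.getLock("lockKey"); the framework's Redis
# utility classes are thin wrappers over this client.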
# 分布式锁 lock4j 全局配置
lock4j:
# 获取分布式锁超时时间,默认为 3000 毫秒
acquire-timeout: 3000
# 分布式锁的超时时间,默认为 30 秒
expire: 30000
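# Illustrative usage (assumption, not part of this file): a service method can take a distributed lock with
# the Lock4j annotation, e.g. @Lock4j(keys = "#orderId", acquireTimeout = 3000, expire = 30000); attributes
# that are omitted fall back to the global defaults above.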
# 暴露监控端点
management:
endpoints:
web:
exposure:
include: '*'
endpoint:
health:
show-details: ALWAYS
logfile:
external-file: ./logs/${spring.application.name}/console.log
# 日志配置
logging:
level:
org.springframework: warn
org.apache.dubbo: warn
com.alibaba.nacos: warn
com.alibaba.cloud.sentinel: warn
org.mybatis.spring.mapper: error
org.apache.dubbo.config: error
# 临时处理 spring 调整日志级别导致启动警告问题 不影响使用等待 alibaba 适配
org.springframework.context.support.PostProcessorRegistrationDelegate: error
config: classpath:logback-plus.xml
# Sa-Token配置
sa-token:
# token名称 (同时也是cookie名称)
token-name: Authorization
# 开启内网服务调用鉴权(不允许越过gateway访问内网服务 保障服务安全)
check-same-token: true
# 是否允许同一账号并发登录 (为true时允许一起登录, 为false时新登录挤掉旧登录)
is-concurrent: true
# 在多人登录同一账号时是否共用一个token (为true时所有登录共用一个token, 为false时每次登录新建一个token)
is-share: false
# jwt秘钥
jwt-secret-key: abcdefghijklmnopqrstuvwxyz
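# Illustrative usage (not part of this file): with Sa-Token, controller methods can be guarded with
# annotations such as @SaCheckLogin, @SaCheckRole("admin"), or @SaCheckPermission("system:user:list"),
# and the client sends its token in the Authorization header configured by token-name above.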
# MyBatisPlus配置
# https://baomidou.com/config/
mybatis-plus:
# 多包名使用 例如 org.dromara.**.mapper,org.xxx.**.mapper
mapperPackage: org.dromara.**.mapper
# 对应的 XML 文件位置
mapperLocations: classpath*:mapper/**/*Mapper.xml
# 实体扫描多个package用逗号或者分号分隔
typeAliasesPackage: org.dromara.**.domain
global-config:
dbConfig:
# 主键类型
# AUTO 自增 NONE 空 INPUT 用户输入 ASSIGN_ID 雪花 ASSIGN_UUID 唯一 UUID
# 如需改为自增 需要将数据库表全部设置为自增
idType: ASSIGN_ID
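# Illustrative usage (not part of this file): with idType ASSIGN_ID, primary keys are snowflake IDs; an
# entity can declare this explicitly with @TableId(value = "user_id", type = IdType.ASSIGN_ID) or simply
# rely on this global default.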
# 数据加密
mybatis-encryptor:
# 是否开启加密
enable: false
# 默认加密算法
algorithm: BASE64
# 编码方式 BASE64/HEX。默认BASE64
encode: BASE64
# 安全秘钥 对称算法的秘钥 如AESSM4
password:
# 公私钥 非对称算法的公私钥 如SM2RSA
publicKey:
privateKey:
# api接口加密
api-decrypt:
# 是否开启全局接口加密
enabled: true
# AES 加密头标识
headerFlag: encrypt-key
# 响应加密公钥 非对称算法的公私钥 如SM2RSA 使用者请自行更换
# 对应前端解密私钥 MIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAmc3CuPiGL/LcIIm7zryCEIbl1SPzBkr75E2VMtxegyZ1lYRD+7TZGAPkvIsBcaMs6Nsy0L78n2qh+lIZMpLH8wIDAQABAkEAk82Mhz0tlv6IVCyIcw/s3f0E+WLmtPFyR9/WtV3Y5aaejUkU60JpX4m5xNR2VaqOLTZAYjW8Wy0aXr3zYIhhQQIhAMfqR9oFdYw1J9SsNc+CrhugAvKTi0+BF6VoL6psWhvbAiEAxPPNTmrkmrXwdm/pQQu3UOQmc2vCZ5tiKpW10CgJi8kCIFGkL6utxw93Ncj4exE/gPLvKcT+1Emnoox+O9kRXss5AiAMtYLJDaLEzPrAWcZeeSgSIzbL+ecokmFKSDDcRske6QIgSMkHedwND1olF8vlKsJUGK3BcdtM8w4Xq7BpSBwsloE=
publicKey: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJnNwrj4hi/y3CCJu868ghCG5dUj8wZK++RNlTLcXoMmdZWEQ/u02RgD5LyLAXGjLOjbMtC+/J9qofpSGTKSx/MCAwEAAQ==
# 请求解密私钥 非对称算法的公私钥 如SM2RSA 使用者请自行更换
# 对应前端加密公钥 MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKoR8mX0rGKLqzcWmOzbfj64K8ZIgOdHnzkXSOVOZbFu/TJhZ7rFAN+eaGkl3C4buccQd/EjEsj9ir7ijT7h96MCAwEAAQ==
privateKey: MIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAqhHyZfSsYourNxaY7Nt+PrgrxkiA50efORdI5U5lsW79MmFnusUA355oaSXcLhu5xxB38SMSyP2KvuKNPuH3owIDAQABAkAfoiLyL+Z4lf4Myxk6xUDgLaWGximj20CUf+5BKKnlrK+Ed8gAkM0HqoTt2UZwA5E2MzS4EI2gjfQhz5X28uqxAiEA3wNFxfrCZlSZHb0gn2zDpWowcSxQAgiCstxGUoOqlW8CIQDDOerGKH5OmCJ4Z21v+F25WaHYPxCFMvwxpcw99EcvDQIgIdhDTIqD2jfYjPTY8Jj3EDGPbH2HHuffvflECt3Ek60CIQCFRlCkHpi7hthhYhovyloRYsM+IS9h/0BzlEAuO0ktMQIgSPT3aFAgJYwKpqRYKlLDVcflZFCKY7u3UP8iWi1Qw0Y=
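# Assumed flow for the settings above (illustration only): the client generates a random AES key, encrypts
# the request body with it, encrypts that AES key with the RSA public key, and sends the result in the
# encrypt-key header; the server uses privateKey to recover the AES key and decrypt the body, and the
# response is protected the same way in reverse with the other key pair.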
# 防止XSS攻击
xss:
enabled: true
excludeUrls:
- /system/notice
- /workflow/model/save
- /workflow/model/editModelXml
# 接口文档配置
springdoc:
api-docs:
# 是否开启接口文档
enabled: true
# swagger-ui:
# # 持久化认证数据
# persistAuthorization: true
info:
# 标题
title: '标题RuoYi-Cloud-Plus微服务权限管理系统_接口文档'
# 描述
description: '描述:微服务权限管理系统, 具体包括XXX,XXX模块...'
# 版本
version: '版本号:系统版本...'
# 作者信息
contact:
name: Lion Li
email: crazylionli@163.com
url: https://gitee.com/dromara/RuoYi-Cloud-Plus
components:
# 鉴权方式配置
security-schemes:
apiKey:
type: APIKEY
in: HEADER
name: ${sa-token.token-name}
# seata配置
seata:
# 是否启用
enabled: true
# Seata 应用编号,默认为应用名
application-id: ${spring.application.name}
# Seata 事务组编号,用于 TC 集群名
tx-service-group: ${spring.application.name}-group
# 多租户配置
tenant:
# 是否开启
enable: true
# 排除表
excludes:
- sys_menu
- sys_tenant
- sys_tenant_package
- sys_role_dept
- sys_role_menu
- sys_user_post
- sys_user_role
- sys_client
- sys_oss_config
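# Illustration (assumption, not part of this file): for tables not listed above, the multi-tenant plugin
# appends a tenant filter automatically, e.g. SELECT * FROM sys_user becomes
# SELECT * FROM sys_user WHERE tenant_id = '...' for the current tenant.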

@ -0,0 +1,54 @@
datasource:
system-master:
# jdbc 所有参数配置参考 https://lionli.blog.csdn.net/article/details/122018562
# rewriteBatchedStatements=true 批处理优化 大幅提升批量插入更新删除性能
url: jdbc:mysql://localhost:3306/ry-cloud?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true
username: root
password: password
gen:
url: jdbc:mysql://localhost:3306/ry-cloud?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true
username: root
password: password
job:
url: jdbc:mysql://localhost:3306/ry-job?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true
username: root
password: password
workflow:
url: jdbc:mysql://localhost:3306/ry-workflow?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true&nullCatalogMeansCurrent=true
username: root
password: password
# system-oracle:
# url: jdbc:oracle:thin:@//localhost:1521/XE
# username: ROOT
# password: password
# system-postgres:
# url: jdbc:postgresql://localhost:5432/postgres?useUnicode=true&characterEncoding=utf8&useSSL=true&autoReconnect=true&reWriteBatchedInserts=true
# username: root
# password: password
spring:
datasource:
type: com.zaxxer.hikari.HikariDataSource
# 动态数据源文档 https://www.kancloud.cn/tracy5546/dynamic-datasource/content
dynamic:
# 性能分析插件(有性能损耗 不建议生产环境使用)
p6spy: true
# 开启seata代理开启后默认每个数据源都代理如果某个不需要代理可单独关闭
seata: ${seata.enabled}
# 严格模式 匹配不到数据源则报错
strict: true
hikari:
# 最大连接池数量
maxPoolSize: 20
# 最小空闲线程数量
minIdle: 10
# 配置获取连接等待超时的时间
connectionTimeout: 30000
# 校验超时时间
validationTimeout: 5000
# 空闲连接存活最大时间默认10分钟
idleTimeout: 600000
# 此属性控制池中连接的最长生命周期值0表示无限生命周期默认30分钟
maxLifetime: 1800000
# 多久检查一次连接的活性
keepaliveTime: 30000

@ -0,0 +1,98 @@
# 安全配置
security:
# 验证码
captcha:
# 是否开启验证码
enabled: true
# 验证码类型 math 数组计算 char 字符验证
type: MATH
# line 线段干扰 circle 圆圈干扰 shear 扭曲干扰
category: CIRCLE
# 数字验证码位数
numberLength: 1
# 字符验证码长度
charLength: 4
# 用户配置
user:
password:
# 密码最大错误次数
maxRetryCount: 5
# 密码锁定时间默认10分钟
lockTime: 10
# 三方授权
justauth:
# 前端外网访问地址
address: http://localhost:80
type:
maxkey:
# maxkey 服务器地址
# 注意 如下均配置均不需要修改 maxkey 已经内置好了数据
server-url: http://sso.maxkey.top
client-id: 876892492581044224
client-secret: x1Y5MTMwNzIwMjMxNTM4NDc3Mzche8
redirect-uri: ${justauth.address}/social-callback?source=maxkey
topiam:
# topiam 服务器地址
server-url: http://127.0.0.1:1989/api/v1/authorize/y0q************spq***********8ol
client-id: 449c4*********937************759
client-secret: ac7***********1e0************28d
redirect-uri: ${justauth.address}/social-callback?source=topiam
scopes: [ openid, email, phone, profile ]
qq:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=qq
union-id: false
weibo:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=weibo
gitee:
client-id: 91436b7940090d09c72c7daf85b959cfd5f215d67eea73acbf61b6b590751a98
client-secret: 02c6fcfd70342980cd8dd2f2c06c1a350645d76c754d7a264c4e125f9ba915ac
redirect-uri: ${justauth.address}/social-callback?source=gitee
dingtalk:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=dingtalk
baidu:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=baidu
csdn:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=csdn
coding:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=coding
coding-group-name: xx
oschina:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=oschina
alipay_wallet:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=alipay_wallet
alipay-public-key: MIIB**************DAQAB
wechat_open:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=wechat_open
wechat_mp:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=wechat_mp
wechat_enterprise:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=wechat_enterprise
agent-id: 1000002
gitlab:
client-id: 10**********6
client-secret: 1f7d08**********5b7**********29e
redirect-uri: ${justauth.address}/social-callback?source=gitlab

@ -0,0 +1,95 @@
# 安全配置
security:
# 不校验白名单
ignore:
whites:
- /auth/code
- /auth/logout
- /auth/login
- /auth/binding/*
- /auth/social/callback
- /auth/register
- /auth/tenant/list
- /resource/sms/code
- /resource/sse/close
- /*/v3/api-docs
- /*/error
- /csrf
spring:
cloud:
# 网关配置
gateway:
# 打印请求日志(自定义)
requestLog: true
discovery:
locator:
lowerCaseServiceId: true
enabled: true
routes:
# 认证中心
- id: ruoyi-auth
uri: lb://ruoyi-auth
predicates:
- Path=/auth/**
filters:
- StripPrefix=1
# 代码生成
- id: ruoyi-gen
uri: lb://ruoyi-gen
predicates:
- Path=/tool/**
filters:
- StripPrefix=1
# 系统模块
- id: ruoyi-system
uri: lb://ruoyi-system
predicates:
- Path=/system/**,/monitor/**
filters:
- StripPrefix=1
# 资源服务
- id: ruoyi-resource
uri: lb://ruoyi-resource
predicates:
- Path=/resource/**
filters:
- StripPrefix=1
# workflow服务
- id: ruoyi-workflow
uri: lb://ruoyi-workflow
predicates:
- Path=/workflow/**
filters:
- StripPrefix=1
# 演示服务
- id: ruoyi-demo
uri: lb://ruoyi-demo
predicates:
- Path=/demo/**
filters:
- StripPrefix=1
# MQ演示服务
- id: ruoyi-test-mq
uri: lb://ruoyi-test-mq
predicates:
- Path=/test-mq/**
filters:
- StripPrefix=1
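# Routing example (illustration): a request to /system/user/list matches the ruoyi-system route above and,
# after StripPrefix=1 removes the first path segment, is forwarded to the ruoyi-system service as /user/list.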
# sentinel 配置
sentinel:
filter:
enabled: false
# nacos配置持久化
datasource:
ds1:
nacos:
server-addr: ${spring.cloud.nacos.server-addr}
dataId: sentinel-${spring.application.name}.json
groupId: ${spring.cloud.nacos.config.group}
username: ${spring.cloud.nacos.username}
password: ${spring.cloud.nacos.password}
namespace: ${spring.profiles.active}
data-type: json
rule-type: gw-flow
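# Note (assumption): the rules persisted in nacos under this dataId are plain Sentinel rule JSON; the
# sentinel JSON rule file included later in this commit (resource/count/grade/limitApp/strategy/controlBehavior)
# shows the expected shape.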

@ -0,0 +1,37 @@
spring:
datasource:
dynamic:
# 设置默认的数据源或者数据源组,默认值即为 master
primary: master
seata: false
datasource:
# 主库数据源
master:
type: ${spring.datasource.type}
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.system-master.url}
username: ${datasource.system-master.username}
password: ${datasource.system-master.password}
# oracle:
# type: ${spring.datasource.type}
# driverClassName: oracle.jdbc.OracleDriver
# url: ${datasource.system-oracle.url}
# username: ${datasource.system-oracle.username}
# password: ${datasource.system-oracle.password}
# postgres:
# type: ${spring.datasource.type}
# driverClassName: org.postgresql.Driver
# url: ${datasource.system-postgres.url}
# username: ${datasource.system-postgres.username}
# password: ${datasource.system-postgres.password}
# 代码生成
gen:
# 作者
author: LionLi
# 默认生成包路径 system 需改成自己的模块名称 如 system monitor tool
packageName: org.dromara.system
# 自动去除表前缀默认是false
autoRemovePre: false
# 表前缀(生成类名不会包含表前缀,多个用逗号分隔)
tablePrefix: sys_

@ -0,0 +1,33 @@
spring:
datasource:
dynamic:
# 设置默认的数据源或者数据源组,默认值即为 master
primary: master
seata: false
datasource:
# 主库数据源
master:
type: ${spring.datasource.type}
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.job.url}
username: ${datasource.job.username}
password: ${datasource.job.password}
snail-job:
enabled: true
# 需要在 SnailJob 后台组管理创建对应名称的组,然后创建任务的时候选择对应的组,才能正确分派任务
group: "ruoyi_group"
# SnailJob 接入验证令牌
token: "SJ_cKqBTPzCsWA3VyuCfFoccmuIEGXjr5KT"
server:
# 从 nacos 获取服务
server-name: ruoyi-snailjob-server
# 服务名优先 ip垫底
host: 127.0.0.1
port: 17888
# 详见 script/sql/ry_job.sql `sj_namespace` 表
namespace: ${spring.profiles.active}
# 随主应用端口飘逸
port: 2${server.port}
# 客户端ip指定
host:

@ -0,0 +1,13 @@
# 监控中心配置
spring:
security:
user:
name: ruoyi
password: 123456
boot:
admin:
ui:
title: RuoYi-Cloud-Plus服务监控中心
discovery:
# seata 不具有健康检查的能力 防止报错排除掉
ignored-services: ruoyi-seata-server

@ -0,0 +1,91 @@
spring:
datasource:
dynamic:
# 设置默认的数据源或者数据源组,默认值即为 master
primary: master
datasource:
# 主库数据源
master:
type: ${spring.datasource.type}
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.system-master.url}
username: ${datasource.system-master.username}
password: ${datasource.system-master.password}
# oracle:
# type: ${spring.datasource.type}
# driverClassName: oracle.jdbc.OracleDriver
# url: ${datasource.system-oracle.url}
# username: ${datasource.system-oracle.username}
# password: ${datasource.system-oracle.password}
# postgres:
# type: ${spring.datasource.type}
# driverClassName: org.postgresql.Driver
# url: ${datasource.system-postgres.url}
# username: ${datasource.system-postgres.username}
# password: ${datasource.system-postgres.password}
# 默认/推荐使用sse推送
sse:
enabled: true
path: /sse
websocket:
# 如果关闭 需要和前端开关一起关闭
enabled: false
# 路径
path: /websocket
# 设置访问源地址
allowedOrigins: '*'
mail:
enabled: false
host: smtp.163.com
port: 465
# 是否需要用户名密码验证
auth: true
# 发送方遵循RFC-822标准
from: xxx@163.com
# 用户名注意如果使用foxmail邮箱此处user为qq号
user: xxx@163.com
# 密码注意某些邮箱需要为SMTP服务单独设置密码详情查看相关帮助
pass: xxxxxxxxxx
# 使用 STARTTLS安全连接STARTTLS是对纯文本通信协议的扩展。
starttlsEnable: true
# 使用SSL安全连接
sslEnable: true
# SMTP超时时长单位毫秒缺省值不超时
timeout: 0
# Socket连接超时值单位毫秒缺省值不超时
connectionTimeout: 0
# sms 短信 支持 阿里云 腾讯云 云片 等等各式各样的短信服务商
# https://sms4j.com/doc3/ 差异配置文档地址 支持单厂商多配置,可以配置多个同时使用
sms:
# 配置源类型用于标定配置来源(interface,yaml)
config-type: yaml
# 用于标定yml中的配置是否开启短信拦截接口配置不受此限制
restricted: true
# 短信拦截限制单手机号每分钟最大发送,只对开启了拦截的配置有效
minute-max: 1
# 短信拦截限制单手机号每日最大发送量,只对开启了拦截的配置有效
account-max: 30
# 以下配置来自于 org.dromara.sms4j.provider.config.BaseConfig类中
blends:
# 唯一ID 用于发送短信寻找具体配置 随便定义别用中文即可
# 可以同时存在两个相同厂商 例如: ali1 ali2 两个不同的阿里短信账号 也可用于区分租户
config1:
# 框架定义的厂商名称标识,标定此配置是哪个厂商,详细请看厂商标识介绍部分
supplier: alibaba
# 有些称为accessKey有些称之为apiKey也有称为sdkKey或者appId。
access-key-id: 您的accessKey
# 称为accessSecret有些称之为apiSecret
access-key-secret: 您的accessKeySecret
signature: 您的短信签名
sdk-app-id: 您的sdkAppId
config2:
# 厂商标识,标定此配置是哪个厂商,详细请看厂商标识介绍部分
supplier: tencent
access-key-id: 您的accessKey
access-key-secret: 您的accessKeySecret
signature: 您的短信签名
sdk-app-id: 您的sdkAppId
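# Illustration (assumption, not part of this file): application code picks a vendor by blend id, e.g. the
# "config1" (alibaba) or "config2" (tencent) entries above, so several vendors or accounts can be used
# side by side.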

@ -0,0 +1,27 @@
spring:
mvc:
pathmatch:
# 修复 sentinel 控制台未适配 springboot 2.6 新路由方式
matching-strategy: ANT_PATH_MATCHER
server:
servlet:
encoding:
force: true
charset: UTF-8
enabled: true
session:
cookie:
name: sentinel_dashboard_cookie
logging:
level:
org.springframework.web: INFO
auth:
enabled: true
filter:
exclude-urls: /,/auth/login,/auth/logout,/registry/machine,/version,/actuator,/actuator/**
exclude-url-suffixes: htm,html,js,css,map,ico,ttf,woff,png
username: sentinel
password: sentinel

@ -0,0 +1,48 @@
spring:
datasource:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.job.url}
username: ${datasource.job.username}
password: ${datasource.job.password}
hikari:
connection-timeout: 30000
validation-timeout: 5000
minimum-idle: 10
maximum-pool-size: 20
idle-timeout: 600000
max-lifetime: 900000
keepaliveTime: 30000
cloud:
nacos:
discovery:
metadata:
# 解决 er 服务有 context-path 无法监控问题
management.context-path: ${server.servlet.context-path}/actuator
# 监控账号密码
username: ruoyi
userpassword: 123456
# snail-job 服务端配置
snail-job:
# 拉取重试数据的每批次的大小
retry-pull-page-size: 1000
# 拉取重试数据的每批次的大小
job-pull-page-size: 1000
# 服务端 netty 端口
netty-port: 17888
# 重试和死信表的分区总数
total-partition: 2
# 一个客户端每秒最多接收的重试数量指令
limiter: 1000
# 号段模式下步长配置
step: 100
# 日志保存时间(单位: day)
log-storage: 90
# 回调配置
callback:
#回调最大执行次数
max-count: 288
#间隔时间
trigger-interval: 900
retry-max-pull-count: 10

@ -0,0 +1,25 @@
spring:
datasource:
dynamic:
# 设置默认的数据源或者数据源组,默认值即为 master
primary: master
datasource:
# 主库数据源
master:
type: ${spring.datasource.type}
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.system-master.url}
username: ${datasource.system-master.username}
password: ${datasource.system-master.password}
# oracle:
# type: ${spring.datasource.type}
# driverClassName: oracle.jdbc.OracleDriver
# url: ${datasource.system-oracle.url}
# username: ${datasource.system-oracle.username}
# password: ${datasource.system-oracle.password}
# postgres:
# type: ${spring.datasource.type}
# driverClassName: org.postgresql.Driver
# url: ${datasource.system-postgres.url}
# username: ${datasource.system-postgres.username}
# password: ${datasource.system-postgres.password}

@ -0,0 +1,48 @@
spring:
datasource:
dynamic:
# 设置默认的数据源或者数据源组,默认值即为 master
primary: master
datasource:
# 主库数据源
master:
type: ${spring.datasource.type}
driver-class-name: com.mysql.cj.jdbc.Driver
url: ${datasource.workflow.url}
username: ${datasource.workflow.username}
password: ${datasource.workflow.password}
# oracle:
# type: ${spring.datasource.type}
# driverClassName: oracle.jdbc.OracleDriver
# url: ${datasource.system-oracle.url}
# username: ${datasource.system-oracle.username}
# password: ${datasource.system-oracle.password}
# postgres:
# type: ${spring.datasource.type}
# driverClassName: org.postgresql.Driver
# url: ${datasource.system-postgres.url}
# username: ${datasource.system-postgres.username}
# password: ${datasource.system-postgres.password}
# flowable配置
flowable:
# 开关 用于启动/停用工作流
enabled: true
process.enabled: ${flowable.enabled}
eventregistry.enabled: ${flowable.enabled}
# 关闭定时任务JOB
async-executor-activate: false
# 将databaseSchemaUpdate设置为true。当Flowable发现库与数据库表结构不一致时会自动将数据库表结构升级至新版本。
database-schema-update: true
activity-font-name: 宋体
label-font-name: 宋体
annotation-font-name: 宋体
# 关闭各个模块生成表,目前只使用工作流基础表
idm:
enabled: false
cmmn:
enabled: false
dmn:
enabled: false
app:
enabled: false

@ -0,0 +1,121 @@
service.vgroupMapping.ruoyi-auth-group=default
service.vgroupMapping.ruoyi-system-group=default
service.vgroupMapping.ruoyi-resource-group=default
service.vgroupMapping.ruoyi-workflow-group=default
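# Example (illustration): each client sets seata.tx-service-group to <application-name>-group, so a new
# service such as ruoyi-demo would need an extra mapping line if it joins global transactions:
# service.vgroupMapping.ruoyi-demo-group=default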
service.enableDegrade=false
service.disableGlobalTransaction=false
#Transaction storage configuration, only for the server. The file, DB, and redis configuration values are optional.
store.mode=db
store.lock.mode=db
store.session.mode=db
#Used for password encryption
#store.publicKey=
#These configurations are required if the `store mode` is `db`. If `store.mode,store.lock.mode,store.session.mode` are not equal to `db`, you can remove the configuration block.
store.db.datasource=hikari
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://127.0.0.1:3306/ry-seata?useUnicode=true&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true
store.db.user=root
store.db.password=root
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.distributedLockTable=distributed_lock
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
# redis 模式 store.mode=redis 开启 (控制台查询功能有限,不影响实际执行功能)
# store.redis.host=127.0.0.1
# store.redis.port=6379
# 最大连接数
# store.redis.maxConn=10
# 最小连接数
# store.redis.minConn=1
# store.redis.database=0
# store.redis.password=
# store.redis.queryLimit=100
#Transaction rule configuration, only for the server
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
server.distributedLockExpireTime=10000
server.xaerNotaRetryTimeout=60000
server.session.branchAsyncQueueSize=5000
server.session.enableBranchAsyncRemove=false
#Transaction rule configuration, only for the client
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=true
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.rm.sagaJsonParser=fastjson
client.rm.tccActionInterceptorOrder=-2147482648
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
client.tm.interceptorOrder=-2147482648
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k
#For TCC transaction mode
tcc.fence.logTableName=tcc_fence_log
tcc.fence.cleanPeriod=1h
#Log rule configuration, for client and server
log.exceptionRate=100
#Metrics configuration, only for the server
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
#For details about configuration items, see https://seata.io/zh-cn/docs/user/configurations.html
#Transport configuration, for client and server
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableTmClientBatchSendRequest=false
transport.enableRmClientBatchSendRequest=true
transport.enableTcServerBatchSendResponse=false
transport.rpcRmRequestTimeout=30000
transport.rpcTmRequestTimeout=30000
transport.rpcTcRequestTimeout=30000
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
transport.serialization=seata
transport.compressor=none

@ -0,0 +1,26 @@
[
{
"resource": "ruoyi-auth",
"count": 500,
"grade": 1,
"limitApp": "default",
"strategy": 0,
"controlBehavior": 0
},
{
"resource": "ruoyi-system",
"count": 1000,
"grade": 1,
"limitApp": "default",
"strategy": 0,
"controlBehavior": 0
},
{
"resource": "ruoyi-resource",
"count": 500,
"grade": 1,
"limitApp": "default",
"strategy": 0,
"controlBehavior": 0
}
]

@ -0,0 +1,45 @@
version: '3'
services:
# 此镜像仅用于测试 正式环境需自行安装数据库
# SID: XE user: system password: oracle
oracle:
image: tekintian/oracle12c:latest
container_name: oracle
environment:
# 时区上海
TZ: Asia/Shanghai
DBCA_TOTAL_MEMORY: 16192
ports:
- "18080:8080"
- "1521:1521"
volumes:
# 数据挂载
- "/docker/oracle/data:/u01/app/oracle"
network_mode: "host"
postgres:
image: postgres:14.2
container_name: postgres
environment:
POSTGRES_USER: root
POSTGRES_PASSWORD: root
POSTGRES_DB: postgres
ports:
- "5432:5432"
volumes:
- /docker/postgres/data:/var/lib/postgresql/data
network_mode: "host"
postgres13:
image: postgres:13.6
container_name: postgres13
environment:
POSTGRES_USER: root
POSTGRES_PASSWORD: root
POSTGRES_DB: postgres
ports:
- "5433:5432"
volumes:
- /docker/postgres13/data:/var/lib/postgresql/data
network_mode: "host"

@ -0,0 +1,524 @@
version: '3'
services:
mysql:
image: mysql:8.0.33
container_name: mysql
environment:
# 时区上海
TZ: Asia/Shanghai
# root 密码
MYSQL_ROOT_PASSWORD: ruoyi123
# 初始化数据库
MYSQL_DATABASE: ry-cloud
ports:
- "3306:3306"
volumes:
# 数据挂载
- /docker/mysql/data/:/var/lib/mysql/
# 配置挂载
- /docker/mysql/conf/:/etc/mysql/conf.d/
command:
# 将mysql8.0默认密码策略 修改为 原先 策略 (mysql8.0对其默认策略做了更改 会导致密码无法匹配)
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
privileged: true
network_mode: "host"
nacos:
image: ruoyi/ruoyi-nacos:2.2.2
container_name: nacos
ports:
- "8848:8848"
- "9848:9848"
- "9849:9849"
environment:
TZ: Asia/Shanghai
JAVA_OPTS: "-Xms256m -Xmx512m"
volumes:
# 日志目录 注意集群模式下 日志目录不能一致 需要区分例如 nacos1 nacos2
- /docker/nacos/logs/:/root/nacos/logs
# 集群配置文件 集群所有nacos都必须使用此文件
- /docker/nacos/conf/cluster.conf:/root/nacos/conf/cluster.conf
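# Illustration (assumption): cluster.conf lists one nacos node per line as ip:port, e.g.
# 192.168.0.101:8848
# 192.168.0.102:8848
# 192.168.0.103:8848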
network_mode: "host"
redis:
image: redis:6.2.12
container_name: redis
ports:
- "6379:6379"
environment:
# 时区上海
TZ: Asia/Shanghai
volumes:
# 配置文件
- /docker/redis/conf:/redis/config
# 数据文件
- /docker/redis/data/:/redis/data/
command: "redis-server /redis/config/redis.conf"
privileged: true
network_mode: "host"
minio:
image: minio/minio:RELEASE.2023-03-24T21-41-23Z
container_name: minio
ports:
# api 端口
- "9000:9000"
# 控制台端口
- "9001:9001"
environment:
# 时区上海
TZ: Asia/Shanghai
# 管理后台用户名
MINIO_ROOT_USER: ruoyi
# 管理后台密码最小8个字符
MINIO_ROOT_PASSWORD: ruoyi123
# https需要指定域名
#MINIO_SERVER_URL: "https://xxx.com:9000"
#MINIO_BROWSER_REDIRECT_URL: "https://xxx.com:9001"
# 开启压缩 on 开启 off 关闭
MINIO_COMPRESS: "off"
# 扩展名 .pdf,.doc 为空 所有类型均压缩
MINIO_COMPRESS_EXTENSIONS: ""
# mime 类型 application/pdf 为空 所有类型均压缩
MINIO_COMPRESS_MIME_TYPES: ""
volumes:
# 映射当前目录下的data目录至容器内/data目录
- /docker/minio/data:/data
# 映射配置目录
- /docker/minio/config:/root/.minio/
command: server --address ':9000' --console-address ':9001' /data # 指定容器中的目录 /data
privileged: true
network_mode: "host"
seata-server:
image: ruoyi/ruoyi-seata-server:2.2.2
container_name: seata-server
ports:
- "7091:7091"
- "8091:8091"
environment:
TZ: Asia/Shanghai
# 注意 此处ip如果是外网使用 要改为外网ip
# SEATA_IP: 127.0.0.1
SEATA_PORT: 8091
volumes:
- /docker/ruoyi-seata-server/logs/:/ruoyi/seata-server/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
nginx-web:
image: nginx:1.22.1
container_name: nginx-web
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "80:80"
- "443:443"
volumes:
# 证书映射
- /docker/nginx/cert:/etc/nginx/cert
# 配置文件映射
- /docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
# 页面目录
- /docker/nginx/html:/usr/share/nginx/html
# 日志目录
- /docker/nginx/log:/var/log/nginx
privileged: true
network_mode: "host"
sentinel:
image: ruoyi/ruoyi-sentinel-dashboard:2.2.2
container_name: sentinel
environment:
TZ: Asia/Shanghai
ports:
- "8718:8718"
volumes:
# 配置文件
- /docker/ruoyi-sentinel-dashboard/logs/:/ruoyi/sentinel-dashboard/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
restart: always
network_mode: "host"
ruoyi-monitor:
image: ruoyi/ruoyi-monitor:2.2.2
container_name: ruoyi-monitor
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9100:9100"
volumes:
# 配置文件
- /docker/ruoyi-monitor/logs/:/ruoyi/monitor/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-snailjob-server:
image: ruoyi/ruoyi-snailjob-server:2.2.2
container_name: ruoyi-snailjob-server
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "8800:8800"
- "17888:17888"
volumes:
- /docker/snailjob/logs/:/ruoyi/snailjob/logs
privileged: true
network_mode: "host"
ruoyi-gateway:
image: ruoyi/ruoyi-gateway:2.2.2
container_name: ruoyi-gateway
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "8080:8080"
volumes:
# 配置文件
- /docker/ruoyi-gateway/logs/:/ruoyi/gateway/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-auth:
image: ruoyi/ruoyi-auth:2.2.2
container_name: ruoyi-auth
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9210:9210"
volumes:
# 配置文件
- /docker/ruoyi-auth/logs/:/ruoyi/auth/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-system:
image: ruoyi/ruoyi-system:2.2.2
container_name: ruoyi-system
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9201:9201"
volumes:
# 配置文件
- /docker/ruoyi-system/logs/:/ruoyi/system/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-gen:
image: ruoyi/ruoyi-gen:2.2.2
container_name: ruoyi-gen
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9202:9202"
volumes:
# 配置文件
- /docker/ruoyi-gen/logs/:/ruoyi/gen/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-job:
image: ruoyi/ruoyi-job:2.2.2
container_name: ruoyi-job
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9203:9203"
volumes:
# 配置文件
- /docker/ruoyi-job/logs/:/ruoyi/job/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-resource:
image: ruoyi/ruoyi-resource:2.2.2
container_name: ruoyi-resource
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9204:9204"
volumes:
# 配置文件
- /docker/ruoyi-resource/logs/:/ruoyi/resource/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-workflow:
image: ruoyi/ruoyi-workflow:2.2.2
container_name: ruoyi-workflow
environment:
# 时区上海
TZ: Asia/Shanghai
ports:
- "9205:9205"
volumes:
# 配置文件
- /docker/ruoyi-workflow/logs/:/ruoyi/workflow/logs
# skywalking 探针
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
#################################################################################################
#################################### 以下为扩展根据需求搭建 #########################################
#################################################################################################
elasticsearch:
image: elasticsearch:7.17.6
container_name: elasticsearch
ports:
- "9200:9200"
- "9300:9300"
environment:
# 设置集群名称
cluster.name: elasticsearch
# 以单一节点模式启动
discovery.type: single-node
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
volumes:
- /docker/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins
- /docker/elk/elasticsearch/data:/usr/share/elasticsearch/data
- /docker/elk/elasticsearch/logs:/usr/share/elasticsearch/logs
network_mode: "host"
kibana:
image: kibana:7.17.6
container_name: kibana
ports:
- "5601:5601"
depends_on:
# kibana在elasticsearch启动之后再启动
- elasticsearch
environment:
#设置系统语言文中文
I18N_LOCALE: zh-CN
# 访问域名
# SERVER_PUBLICBASEURL: https://kibana.cloud.com
volumes:
- /docker/elk/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
network_mode: "host"
logstash:
image: logstash:7.17.6
container_name: logstash
ports:
- "4560:4560"
volumes:
- /docker/elk/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
- /docker/elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
depends_on:
- elasticsearch
network_mode: "host"
rmqnamesrv:
image: apache/rocketmq:5.2.0
container_name: rmqnamesrv
ports:
- "9876:9876"
environment:
JAVA_OPT: -server -Xms512m -Xmx512m
command: sh mqnamesrv
volumes:
- /docker/rocketmq/namesrv/logs:/home/rocketmq/logs/rocketmqlogs
network_mode: "host"
rmqbroker1:
image: apache/rocketmq:5.2.0
container_name: rmqbroker1
ports:
- "10911:10911"
- "10909:10909"
- "10912:10912"
environment:
JAVA_OPT: -server -Xms512M -Xmx512M
NAMESRV_ADDR: 127.0.0.1:9876
# --enable-proxy runs the broker and proxy in the same process; for production it is recommended to deploy the proxy separately
command: sh mqbroker --enable-proxy -c /home/rocketmq/rocketmq-5.2.0/conf/broker.conf
depends_on:
- rmqnamesrv
volumes:
- /docker/rocketmq/broker1/conf/broker.conf:/home/rocketmq/rocketmq-5.2.0/conf/broker.conf
- /docker/rocketmq/broker1/logs:/home/rocketmq/logs/rocketmqlogs
- /docker/rocketmq/broker1/store:/home/rocketmq/store
privileged: true
network_mode: "host"
rmqconsole:
image: apacherocketmq/rocketmq-dashboard:latest
container_name: rmqconsole
ports:
- "19876:19876"
environment:
JAVA_OPTS: -Dserver.port=19876 -Drocketmq.namesrv.addr=127.0.0.1:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false
depends_on:
- rmqnamesrv
network_mode: "host"
rabbitmq:
image: rabbitmq:3.13.3
container_name: rabbitmq
build:
context: ./rabbitmq
environment:
RABBITMQ_DEFAULT_USER: ruoyi
RABBITMQ_DEFAULT_PASS: ruoyi123
ports:
- "15672:15672" # 管理界面端口
- "5672:5672" # api 端口
volumes:
- /docker/rabbitmq/log:/var/log/rabbitmq
- /docker/rabbitmq/data:/var/lib/rabbitmq
network_mode: "host"
zookeeper:
image: 'bitnami/zookeeper:3.8.0'
container_name: zookeeper
ports:
- "2181:2181"
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_SERVER_ID: 1
ZOO_PORT_NUMBER: 2181
# built-in admin console, rarely needed; enable it yourself if required
ZOO_ENABLE_ADMIN_SERVER: "no"
# port of the built-in admin console
ZOO_ADMIN_SERVER_PORT_NUMBER: 8080
network_mode: "host"
kafka:
image: 'bitnami/kafka:3.6.2'
container_name: kafka
ports:
- "9092:9092"
environment:
TZ: Asia/Shanghai
# for more variables see the docs: https://github.com/bitnami/bitnami-docker-kafka/blob/master/README.md
KAFKA_BROKER_ID: 1
# listener port
KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
# advertised IP actually used by clients: 127.x locally, 192.x on the LAN, the public IP for external access
KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.31.165:9092
KAFKA_CFG_ZOOKEEPER_CONNECT: 127.0.0.1:2181
ALLOW_PLAINTEXT_LISTENER: "yes"
volumes:
- /docker/kafka/data:/bitnami/kafka/data
depends_on:
- zookeeper
network_mode: "host"
kafka-manager:
image: sheepkiller/kafka-manager:latest
container_name: kafka-manager
ports:
- "19092:19092"
environment:
ZK_HOSTS: 127.0.0.1:2181
APPLICATION_SECRET: letmein
KAFKA_MANAGER_USERNAME: ruoyi
KAFKA_MANAGER_PASSWORD: ruoyi123
KM_ARGS: -Dhttp.port=19092
depends_on:
- kafka
network_mode: "host"
sky-oap:
image: apache/skywalking-oap-server:9.7.0
container_name: sky-oap
ports:
- "11800:11800"
- "12800:12800"
environment:
JAVA_OPTS: -Xms1G -Xmx2G
# retention period of record data, in days
SW_CORE_RECORD_DATA_TTL: 7
# retention period of analyzed metrics data, in days
SW_CORE_METRICS_DATA_TTL: 7
SW_STORAGE: elasticsearch
SW_STORAGE_ES_CLUSTER_NODES: 127.0.0.1:9200
TZ: Asia/Shanghai
network_mode: "host"
sky-ui:
image: apache/skywalking-ui:9.7.0
container_name: sky-ui
ports:
- "18080:18080"
environment:
SW_SERVER_PORT: 18080
SW_OAP_ADDRESS: http://127.0.0.1:12800
TZ: Asia/Shanghai
depends_on:
- sky-oap
network_mode: "host"
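The ruoyi-* services above mount the SkyWalking agent directory at /ruoyi/skywalking/agent, but this compose file does not show how the agent is attached to the JVM (that normally happens in the service image or startup script). A minimal sketch, assuming the standard agent jar name and the OAP gRPC port 11800 exposed by sky-oap:

```yaml
# Hedged sketch: attach the mounted SkyWalking agent to one of the ruoyi-* services.
# Assumes the agent jar is /ruoyi/skywalking/agent/skywalking-agent.jar inside the container
# and that sky-oap is reachable at 127.0.0.1:11800 (host network mode).
ruoyi-auth:
  environment:
    JAVA_TOOL_OPTIONS: "-javaagent:/ruoyi/skywalking/agent/skywalking-agent.jar"
    SW_AGENT_NAME: ruoyi-auth
    SW_AGENT_COLLECTOR_BACKEND_SERVICES: 127.0.0.1:11800
```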
prometheus:
image: prom/prometheus:v2.40.1
container_name: prometheus
ports:
- "9090:9090"
volumes:
- /docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
network_mode: "host"
grafana:
image: grafana/grafana:9.2.4
container_name: grafana
environment:
TZ: Asia/Shanghai
# server root URL, used to specify the public IP or domain name
GF_SERVER_ROOT_URL: ""
# admin password
GF_SECURITY_ADMIN_PASSWORD: 123456
ports:
- "3000:3000"
volumes:
- /docker/grafana/grafana.ini:/etc/grafana/grafana.ini
- /docker/grafana:/var/lib/grafana
network_mode: "host"
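Grafana above only mounts grafana.ini and its data directory; to have it pick up the Prometheus instance defined below automatically, a datasource provisioning file can be supplied. A minimal sketch of such a file (it would additionally need a volume mapping it into /etc/grafana/provisioning/datasources, which this compose file does not define):

```yaml
# Hedged sketch of a Grafana datasource provisioning file pointing at the Prometheus service above.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://127.0.0.1:9090
    isDefault: true
```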
shardingproxy:
image: apache/shardingsphere-proxy:5.4.0
container_name: shardingsphere-proxy
command: server /data
ports:
- "3307:3307"
volumes:
- /docker/shardingproxy/conf:/opt/shardingsphere-proxy/conf
- /docker/shardingproxy/ext-lib:/opt/shardingsphere-proxy/ext-lib
environment:
- JVM_OPTS="-Djava.awt.headless=true"
network_mode: "host"
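The shardingproxy service mounts /docker/shardingproxy/conf, which holds the config-*.yaml rule files shown later in this repository. The proxy also reads account and property settings from server.yaml in the same directory; a minimal sketch for 5.4.x (not necessarily the file shipped with this project):

```yaml
# Hedged sketch of /docker/shardingproxy/conf/server.yaml: a root account plus SQL logging.
authority:
  users:
    - user: root@%
      password: root
  privilege:
    type: ALL_PERMITTED
props:
  sql-show: true
```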

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/elk/elasticsearch/data` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1 @@
Log directory: run `chmod 777 /docker/elk/elasticsearch/logs` to grant read/write permissions, otherwise logs cannot be written

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<comment>IK Analyzer extension configuration</comment>
<!-- configure your own extension dictionaries here -->
<entry key="ext_dict"></entry>
<!-- configure your own extension stopword dictionaries here -->
<entry key="ext_stopwords"></entry>
<!-- configure remote extension dictionaries here -->
<!-- <entry key="remote_ext_dict">words_location</entry> -->
<!-- configure remote extension stopword dictionaries here -->
<!-- <entry key="remote_ext_stopwords">words_location</entry> -->
</properties>

@ -0,0 +1,31 @@
也
使

@ -0,0 +1,25 @@
使

@ -0,0 +1,316 @@
世纪
位数
像素
克拉
公亩
公克
公分
公升
公尺
公担
公斤
公里
公顷
分钟
分米
加仑
千克
千米
厘米
周年
小时
平方
平方公尺
平方公里
平方分米
平方厘米
平方码
平方米
平方英寸
平方英尺
平方英里
平米
年代
年级
月份
毫升
毫米
毫克
海里
点钟
盎司
秒钟
立方公尺
立方分米
立方厘米
立方码
立方米
立方英寸
立方英尺
英亩
英寸
英尺
英里
阶段

@ -0,0 +1,33 @@
a
an
and
are
as
at
be
but
by
for
if
in
into
is
it
no
not
of
on
or
such
that
the
their
then
there
these
they
this
to
was
will
with

@ -0,0 +1,37 @@
斯基
维奇
诺夫

@ -0,0 +1,131 @@
丁
万俟
上官
东方
令狐
仲孙
公冶
公孙
公羊
单于
司徒
司空
司马
夏侯
太叔
宇文
宗政
尉迟
慕容
欧阳
淳于
澹台
濮阳
申屠
皇甫
诸葛
赫连
轩辕
钟离
长孙
闻人
闾丘
鲜于

@ -0,0 +1,56 @@
# Elasticsearch plugin descriptor file
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
#
# A plugin can be 'site', 'jvm', or both.
#
### example site plugin for "foo":
#
# foo.zip <-- zip file for the plugin, with this structure:
# _site/ <-- the contents that will be served
# plugin-descriptor.properties <-- example contents below:
#
# site=true
# description=My cool plugin
# version=1.0
#
### example jvm plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
# <arbitrary name1>.jar <-- classes, resources, dependencies
# <arbitrary nameN>.jar <-- any number of jars
# plugin-descriptor.properties <-- example contents below:
#
# jvm=true
# classname=foo.bar.BazPlugin
# description=My cool plugin
# version=2.0.0-rc1
# elasticsearch.version=2.0
# java.version=1.7
#
### mandatory elements for all plugins:
#
# 'description': simple summary of the plugin
description=IK Analyzer for Elasticsearch
#
# 'version': plugin's version
version=7.17.6
#
# 'name': the plugin name
name=analysis-ik
#
# 'classname': the name of the class to load, fully-qualified.
classname=org.elasticsearch.plugin.analysis.ik.AnalysisIkPlugin
#
# 'java.version' version of java the code is built against
# use the system property java.specification.version
# version string must be a sequence of nonnegative decimal integers
# separated by "."'s and may have leading zeros
java.version=1.8
#
# 'elasticsearch.version' version of elasticsearch compiled against
# You will have to release a new version of the plugin for each new
# elasticsearch release. This version is checked when the plugin
# is loaded so Elasticsearch will refuse to start in the presence of
# plugins with the incorrect elasticsearch.version.
elasticsearch.version=7.17.6

@ -0,0 +1,4 @@
grant {
// needed because of the hot reload functionality
permission java.net.SocketPermission "*", "connect,resolve";
};

@ -0,0 +1,4 @@
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://127.0.0.1:9200" ]
monitoring.ui.container.elasticsearch.enabled: true

@ -0,0 +1,2 @@
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://127.0.0.1:9200" ]

@ -0,0 +1,14 @@
input {
tcp {
mode => "server"
host => "0.0.0.0"
port => 4560
codec => json_lines
}
}
output {
elasticsearch {
hosts => "127.0.0.1:9200"
index => "%{[spring.application.name]}-%{+YYYY.MM.dd}"
}
}

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/grafana` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1,849 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
;data = /var/lib/grafana
# Temporary files in `data` directory older than given duration will be removed
;temp_data_lifetime = 24h
# Directory where grafana can store logs
;logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
;provisioning = conf/provisioning
#################################### Server ####################################
[server]
# Protocol (http, https, h2, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
;root_url = %(protocol)s://%(domain)s:%(http_port)s/
# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
;serve_from_sub_path = false
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
;ca_cert_path =
;client_key_path =
;client_cert_path =
;server_cert_name =
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
;log_queries =
# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
;cache_mode = private
#################################### Cache server #############################
[remote_cache]
# Either "redis", "memcached" or "database" default is "database"
;type = database
# cache connectionstring options
# database: will use Grafana primary database.
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
# memcache: 127.0.0.1:11211
;connstr =
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
# How long the data proxy waits before timing out, default is 30 seconds.
# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
;timeout = 30
# How many seconds the data proxy waits before sending a keepalive probe request.
;keep_alive_seconds = 30
# How many seconds the data proxy waits for a successful TLS Handshake before timing out.
;tls_handshake_timeout_seconds = 10
# How many seconds the data proxy will wait for a server's first response headers after
# fully writing the request headers if the request has an "Expect: 100-continue"
# header. A value of 0 will result in the body being sent immediately, without
# waiting for the server to approve.
;expect_continue_timeout_seconds = 1
# The maximum number of idle connections that Grafana will keep alive.
;max_idle_connections = 100
# How many seconds the data proxy keeps an idle connection open before timing out.
;idle_conn_timeout_seconds = 90
# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
;send_user_header = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
# Google Tag Manager ID, only enabled if you specify an id here
;google_tag_manager_id =
#################################### Security ####################################
[security]
# disable creation of admin user on first start of grafana
;disable_initial_admin_creation = false
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
# set to true if you host Grafana behind HTTPS. default is false.
;cookie_secure = false
# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
;cookie_samesite = lax
# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
;allow_embedding = false
# Set to true if you want to enable http strict transport security (HSTS) response header.
# This is only sent when HTTPS is enabled in this configuration.
# HSTS tells browsers that the site should only be accessed using HTTPS.
;strict_transport_security = false
# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
;strict_transport_security_max_age_seconds = 86400
# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
;strict_transport_security_preload = false
# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
;strict_transport_security_subdomains = false
# Set to true to enable the X-Content-Type-Options response header.
# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
# in the Content-Type headers should not be changed and be followed.
;x_content_type_options = true
# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
# when they detect reflected cross-site scripting (XSS) attacks.
;x_xss_protection = true
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
# creating and deleting snapshots.
;public_mode = false
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_refresh_interval = 5s
# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
;default_home_dashboard_path =
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Set this value to automatically add new users to the provided organization (if auto_assign_org above is set to true)
;auto_assign_org_id = 1
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Require email validation before sign up completes
;verify_email_enabled = false
# Background text for the user field on the login page
;login_hint = email or username
;password_hint = password
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
# Editors can administrate dashboard, folders and teams they create
;editors_can_admin = false
# The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes).
;user_invite_max_lifetime_duration = 24h
[auth]
# Login cookie name
;login_cookie_name = grafana_session
# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation.
;login_maximum_inactive_lifetime_duration =
# The maximum lifetime (duration) an authenticated user can be logged in since login time before being required to login. Default is 30 days (30d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month).
;login_maximum_lifetime_duration =
# How often should auth tokens be rotated for authenticated users when being active. The default is each 10 minutes.
;token_rotation_interval_minutes = 10
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
;oauth_auto_login = false
# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
;oauth_state_cookie_max_age = 600
# limit of api_key seconds to live before expiration
;api_key_max_seconds_to_live = -1
# Set to true to enable SigV4 authentication option for HTTP-based datasources.
;sigv4_auth_enabled = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
# mask the Grafana version number for unauthenticated users
;hide_version = false
#################################### GitHub Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;allowed_domains =
;team_ids =
;allowed_organizations =
#################################### GitLab Auth #########################
[auth.gitlab]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = api
;auth_url = https://gitlab.com/oauth/authorize
;token_url = https://gitlab.com/oauth/token
;api_url = https://gitlab.com/api/v4
;allowed_domains =
;allowed_groups =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
;hosted_domain =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Azure AD OAuth #######################
[auth.azuread]
;name = Azure AD
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = openid email profile
;auth_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/authorize
;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
;allowed_domains =
;allowed_groups =
#################################### Okta OAuth #######################
[auth.okta]
;name = Okta
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = openid profile email groups
;auth_url = https://<tenant-id>.okta.com/oauth2/v1/authorize
;token_url = https://<tenant-id>.okta.com/oauth2/v1/token
;api_url = https://<tenant-id>.okta.com/oauth2/v1/userinfo
;allowed_domains =
;allowed_groups =
;role_attribute_path =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;email_attribute_name = email:primary
;email_attribute_path =
;login_attribute_path =
;id_token_attribute_name =
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;allowed_domains =
;team_ids =
;allowed_organizations =
;role_attribute_path =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name
# Read the auth proxy docs for details on what the setting below enables
;enable_login_token = false
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
# LDAP background sync (Enterprise only)
# At 1 am every day
;sync_cron = "0 0 1 * * *"
;active_sync_enabled = true
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
# SMTP startTLS policy (defaults to 'OpportunisticStartTLS')
;startTLS_policy = NoStartTLS
[emails]
;welcome_email_on_sign_up = false
;templates_pattern = emails/*.html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
# Max line number of single file, default is 1000000
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Usage Quotas ########################
[quota]
; enabled = false
#### set quotas to -1 to make unlimited. ####
# limit number of users per Org.
; org_user = 10
# limit number of dashboards per Org.
; org_dashboard = 100
# limit number of data_sources per Org.
; org_data_source = 10
# limit number of api_keys per Org.
; org_api_key = 10
# limit number of orgs a user can create.
; user_org = 10
# Global limit of users.
; global_user = -1
# global limit of orgs.
; global_org = -1
# global limit of dashboards
; global_dashboard = -1
# global limit of api_keys
; global_api_key = -1
# global limit on number of logged in users.
; global_session = -1
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
;error_or_timeout = alerting
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
# Default setting for alert calculation timeout. Default value is 30
;evaluation_timeout_seconds = 30
# Default setting for alert notification timeout. Default value is 30
;notification_timeout_seconds = 30
# Default setting for max attempts to sending alert notifications. Default value is 3
;max_attempts = 3
# Makes it possible to enforce a minimal interval between evaluations, to reduce load on the backend
;min_interval_seconds = 1
# Configures for how long alert annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_annotation_age =
# Configures max number of alert annotations that Grafana stores. Default value is 0, which keeps all alert annotations.
;max_annotations_to_keep =
#################################### Annotations #########################
[annotations.dashboard]
# Dashboard annotations means that annotations are associated with the dashboard they are created on.
# Configures how long dashboard annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of dashboard annotations that Grafana stores. Default value is 0, which keeps all dashboard annotations.
;max_annotations_to_keep =
[annotations.api]
# API annotations means that the annotations have been created using the API without any
# association with a dashboard.
# Configures how long Grafana stores API annotations. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of API annotations that Grafana keeps. Default value is 0, which keeps all API annotations.
;max_annotations_to_keep =
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = true
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Graphite Publish interval
;interval_seconds = 10
# Disable total stats (stat_totals_*) metrics to be generated
;disable_total_stats = false
#If both are set, basic auth will be required for the metrics endpoint.
; basic_auth_username =
; basic_auth_password =
# Metrics environment info adds dimensions to the `grafana_environment_info` metric, which
# can expose more information about the Grafana instance.
[metrics.environment_info]
#exampleLabel1 = exampleValue1
#exampleLabel2 = exampleValue2
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Grafana.com integration ##########################
# Url used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
# sampling_server_url is the URL of a sampling manager providing a sampling strategy.
;sampling_server_url =
# Whether or not to use Zipkin propagation (x-b3- HTTP headers).
;zipkin_propagation = false
# Setting this to true disables shared RPC spans.
# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
;disable_shared_zipkin_spans = false
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =
[external_image_storage.s3]
;endpoint =
;path_style_access =
;bucket =
;region =
;path =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =
;path =
[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =
[external_image_storage.local]
# does not require any configuration
[rendering]
# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer.
# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable Grafana to render panels and dashboards to PNG-images using HTTP requests to an external service.
;server_url =
# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/.
;callback_url =
# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
# which this setting can help protect against by only allowing a certain amount of concurrent requests.
;concurrent_render_request_limit = 30
[panels]
# If set to true Grafana will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities.
;disable_sanitize_html = false
[plugins]
;enable_alpha = false
;app_tls_skip_verify_insecure = false
# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
;allow_loading_unsigned_plugins =
;marketplace_url = https://grafana.com/grafana/plugins/
#################################### Grafana Image Renderer Plugin ##########################
[plugin.grafana-image-renderer]
# Instruct headless browser instance to use a default timezone when not provided by Grafana, e.g. when rendering panel image of alert.
# See ICUs metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
# timezone IDs. Fallbacks to TZ environment variable if not set.
;rendering_timezone =
# Instruct headless browser instance to use a default language when not provided by Grafana, e.g. when rendering panel image of alert.
# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
;rendering_language =
# Instruct headless browser instance to use a default device scale factor when not provided by Grafana, e.g. when rendering panel image of alert.
# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
;rendering_viewport_device_scale_factor =
# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
# the security risk it's not recommended to ignore HTTPS errors.
;rendering_ignore_https_errors =
# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
# only capture and log error messages. When enabled, debug messages are captured and logged as well.
# For the verbose information to be included in the Grafana server log you have to adjust the rendering log level to debug, configure
# [log].filter = rendering:debug.
;rendering_verbose_logging =
# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
# Default is false. This can be useful to enable (true) when troubleshooting.
;rendering_dumpio =
# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments are separated with the comma character.
;rendering_args =
# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
# compatible with the plugin.
;rendering_chrome_bin =
# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
;rendering_mode =
# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
# and will cluster using browser instances.
# Mode 'context' will cluster using incognito pages.
;rendering_clustering_mode =
# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently..
;rendering_clustering_max_concurrency =
# Limit the maximum viewport width, height and device scale factor that can be requested.
;rendering_viewport_max_width =
;rendering_viewport_max_height =
;rendering_viewport_max_device_scale_factor =
# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
# a port not in use.
;grpc_host =
;grpc_port =
[enterprise]
# Path to a valid Grafana Enterprise license.jwt file
;license_path =
[feature_toggles]
# enable features, separated by spaces
;enable =
[date_formats]
# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
# Default system date format used in time range picker and other places where full time is displayed
;full_date = YYYY-MM-DD HH:mm:ss
# Used by graph and other places where we only show small intervals
;interval_second = HH:mm:ss
;interval_minute = HH:mm
;interval_hour = MM/DD HH:mm
;interval_day = MM/DD
;interval_month = YYYY-MM
;interval_year = YYYY
# Experimental feature
;use_browser_locale = false
# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
;default_timezone = browser

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/kafka/data` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1,4 @@
# cluster configuration file: one node per line as ip:port
127.0.0.1:8848
127.0.0.1:8848
127.0.0.1:8848
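cluster.conf above lists the Nacos node addresses (the three identical entries are placeholders to replace with the real cluster nodes). On the application side, services reach Nacos through standard Spring Cloud Alibaba properties; a minimal, illustrative sketch:

```yaml
# Hedged sketch of the client-side configuration pointing at the Nacos cluster above
# (spring.cloud.nacos.* are standard Spring Cloud Alibaba properties; values are illustrative).
spring:
  cloud:
    nacos:
      server-addr: 127.0.0.1:8848
      discovery:
        server-addr: ${spring.cloud.nacos.server-addr}
      config:
        server-addr: ${spring.cloud.nacos.server-addr}
```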

@ -0,0 +1,89 @@
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
# limit request body size
client_max_body_size 100m;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
upstream server {
ip_hash;
# gateway address
server 127.0.0.1:8080;
# server 127.0.0.1:8081;
}
server {
listen 80;
server_name localhost;
# https configuration example: start
#listen 443 ssl;
# place certificates directly under /docker/nginx/cert/ and just change the certificate file names; there is no need to change the certificate path
#ssl on;
#ssl_certificate /etc/nginx/cert/xxx.local.crt; # /etc/nginx/cert/ is the docker-mapped path and must not be changed
#ssl_certificate_key /etc/nginx/cert/xxx.local.key; # /etc/nginx/cert/ is the docker-mapped path and must not be changed
#ssl_session_timeout 5m;
#ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
#ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
#ssl_prefer_server_ciphers on;
# https configuration example: end
# demo environment configuration: block all requests except GET and POST
# if ($request_method !~* GET|POST) {
# rewrite ^/(.*)$ /403;
# }
# location = /403 {
# default_type application/json;
# return 200 '{"msg":"演示模式,不允许操作","code":500}';
# }
# block external access to internal actuator endpoints
location ~ ^(/[^/]*)?/actuator.*(/.*)?$ {
return 403;
}
location / {
root /usr/share/nginx/html; # docker-mapped path, must not be changed
try_files $uri $uri/ /index.html;
index index.html index.htm;
}
location /prod-api/ {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 86400s;
# SSE / WebSocket parameters
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_buffering off;
proxy_cache off;
proxy_pass http://server/;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}

@ -0,0 +1,42 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: 'Prometheus'
static_configs:
- targets: ['127.0.0.1:9090']
- job_name: 'Grafana'
static_configs:
- targets: ['127.0.0.1:3000']
- job_name: 'Nacos'
metrics_path: '/nacos/actuator/prometheus'
static_configs:
- targets: ['127.0.0.1:8848']
- job_name: RuoYi-Cloud-Plus
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /actuator/prometheus
scheme: http
http_sd_configs:
- url: 'http://127.0.0.1:9100/actuator/prometheus/sd'
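The RuoYi-Cloud-Plus scrape job above expects every service to expose /actuator/prometheus and relies on the HTTP service-discovery endpoint at 127.0.0.1:9100 to list the scrape targets. On the application side this is generic Spring Boot actuator configuration (assuming micrometer-registry-prometheus is on the classpath); a minimal sketch, not necessarily the project's exact settings:

```yaml
# Hedged sketch: expose the /actuator/prometheus endpoint that the scrape job above expects.
management:
  endpoints:
    web:
      exposure:
        include: health,prometheus
  metrics:
    tags:
      application: ${spring.application.name}
```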

@ -0,0 +1,11 @@
FROM rabbitmq:3.13.3-management
LABEL maintainer="Lion Li"
ADD ./rabbitmq_delayed_message_exchange-3.13.0.ez /plugins
RUN rabbitmq-plugins enable rabbitmq_delayed_message_exchange
ENTRYPOINT ["rabbitmq-server"]

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/rabbitmq/data` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1 @@
Log directory: run `chmod 777 /docker/rabbitmq/log` to grant read/write permissions, otherwise logs cannot be written

@ -0,0 +1,28 @@
# redis password
requirepass ruoyi123
# key event listener configuration
# notify-keyspace-events Ex
# directory for persistence files
dir /redis/data
# RDB configuration
# snapshot if at least 1 key changed within 15 minutes
save 900 1
# snapshot if at least 10 keys changed within 5 minutes
save 300 10
# snapshot if at least 10000 keys changed within 1 minute
save 60 10000
# enable RDB compression
rdbcompression yes
# RDB file name, the default is fine
dbfilename dump.rdb
# enable AOF
appendonly yes
# AOF file name
appendfilename "appendonly.aof"
# fsync policy: no = never sync, everysec = once per second, always = sync on every write (slower)
# appendfsync always
appendfsync everysec
# appendfsync no
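With requirepass ruoyi123 enabled above, every client has to authenticate. A minimal sketch of the matching Spring Boot 3 connection settings (host and port assume the Redis container runs in host network mode on the default port; the project may wire Redis differently, for example through Redisson):

```yaml
# Hedged sketch of Spring Boot 3 settings matching this redis.conf (illustrative values).
spring:
  data:
    redis:
      host: 127.0.0.1
      port: 6379
      password: ruoyi123
```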

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/redis/data` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1,26 @@
# name of the cluster this broker belongs to; more can be configured if there are many nodes
brokerClusterName = DefaultCluster
# broker name; a master and its slaves use the same name to indicate their master/slave relationship
brokerName = broker1
# 0 means master; values greater than 0 identify different slaves
brokerId = 0
# hour of the day at which messages are deleted, default is 4 AM
deleteWhen = 04
# how long messages are kept on disk, in hours
fileReservedTime = 48
# one of SYNC_MASTER, ASYNC_MASTER, SLAVE; sync/async describes how data is replicated between master and slave
brokerRole = ASYNC_MASTER
# flush policy, ASYNC_FLUSH or SYNC_FLUSH; SYNC_FLUSH returns success only after the message is written to disk, ASYNC_FLUSH does not wait
flushDiskType = ASYNC_FLUSH
# IP address of the server the broker runs on. **Very important: in master/slave mode the slave syncs data based on the master's brokerIP2 and replication fails if it is not configured. Set brokerIP1 to an IP reachable by clients; on dual-NIC servers (such as Alibaba Cloud) the master must configure both ip1 and ip2, the slave only needs ip1
# this IP depends on the environment: 127.x for local use, 192.x for LAN, the public IP for external access
brokerIP1 = 192.168.31.165
# port the broker listens on for external services
listenPort = 10911
# whether the broker may auto-create topics
autoCreateTopicEnable = true
# whether the broker may auto-create subscription groups
autoCreateSubscriptionGroup = true
# enable epoll on Linux
useEpollNativeSelector = true

@ -0,0 +1 @@
Log directory: run `chmod 777 /docker/rocketmq/broker1/logs` to grant read/write permissions, otherwise logs cannot be written

@ -0,0 +1 @@
Data directory: run `chmod 777 /docker/rocketmq/broker1/store` to grant read/write permissions, otherwise data cannot be written

@ -0,0 +1 @@
Log directory: run `chmod 777 /docker/rocketmq/namesrv/logs` to grant read/write permissions, otherwise logs cannot be written

@ -0,0 +1,119 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of encrypt rule.
#
######################################################################################################
#
#databaseName: encrypt_db
#
#dataSources:
# ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !ENCRYPT
# encryptors:
# aes_encryptor:
# type: AES
# props:
# aes-key-value: 123456abc
# rc4_encryptor:
# type: RC4
# props:
# rc4-key-value: 123456abc
# tables:
# t_encrypt:
# columns:
# user_id:
# cipher:
# name: user_cipher
# encryptorName: aes_encryptor
# order_id:
# cipher:
# name: order_encrypt
# encryptorName: rc4_encryptor
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#
#databaseName: encrypt_db
#
#dataSources:
# ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !ENCRYPT
# encryptors:
# aes_encryptor:
# type: AES
# props:
# aes-key-value: 123456abc
# rc4_encryptor:
# type: RC4
# props:
# rc4-key-value: 123456abc
# tables:
# t_encrypt:
# columns:
# user_id:
# cipher:
# name: user_cipher
# encryptorName: aes_encryptor
# order_id:
# cipher:
# name: order_encrypt
# encryptorName: rc4_encryptor

@ -0,0 +1,127 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of mask rule.
#
######################################################################################################
#
#databaseName: mask_db
#
#dataSources:
# ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !MASK
# tables:
# t_user:
# columns:
# password:
# maskAlgorithm: md5_mask
# email:
# maskAlgorithm: mask_before_special_chars_mask
# telephone:
# maskAlgorithm: keep_first_n_last_m_mask
#
# maskAlgorithms:
# md5_mask:
# type: MD5
# mask_before_special_chars_mask:
# type: MASK_BEFORE_SPECIAL_CHARS
# props:
# special-chars: '@'
# replace-char: '*'
# keep_first_n_last_m_mask:
# type: KEEP_FIRST_N_LAST_M
# props:
# first-n: 3
# last-m: 4
# replace-char: '*'
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#
#databaseName: mask_db
#
#dataSources:
# ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !MASK
# tables:
# t_user:
# columns:
# password:
# maskAlgorithm: md5_mask
# email:
# maskAlgorithm: mask_before_special_chars_mask
# telephone:
# maskAlgorithm: keep_first_n_last_m_mask
#
# maskAlgorithms:
# md5_mask:
# type: MD5
# mask_before_special_chars_mask:
# type: MASK_BEFORE_SPECIAL_CHARS
# props:
# special-chars: '@'
# replace-char: '*'
# keep_first_n_last_m_mask:
# type: KEEP_FIRST_N_LAST_M
# props:
# first-n: 3
# last-m: 4
# replace-char: '*'

@ -0,0 +1,117 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of readwrite-splitting rule.
#
######################################################################################################
#
#databaseName: readwrite_splitting_db
#
#dataSources:
# primary_ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_primary_ds
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# replica_ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_replica_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# replica_ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_replica_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !READWRITE_SPLITTING
# dataSources:
# readwrite_ds:
# writeDataSourceName: primary_ds
# readDataSourceNames:
# - replica_ds_0
# - replica_ds_1
# loadBalancerName: random
# loadBalancers:
# random:
# type: RANDOM
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#databaseName: readwrite_splitting_db
#
#dataSources:
# write_ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_write_ds?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# read_ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_read_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# read_ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_read_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !READWRITE_SPLITTING
# dataSources:
# readwrite_ds:
# writeDataSourceName: write_ds
# readDataSourceNames:
# - read_ds_0
# - read_ds_1
# loadBalancerName: random
# loadBalancers:
# random:
# type: RANDOM

@ -0,0 +1,175 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of shadow rule.
#
######################################################################################################
#
#databaseName: shadow_db
#
#dataSources:
# ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# shadow_ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !SHADOW
# dataSources:
# shadowDataSource:
# productionDataSourceName: ds
# shadowDataSourceName: shadow_ds
# tables:
# t_order:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# t_order_item:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_update_match_algorithm
# - user_id_select_match_algorithm
# t_address:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# - sql_hint_algorithm
# shadowAlgorithms:
# user_id_insert_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: insert
# column: user_id
# regex: "[1]"
# user_id_update_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: update
# column: user_id
# regex: "[1]"
# user_id_select_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: select
# column: user_id
# regex: "[1]"
# sql_hint_algorithm:
# type: SQL_HINT
# props:
# foo: bar
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#
#databaseName: shadow_db
#
#dataSources:
# ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# shadow_ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !SHADOW
# dataSources:
# shadowDataSource:
# productionDataSourceName: ds
# shadowDataSourceName: shadow_ds
# tables:
# t_order:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# t_order_item:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_update_match_algorithm
# - user_id_select_match_algorithm
# t_address:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# - sql_hint_algorithm
# shadowAlgorithms:
# user_id_insert_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: insert
# column: user_id
# regex: "[1]"
# user_id_update_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: update
# column: user_id
# regex: "[1]"
# user_id_select_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: select
# column: user_id
# regex: "[1]"
# sql_hint_algorithm:
# type: SQL_HINT
# props:
# foo: bar
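#
# Illustrative note (editorial comment, not part of the upstream template): with the REGEX_MATCH
# algorithms above, an INSERT into t_order whose user_id value matches the regex "[1]" (e.g.
# user_id = 1) is routed to shadow_ds, while non-matching values stay on the production data
# source ds. t_address can additionally be forced onto the shadow database through the SQL_HINT
# algorithm. Uncomment the template and adjust the JDBC URLs and credentials before use.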

@ -0,0 +1,124 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is a configuration of the sharding rule.
#
######################################################################################################
databaseName: data-center_db
dataSources:
ds_0:
url: jdbc:mysql://localhost:3306/data-center_0?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&autoReconnect=true&rewriteBatchedStatements=true
username: root
password: root
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
ds_1:
url: jdbc:mysql://localhost:3306/data-center_1?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&autoReconnect=true&rewriteBatchedStatements=true
username: root
password: root
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
rules:
- !SHARDING
  tables: # Data sharding rule configuration
    t_order: # Logical table name for orders
actualDataNodes: ds_${0..1}.t_order_${0..1}
      databaseStrategy: # Database sharding strategy
standard:
shardingColumn: user_id
shardingAlgorithmName: database_user_inline
      tableStrategy: # Table sharding strategy
standard:
shardingColumn: order_id
shardingAlgorithmName: t_order_inline
keyGenerateStrategy:
column: order_id
keyGeneratorName: snowflake
# auditStrategy:
# auditorNames:
# - sharding_key_required_auditor
# allowHintDisable: true
    t_order_item: # Logical table name for order items
actualDataNodes: ds_${0..1}.t_order_item_${0..1}
      databaseStrategy: # Database sharding strategy
standard:
shardingColumn: user_id
shardingAlgorithmName: database_user_inline
      tableStrategy: # Table sharding strategy
standard:
shardingColumn: order_id
shardingAlgorithmName: t_order_item_inline
keyGenerateStrategy:
column: order_item_id
keyGeneratorName: snowflake
  bindingTables: # Binding table rule list
- t_order,t_order_item
# defaultDatabaseStrategy:
# standard:
# shardingColumn: user_id
# shardingAlgorithmName: database_user_inline
# defaultTableStrategy:
# none:
# defaultAuditStrategy:
# auditorNames:
# - sharding_key_required_auditor
# allowHintDisable: true
  # Sharding algorithm configuration
shardingAlgorithms:
database_user_inline:
type: INLINE
props:
algorithm-expression: ds_${user_id % 2}
    t_order_inline: # Sharding algorithm for the order table
type: INLINE
props:
algorithm-expression: t_order_${order_id % 2}
allow-range-query-with-inline-sharding: true
    t_order_item_inline: # Sharding algorithm for the order-item table
type: INLINE
props:
algorithm-expression: t_order_item_${order_id % 2}
allow-range-query-with-inline-sharding: true
  # Distributed key generator configuration
keyGenerators:
snowflake:
type: SNOWFLAKE
props:
worker-id: 1
# auditors:
# sharding_key_required_auditor:
# type: DML_SHARDING_CONDITIONS
# - !BROADCAST
# tables:
# - t_address
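#
# Illustrative routing example (editorial comment, not read by the proxy): with the INLINE
# expressions above, a t_order row with user_id = 7 and order_id = 12 lands in ds_1
# (7 % 2 = 1) and in physical table t_order_0 (12 % 2 = 0), i.e. ds_1.t_order_0. The matching
# t_order_item rows are routed the same way, and because t_order and t_order_item are declared
# as binding tables, joins between them avoid cross-database cartesian routing.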

@ -0,0 +1,92 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# If you want to configure governance, authorization and proxy properties, please refer to this file.
#
######################################################################################################
# mode:
# type: Cluster
# repository:
# type: ZooKeeper
# props:
# namespace: governance_ds
# server-lists: localhost:2181
# retryIntervalMilliseconds: 500
# timeToLiveSeconds: 60
# maxRetries: 3
# operationTimeoutMilliseconds: 500
authority:
users:
- user: root@%
password: root
- user: sharding
password: sharding
privilege:
type: ALL_PERMITTED
transaction:
defaultType: XA
providerType: Atomikos
sqlParser:
sqlCommentParseEnabled: false
sqlStatementCache:
initialCapacity: 2000
maximumSize: 65535
parseTreeCache:
initialCapacity: 128
maximumSize: 1024
logging:
loggers:
- loggerName: ShardingSphere-SQL
additivity: true
level: INFO
props:
enable: false
sqlFederation:
sqlFederationEnabled: false
executionPlanCache:
initialCapacity: 2000
maximumSize: 65535
props:
system-log-level: INFO
max-connections-size-per-query: 1
kernel-executor-size: 16 # Infinite by default.
proxy-frontend-flush-threshold: 128 # The default value is 128.
# sql-show is the same as props in logger ShardingSphere-SQL, and its priority is lower than logging rule
sql-show: false
check-table-metadata-enabled: false
# Proxy backend query fetch size. A larger value may increase the memory usage of ShardingSphere Proxy.
# The default value is -1, which means set the minimum value for different JDBC drivers.
proxy-backend-query-fetch-size: -1
proxy-frontend-executor-size: 0 # Proxy frontend executor size. The default value is 0, which means let Netty decide.
proxy-frontend-max-connections: 0 # Less than or equal to 0 means no limitation.
proxy-default-port: 3307 # Proxy default port.
proxy-netty-backlog: 1024 # Proxy netty backlog.
cdc-server-port: 33071 # CDC server port
proxy-frontend-ssl-enabled: false
proxy-frontend-ssl-cipher: ''
proxy-frontend-ssl-version: TLSv1.2,TLSv1.3
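#
# Illustrative note (editorial comment): with the authority section above, clients log in to the
# proxy as root/root or sharding/sharding with ALL_PERMITTED privileges, and the proxy listens on
# port 3307 (proxy-default-port) with frontend SSL disabled.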

@ -0,0 +1,14 @@
# Notes
This project uses the official `skywalking` `agent` probe, trimmed down and extended.
<br>
Plugins that this project is very unlikely to use have been removed from the official plugin library.
<br>
Plugins that the project may need have been kept.
<br>
Some plugins not supported by the official distribution have been added.
<br>
Loading too many plugins causes serious performance problems, so avoid using more than you need.
# Extending
If you need a feature that is not covered by the bundled plugins, download the plugin from the official `skywalking` site.
<br>
Place the downloaded plugin into the `plugins` directory and it will take effect.

@ -0,0 +1,233 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache SkyWalking Subcomponents:
The Apache SkyWalking project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
raphw (byte-buddy) 1.14.9: http://bytebuddy.net/ , Apache 2.0
Google: grpc-java 1.53.0: https://github.com/grpc/grpc-java, Apache 2.0
Google: gson 2.8.9: https://github.com/google/gson , Apache 2.0
Google: proto-google-common-protos 2.0.1: https://github.com/googleapis/googleapis , Apache 2.0
Google: jsr305 3.0.2: http://central.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.pom , Apache 2.0
Google: guava 32.0.1: https://github.com/google/guava , Apache 2.0
netty 4.1.100: https://github.com/netty/netty/blob/4.1/LICENSE.txt, Apache 2.0
========================================================================
BSD licenses
========================================================================
The following components are provided under a BSD license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
asm 9.2:https://gitlab.ow2.org , BSD-3-Clause

@ -0,0 +1,299 @@
Apache SkyWalking
Copyright 2017-2024 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
========================================================================
grpc-java NOTICE
========================================================================
Copyright 2014, gRPC Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-----------------------------------------------------------------------
This product contains a modified portion of 'OkHttp', an open source
HTTP & SPDY client for Android and Java applications, which can be obtained
at:
* LICENSE:
* okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
* HOMEPAGE:
* https://github.com/square/okhttp
* LOCATION_IN_GRPC:
* okhttp/third_party/okhttp
This product contains a modified portion of 'Netty', an open source
networking library, which can be obtained at:
* LICENSE:
* netty/third_party/netty/LICENSE.txt (Apache License 2.0)
* HOMEPAGE:
* https://netty.io
* LOCATION_IN_GRPC:
* netty/third_party/netty
========================================================================
grpc NOTICE
========================================================================
Copyright 2014 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
========================================================================
netty NOTICE
========================================================================
The Netty Project
=================
Please visit the Netty web site for more information:
* http://netty.io/
Copyright 2014 The Netty Project
The Netty Project licenses this file to you under the Apache License,
version 2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Also, please refer to each LICENSE.<component>.txt file, which is located in
the 'license' directory of the distribution file, for the license terms of the
components that this product depends on.
-------------------------------------------------------------------------------
This product contains the extensions to Java Collections Framework which has
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
* LICENSE:
* license/LICENSE.jsr166y.txt (Public Domain)
* HOMEPAGE:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
This product contains a modified version of Robert Harder's Public Domain
Base64 Encoder and Decoder, which can be obtained at:
* LICENSE:
* license/LICENSE.base64.txt (Public Domain)
* HOMEPAGE:
* http://iharder.sourceforge.net/current/java/base64/
This product contains a modified portion of 'Webbit', an event based
WebSocket and HTTP server, which can be obtained at:
* LICENSE:
* license/LICENSE.webbit.txt (BSD License)
* HOMEPAGE:
* https://github.com/joewalnes/webbit
This product contains a modified portion of 'SLF4J', a simple logging
facade for Java, which can be obtained at:
* LICENSE:
* license/LICENSE.slf4j.txt (MIT License)
* HOMEPAGE:
* http://www.slf4j.org/
This product contains a modified portion of 'Apache Harmony', an open source
Java SE, which can be obtained at:
* NOTICE:
* license/NOTICE.harmony.txt
* LICENSE:
* license/LICENSE.harmony.txt (Apache License 2.0)
* HOMEPAGE:
* http://archive.apache.org/dist/harmony/
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
and decompression library written by Matthew J. Francis. It can be obtained at:
* LICENSE:
* license/LICENSE.jbzip2.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jbzip2/
This product contains a modified portion of 'libdivsufsort', a C API library to construct
the suffix array and the Burrows-Wheeler transformed string for any input string of
a constant-size alphabet written by Yuta Mori. It can be obtained at:
* LICENSE:
* license/LICENSE.libdivsufsort.txt (MIT License)
* HOMEPAGE:
* https://github.com/y-256/libdivsufsort
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
which can be obtained at:
* LICENSE:
* license/LICENSE.jctools.txt (ASL2 License)
* HOMEPAGE:
* https://github.com/JCTools/JCTools
This product optionally depends on 'JZlib', a re-implementation of zlib in
pure Java, which can be obtained at:
* LICENSE:
* license/LICENSE.jzlib.txt (BSD style License)
* HOMEPAGE:
* http://www.jcraft.com/jzlib/
This product optionally depends on 'Compress-LZF', a Java library for encoding and
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
* LICENSE:
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/ning/compress
This product optionally depends on 'lz4', a LZ4 Java compression
and decompression library written by Adrien Grand. It can be obtained at:
* LICENSE:
* license/LICENSE.lz4.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jpountz/lz4-java
This product optionally depends on 'lzma-java', a LZMA Java compression
and decompression library, which can be obtained at:
* LICENSE:
* license/LICENSE.lzma-java.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jponge/lzma-java
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
and decompression library written by William Kinney. It can be obtained at:
* LICENSE:
* license/LICENSE.jfastlz.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jfastlz/
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
interchange format, which can be obtained at:
* LICENSE:
* license/LICENSE.protobuf.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/protobuf
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
a temporary self-signed X.509 certificate when the JVM does not provide the
equivalent functionality. It can be obtained at:
* LICENSE:
* license/LICENSE.bouncycastle.txt (MIT License)
* HOMEPAGE:
* http://www.bouncycastle.org/
This product optionally depends on 'Snappy', a compression library produced
by Google Inc, which can be obtained at:
* LICENSE:
* license/LICENSE.snappy.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/snappy
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
* LICENSE:
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
* HOMEPAGE:
* http://www.jboss.org/jbossmarshalling
This product optionally depends on 'Caliper', Google's micro-
benchmarking framework, which can be obtained at:
* LICENSE:
* license/LICENSE.caliper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/caliper
This product optionally depends on 'Apache Commons Logging', a logging
framework, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-logging.txt (Apache License 2.0)
* HOMEPAGE:
* http://commons.apache.org/logging/
This product optionally depends on 'Apache Log4J', a logging framework, which
can be obtained at:
* LICENSE:
* license/LICENSE.log4j.txt (Apache License 2.0)
* HOMEPAGE:
* http://logging.apache.org/log4j/
This product optionally depends on 'Aalto XML', an ultra-high performance
non-blocking XML processor, which can be obtained at:
* LICENSE:
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
* HOMEPAGE:
* http://wiki.fasterxml.com/AaltoHome
This product contains a modified version of 'HPACK', a Java implementation of
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
* LICENSE:
* license/LICENSE.hpack.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/twitter/hpack
This product contains a modified portion of 'Apache Commons Lang', a Java library
provides utilities for the java.lang API, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-lang.txt (Apache License 2.0)
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build.
* LICENSE:
* license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/takari/maven-wrapper
