refactor(JDAirPort.Ems.Back): add Kylin OS deployment command-line notes, rework the SQL Server syntax used by the menu features, and update the IoT data code

boardTest
zch 2 months ago
parent fb766cc673
commit 41672fa3f5

@@ -1,4 +1,4 @@
信明橡塑 energy & payroll settlement system backend <br/>
Jiaodong Airport energy & payroll settlement system backend <br/>
Front/back-end separated edition - SQL Server data source <br/>
Started from the 3.8.2 separated edition with a SQL Server conversion; tested without issues, used in production, and kept in sync <br/>
1. (Done) Integrated distributed lock and dynamic multi-datasource<br/>

@@ -4,7 +4,7 @@ import java.util.List;
import java.util.Map;
import com.os.ems.record.domain.RecordIotenvInstant;
import io.lettuce.core.dynamic.annotation.Param;
import org.apache.ibatis.annotations.Param;
/**
* Mapper
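The hunk above replaces Lettuce's annotation (io.lettuce.core.dynamic.annotation.Param) with MyBatis's org.apache.ibatis.annotations.Param; only the latter is honored when MyBatis binds the #{...} and ${...} references in the XML statements below. A minimal sketch of the intended shape, with hypothetical method and parameter names rather than the project's actual interface:

import java.util.List;
import org.apache.ibatis.annotations.Param;   // MyBatis binding annotation, not the Lettuce one
import com.os.ems.record.domain.RecordIotenvInstant;

// Hypothetical mapper interface sketch; names are illustrative.
public interface RecordIotenvInstantMapper {
    // The @Param names must match the #{record...} / ${tableName} references in the XML;
    // with the Lettuce annotation MyBatis would only see param1/param2 and binding would fail.
    List<RecordIotenvInstant> selectRecordIotenvInstantListFromTables(
            @Param("tableNames") List<String> tableNames,
            @Param("record") RecordIotenvInstant record);
}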

@@ -21,7 +21,20 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
</resultMap>
<sql id="selectRecordIotenvInstantVo">
select objid, monitorId, temperature, humidity, illuminance, noise, concentration, vibration_speed, vibration_displacement, vibration_acceleration, vibration_temp, collectTime, recodeTime from record_iotenv_instant
select objid,
monitorId,
temperature,
humidity,
illuminance,
noise,
concentration,
vibration_speed,
vibration_displacement,
vibration_acceleration,
vibration_temp,
collectTime,
recodeTime
from record_iotenv_instant
</sql>
<select id="selectRecordIotenvInstantList" parameterType="RecordIotenvInstant" resultMap="RecordIotenvInstantResult">
@@ -39,8 +52,10 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="vibrationTemp != null "> and vibration_temp = #{vibrationTemp}</if>
<if test="collectTime != null "> and collectTime = #{collectTime}</if>
<if test="recodeTime != null "> and recodeTime = #{recodeTime}</if>
<if test="params.beginCollectTime != null adn params.endCollectTime != null ">collectTime &lt;= #{params.endCollectTime} and collectTime &gt;= #{params.beginCollectTime} </if>
</where>
<if test="beginRecordTime!= null and endRecordTime != null">
AND recodeTime BETWEEN #{beginRecordTime} AND #{endRecordTime}
</if>
</where>
</select>
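The reworked statement above filters on a top-level beginRecordTime/endRecordTime pair instead of the old params.beginCollectTime/params.endCollectTime map entries. A sketch of a service-side call that would drive this filter, assuming the entity exposes matching setters (the setter names and value types below are inferred from the <if> tests and are not confirmed by this diff):

import java.util.List;
import com.os.ems.record.domain.RecordIotenvInstant;

// Hypothetical query helper; RecordIotenvInstantMapper and the setters are assumed names.
public class RecordIotenvInstantQueryExample {
    public List<RecordIotenvInstant> queryAprilRecords(RecordIotenvInstantMapper mapper) {
        RecordIotenvInstant query = new RecordIotenvInstant();
        query.setMonitorId("JD-ENV-001");                 // illustrative monitor id
        query.setBeginRecordTime("2025-04-01 00:00:00");  // assumed setter backing the <if> test
        query.setEndRecordTime("2025-04-30 23:59:59");    // assumed setter backing the <if> test
        return mapper.selectRecordIotenvInstantList(query);
    }
}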
<select id="selectRecordIotenvInstantByObjid" parameterType="Long" resultMap="RecordIotenvInstantResult">
@@ -111,33 +126,6 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
</delete>
<!-- Query the IoT data list from the specified table -->
<select id="selectRecordIotenvInstantListFromTable" resultMap="RecordIotenvInstantResult">
SELECT objid, monitorId, temperature, humidity, illuminance, noise, concentration,
vibration_speed, vibration_displacement, vibration_acceleration, vibration_temp,
collectTime, recodeTime
FROM ${tableName}
<where>
<if test="record.monitorId != null and record.monitorId != ''"> and monitorId like concat('%', #{record.monitorId}, '%')</if>
<if test="record.temperature != null "> and temperature = #{record.temperature}</if>
<if test="record.humidity != null "> and humidity = #{record.humidity}</if>
<if test="record.illuminance != null "> and illuminance = #{record.illuminance}</if>
<if test="record.noise != null "> and noise = #{record.noise}</if>
<if test="record.concentration != null "> and concentration = #{record.concentration}</if>
<if test="record.vibrationSpeed != null "> and vibration_speed = #{record.vibrationSpeed}</if>
<if test="record.vibrationDisplacement != null "> and vibration_displacement = #{record.vibrationDisplacement}</if>
<if test="record.vibrationAcceleration != null "> and vibration_acceleration = #{record.vibrationAcceleration}</if>
<if test="record.vibrationTemp != null "> and vibration_temp = #{record.vibrationTemp}</if>
<if test="record.collectTime != null "> and collectTime = #{record.collectTime}</if>
<if test="record.recodeTime != null "> and recodeTime = #{record.recodeTime}</if>
<if test="record.params.beginCollectTime != null and record.params.endCollectTime != null">
AND collectTime BETWEEN #{record.params.beginCollectTime} AND #{record.params.endCollectTime}
</if>
</where>
</select>
<!-- Query the IoT data list from multiple tables with UNION ALL -->
<select id="selectRecordIotenvInstantListFromTables" resultMap="RecordIotenvInstantResult">
<foreach collection="tableNames" item="tableName" separator=" UNION ALL ">
@@ -146,26 +134,25 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
collectTime, recodeTime
FROM ${tableName}
<where>
<if test="recordIotenvInstant.monitorId != null and recordIotenvInstant.monitorId != ''"> and monitorId = #{recordIotenvInstant.monitorId}</if>
<if test="recordIotenvInstant.temperature != null "> and temperature = #{recordIotenvInstant.temperature}</if>
<if test="recordIotenvInstant.humidity != null "> and humidity = #{recordIotenvInstant.humidity}</if>
<if test="recordIotenvInstant.illuminance != null "> and illuminance = #{recordIotenvInstant.illuminance}</if>
<if test="recordIotenvInstant.noise != null "> and noise = #{recordIotenvInstant.noise}</if>
<if test="recordIotenvInstant.concentration != null "> and concentration = #{recordIotenvInstant.concentration}</if>
<if test="recordIotenvInstant.vibrationSpeed != null "> and vibration_speed = #{recordIotenvInstant.vibrationSpeed}</if>
<if test="recordIotenvInstant.vibrationDisplacement != null "> and vibration_displacement = #{recordIotenvInstant.vibrationDisplacement}</if>
<if test="recordIotenvInstant.vibrationAcceleration != null "> and vibration_acceleration = #{recordIotenvInstant.vibrationAcceleration}</if>
<if test="recordIotenvInstant.vibrationTemp != null "> and vibration_temp = #{recordIotenvInstant.vibrationTemp}</if>
<if test="recordIotenvInstant.collectTime != null "> and collectTime = #{recordIotenvInstant.collectTime}</if>
<if test="recordIotenvInstant.recodeTime != null "> and recodeTime = #{recordIotenvInstant.recodeTime}</if>
<if test="record.monitorId != null and record.monitorId != ''"> and monitorId like concat('%', #{record.monitorId}, '%')</if>
<if test="record.temperature != null "> and temperature = #{record.temperature}</if>
<if test="record.humidity != null "> and humidity = #{record.humidity}</if>
<if test="record.illuminance != null "> and illuminance = #{record.illuminance}</if>
<if test="record.noise != null "> and noise = #{record.noise}</if>
<if test="record.concentration != null "> and concentration = #{record.concentration}</if>
<if test="record.vibrationSpeed != null "> and vibration_speed = #{record.vibrationSpeed}</if>
<if test="record.vibrationDisplacement != null "> and vibration_displacement = #{record.vibrationDisplacement}</if>
<if test="record.vibrationAcceleration != null "> and vibration_acceleration = #{record.vibrationAcceleration}</if>
<if test="record.vibrationTemp != null "> and vibration_temp = #{record.vibrationTemp}</if>
<if test="record.collectTime != null "> and collectTime = #{record.collectTime}</if>
<if test="record.recodeTime != null "> and recodeTime = #{record.recodeTime}</if>
<if test="record.params.beginCollectTime != null and record.params.endCollectTime != null">
AND collectTime BETWEEN #{record.params.beginCollectTime} AND #{record.params.endCollectTime}
<if test="recordIotenvInstant.params.beginRecordTime!= null and recordIotenvInstant.params.endRecordTime != null">
AND recodeTime BETWEEN #{recordIotenvInstant.params.beginRecordTime} AND #{recordIotenvInstant.params.endRecordTime}
</if>
</where>
</foreach>
ORDER BY collectTime ASC
ORDER BY recodeTime ASC
</select>
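Note that ${tableName} in the <foreach> above is spliced into the SQL as a literal, so the tableNames collection must come from trusted code rather than user input. A sketch of building that list, assuming a per-month record_iotenv_instant_yyyyMM naming convention (the actual convention is not shown in this diff; the "check whether the table exists" statement below could be used to skip missing months):

import java.time.YearMonth;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.List;

// Hypothetical helper that builds the tableNames collection for the UNION ALL query.
// The record_iotenv_instant_yyyyMM suffix is an assumed convention, not taken from this diff.
public class IotenvTableNames {
    private static final DateTimeFormatter SUFFIX = DateTimeFormatter.ofPattern("yyyyMM");

    public static List<String> monthlyTables(YearMonth start, YearMonth end) {
        List<String> tables = new ArrayList<>();
        for (YearMonth ym = start; !ym.isAfter(end); ym = ym.plusMonths(1)) {
            tables.add("record_iotenv_instant_" + ym.format(SUFFIX));
        }
        return tables;
    }
}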
<!-- Check whether the table exists -->

@@ -28,7 +28,7 @@
</resultMap>
<sql id="selectMenuVo">
select menu_id, menu_name, parent_id, order_num, path, component, query, is_frame, is_cache, menu_type, visible, status, ifnull(perms,'') as perms, icon, create_time
select menu_id, menu_name, parent_id, order_num, path, component, `query`, is_frame, is_cache, menu_type, visible, status, ifnull(perms,'') as perms, icon, create_time
from sys_menu
</sql>
@@ -49,37 +49,37 @@
</select>
<select id="selectMenuTreeAll" resultMap="SysMenuResult">
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.query, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.`query`, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
from sys_menu m where m.menu_type in ('M', 'C') and m.status = 0
order by m.parent_id, m.order_num
</select>
<select id="selectMenuListByUserId" parameterType="SysMenu" resultMap="SysMenuResult">
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.query, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.`query`, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role ro on ur.role_id = ro.role_id
where ur.user_id = #{params.userId}
<if test="menuName != null and menuName != ''">
AND m.menu_name like concat('%', #{menuName}, '%')
AND m.menu_name like concat('%', #{menuName}, '%')
</if>
<if test="visible != null and visible != ''">
AND m.visible = #{visible}
AND m.visible = #{visible}
</if>
<if test="status != null and status != ''">
AND m.status = #{status}
AND m.status = #{status}
</if>
order by m.parent_id, m.order_num
</select>
<select id="selectMenuTreeByUserId" parameterType="Long" resultMap="SysMenuResult">
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.query, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
<select id="selectMenuTreeByUserId" parameterType="Long" resultMap="SysMenuResult">
select distinct m.menu_id, m.parent_id, m.menu_name, m.path, m.component, m.`query`, m.visible, m.status, ifnull(m.perms,'') as perms, m.is_frame, m.is_cache, m.menu_type, m.icon, m.order_num, m.create_time
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role ro on ur.role_id = ro.role_id
left join sys_user u on ur.user_id = u.user_id
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role ro on ur.role_id = ro.role_id
left join sys_user u on ur.user_id = u.user_id
where u.user_id = #{userId} and m.menu_type in ('M', 'C') and m.status = 0 AND ro.status = 0
order by m.parent_id, m.order_num
</select>
@@ -87,34 +87,34 @@
<select id="selectMenuListByRoleId" resultType="Long">
select m.menu_id
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
where rm.role_id = #{roleId}
<if test="menuCheckStrictly">
and m.menu_id not in (select m.parent_id from sys_menu m inner join sys_role_menu rm on m.menu_id = rm.menu_id and rm.role_id = #{roleId})
</if>
left join sys_role_menu rm on m.menu_id = rm.menu_id
where rm.role_id = #{roleId}
<if test="menuCheckStrictly">
and m.menu_id not in (select m.parent_id from sys_menu m inner join sys_role_menu rm on m.menu_id = rm.menu_id and rm.role_id = #{roleId})
</if>
order by m.parent_id, m.order_num
</select>
<select id="selectMenuPerms" resultType="String">
select distinct m.perms
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
</select>
<select id="selectMenuPermsByUserId" parameterType="Long" resultType="String">
select distinct m.perms
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role r on r.role_id = ur.role_id
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_user_role ur on rm.role_id = ur.role_id
left join sys_role r on r.role_id = ur.role_id
where m.status = '0' and r.status = '0' and ur.user_id = #{userId}
</select>
<select id="selectMenuPermsByRoleId" parameterType="Long" resultType="String">
select distinct m.perms
from sys_menu m
left join sys_role_menu rm on m.menu_id = rm.menu_id
left join sys_role_menu rm on m.menu_id = rm.menu_id
where m.status = '0' and rm.role_id = #{roleId}
</select>
@@ -124,13 +124,12 @@
</select>
<select id="hasChildByMenuId" resultType="Integer">
select count(1) from sys_menu where parent_id = #{menuId}
select count(1) from sys_menu where parent_id = #{menuId}
</select>
<select id="checkMenuNameUnique" parameterType="SysMenu" resultMap="SysMenuResult">
select top(1) menu_id, menu_name, parent_id, order_num, path, component, query, is_frame, is_cache, menu_type, visible, status, ifnull(perms,'') as perms, icon, create_time
from sys_menu
where menu_name=#{menuName} and parent_id = #{parentId}
<include refid="selectMenuVo"/>
where menu_name=#{menuName} and parent_id = #{parentId} limit 1
</select>
<update id="updateMenu" parameterType="SysMenu">
@@ -151,7 +150,7 @@
<if test="icon !=null and icon != ''">icon = #{icon},</if>
<if test="remark != null and remark != ''">remark = #{remark},</if>
<if test="updateBy != null and updateBy != ''">update_by = #{updateBy},</if>
update_time = now()
update_time = sysdate()
</set>
where menu_id = #{menuId}
</update>
@@ -192,12 +191,12 @@
<if test="icon != null and icon != ''">#{icon},</if>
<if test="remark != null and remark != ''">#{remark},</if>
<if test="createBy != null and createBy != ''">#{createBy},</if>
now()
sysdate()
)
</insert>
<delete id="deleteMenuById" parameterType="Long">
delete from sys_menu where menu_id = #{menuId}
delete from sys_menu where menu_id = #{menuId}
</delete>
</mapper>

@@ -10,7 +10,7 @@
<name>XMXS-OS</name>
<url>http://www.ruoyi.vip</url>
<description>信明橡塑 operations and maintenance system</description>
<description>Jiaodong Airport operations and maintenance system</description>
<properties>
<haiwei.version>3.8.7</haiwei.version>

@@ -0,0 +1,432 @@
Last login: Tue Apr 22 21:18:53 2025 from 192.168.10.11
Last login: Tue Apr 22 17:16:45 2025 from 192.168.10.11
[root@localhost ~]# ls
anaconda-ks.cfg original-ks.cfg
[root@localhost ~]# cd /
[root@localhost /]# ls
backup bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
[root@localhost /]# cat /etc/os-release
NAME="Kylin Linux Advanced Server"
VERSION="V10 (Lance)"
ID="kylin"
VERSION_ID="V10"
PRETTY_NAME="Kylin Linux Advanced Server V10 (Lance)"
ANSI_COLOR="0;31"
[root@localhost /]# uname -a
Linux localhost.localdomain 4.19.90-52.22.v2207.ky10.aarch64 #1 SMP Tue Mar 14 11:52:45 CST 2023 aarch64 aarch64 aarch64 GNU/Linux
[root@localhost /]# free -h
total used free shared buff/cache available
Mem: 29Gi 2.6Gi 25Gi 42Mi 1.0Gi 23Gi
Swap: 15Gi 0B 15Gi
[root@localhost /]# df -h
文件系统 容量 已用 可用 已用% 挂载点
devtmpfs 15G 0 15G 0% /dev
tmpfs 15G 64K 15G 1% /dev/shm
tmpfs 15G 31M 15G 1% /run
tmpfs 15G 0 15G 0% /sys/fs/cgroup
/dev/mapper/klas-root 380G 12G 368G 4% /
tmpfs 15G 64K 15G 1% /tmp
/dev/sda2 1014M 165M 850M 17% /boot
/dev/sda1 599M 6.5M 593M 2% /boot/efi
/dev/sdb1 7.3T 52G 7.3T 1% /media/raid1
tmpfs 3.0G 768K 3.0G 1% /run/user/1000
tmpfs 3.0G 0 3.0G 0% /run/user/0
[root@localhost /]# lscpu
架构: aarch64
CPU 运行模式: 64-bit
字节序: Little Endian
CPU: 64
在线 CPU 列表: 0-63
每个核的线程数: 1
每个座的核数: 64
座: 1
NUMA 节点: 8
厂商 ID Phytium
型号: 2
型号名称: FT-2000+/64
步进: 0x1
BogoMIPS 100.00
L1d 缓存: 2 MiB
L1i 缓存: 2 MiB
L2 缓存: 32 MiB
NUMA 节点0 CPU 0-7
NUMA 节点1 CPU 8-15
NUMA 节点2 CPU 16-23
NUMA 节点3 CPU 24-31
NUMA 节点4 CPU 32-39
NUMA 节点5 CPU 40-47
NUMA 节点6 CPU 48-55
NUMA 节点7 CPU 56-63
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Spec store bypass: Not affected
Vulnerability Spectre v1: Mitigation; __user pointer sanitization
Vulnerability Spectre v2: Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
标记: fp asimd evtstrm crc32 cpuid
[root@localhost /]# ^C
[root@localhost ~]# mkdir -p ~/tidb-deploy
[root@localhost ~]# cd ~/tidb-deploy
[root@localhost tidb-deploy]# tar -zxvf /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz.tabby-upload
tidb-community-server-v8.5.1-linux-arm64/
tidb-community-server-v8.5.1-linux-arm64/diag-v1.6.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/7.influxdb.json
tidb-community-server-v8.5.1-linux-arm64/387.tidb-dashboard.json
tidb-community-server-v8.5.1-linux-arm64/925.playground.json
tidb-community-server-v8.5.1-linux-arm64/blackbox_exporter-v0.23.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/6474.pd.json
tidb-community-server-v8.5.1-linux-arm64/8760.ctl.json
tidb-community-server-v8.5.1-linux-arm64/timestamp.json
tidb-community-server-v8.5.1-linux-arm64/influxdb-v2.5.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiup-v1.16.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiflash-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/alertmanager-v0.26.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiup-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tidb-v8.5.1-linux-arm64.tar.gz
gzip: stdin: unexpected end of file
tar: 归档文件中异常的 EOF
tar: 归档文件中异常的 EOF
tar: Error is not recoverable: exiting now
[root@localhost tidb-deploy]# ^C
[root@localhost tidb-deploy]# md5sum /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz.tabby-upload
md5sum: /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz.tabby-upload: 没有那个文件或目录
[root@localhost tidb-deploy]# md5sum /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz
f74a3df26fede5680d776d784798ee9a /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz
[root@localhost tidb-deploy]# cd /root/tidb-deploy
[root@localhost tidb-deploy]# tar xzvf tidb-community-server-${version}-linux-amd64.tar.gz && \
> sh tidb-community-server-${version}-linux-amd64/local_install.sh && \
>
>
>
> ^C
[root@localhost tidb-deploy]# [root@localhost tidb-deploy]# tar xzvf tidb-community-server-${version}-linux-amd64.tar.gz && \
> > sh tidb-community-server-${version}-linux-amd64/local_install.sh && \
>
>
> ^C
[root@localhost tidb-deploy]# tar xzvf tidb-community-server-v8.5.1-linux-amd64.tar.gz
tar (child): tidb-community-server-v8.5.1-linux-amd64.tar.gz无法 open: 没有那个文件或目录
tar (child): Error is not recoverable: exiting now
tar: Child returned status 2
tar: Error is not recoverable: exiting now
[root@localhost tidb-deploy]# tar xzvf tidb-community-server-v8.5.1-linux-amd64.tar.gz && \
> > sh tidb-community-server-v8.5.1-linux-amd64/local_install.sh && \
> > source /home/tidb/.bash_profile
tar (child): tidb-community-server-v8.5.1-linux-amd64.tar.gz无法 open: 没有那个文件或目录
tar (child): Error is not recoverable: exiting now
tar: Child returned status 2
tar: Error is not recoverable: exiting now
[root@localhost tidb-deploy]# tar xf tidb-community-toolkit-v8.5.1-linux-amd64.tar.gz
tar: tidb-community-toolkit-v8.5.1-linux-amd64.tar.gz无法 open: 没有那个文件或目录
tar: Error is not recoverable: exiting now
[root@localhost tidb-deploy]# tar xzvf /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/
tidb-community-server-v8.5.1-linux-arm64/diag-v1.6.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/7.influxdb.json
tidb-community-server-v8.5.1-linux-arm64/387.tidb-dashboard.json
tidb-community-server-v8.5.1-linux-arm64/925.playground.json
tidb-community-server-v8.5.1-linux-arm64/blackbox_exporter-v0.23.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/6474.pd.json
tidb-community-server-v8.5.1-linux-arm64/8760.ctl.json
tidb-community-server-v8.5.1-linux-arm64/timestamp.json
tidb-community-server-v8.5.1-linux-arm64/influxdb-v2.5.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiup-v1.16.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiflash-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/alertmanager-v0.26.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tiup-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tidb-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/cluster-v1.16.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/2236.diag.json
tidb-community-server-v8.5.1-linux-arm64/insight-v0.4.2-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/playground-v1.16.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/7.alertmanager.json
tidb-community-server-v8.5.1-linux-arm64/node_exporter-v1.5.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/1003.tiup.json
tidb-community-server-v8.5.1-linux-arm64/1003.cluster.json
tidb-community-server-v8.5.1-linux-arm64/grafana-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/tikv-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/5595.prometheus.json
tidb-community-server-v8.5.1-linux-arm64/9.node_exporter.json
tidb-community-server-v8.5.1-linux-arm64/5542.grafana.json
tidb-community-server-v8.5.1-linux-arm64/snapshot.json
tidb-community-server-v8.5.1-linux-arm64/root.json
tidb-community-server-v8.5.1-linux-arm64/keys/
tidb-community-server-v8.5.1-linux-arm64/keys/dcc60371ff9e7d84-index.json
tidb-community-server-v8.5.1-linux-arm64/keys/c15eaea99981b351-root.json
tidb-community-server-v8.5.1-linux-arm64/keys/0fd8aafae75f0a2b-snapshot.json
tidb-community-server-v8.5.1-linux-arm64/keys/879d996bd9f27ec6-pingcap.json
tidb-community-server-v8.5.1-linux-arm64/keys/44c82672eb98a1d9-root.json
tidb-community-server-v8.5.1-linux-arm64/keys/14e2a603e0a16fc6-timestamp.json
tidb-community-server-v8.5.1-linux-arm64/keys/c67d7c794870f14a-root.json
tidb-community-server-v8.5.1-linux-arm64/pd-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/9.blackbox_exporter.json
tidb-community-server-v8.5.1-linux-arm64/tiproxy-v1.3.0-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/18.insight.json
tidb-community-server-v8.5.1-linux-arm64/ctl-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/565.tiproxy.json
tidb-community-server-v8.5.1-linux-arm64/local_install.sh
tidb-community-server-v8.5.1-linux-arm64/6195.tikv.json
tidb-community-server-v8.5.1-linux-arm64/5991.tiflash.json
tidb-community-server-v8.5.1-linux-arm64/tidb-dashboard-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/1.root.json
tidb-community-server-v8.5.1-linux-arm64/10035.tidb.json
tidb-community-server-v8.5.1-linux-arm64/prometheus-v8.5.1-linux-arm64.tar.gz
tidb-community-server-v8.5.1-linux-arm64/1.index.json
[root@localhost tidb-deploy]# sh /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64/local_install.sh
Disable telemetry success
Successfully set mirror to /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64
Detected shell: bash
Shell profile: /root/.bash_profile
/root/.bash_profile has been modified to to add tiup to PATH
open a new terminal or source /root/.bash_profile to use it
Installed path: /root/.tiup/bin/tiup
===============================================
1. source /root/.bash_profile
2. Have a try: tiup playground
===============================================
[root@localhost tidb-deploy]# source /home/tidb/.bash_profile
-bash: /home/tidb/.bash_profile: 没有那个文件或目录
[root@localhost tidb-deploy]# cd
[root@localhost ~]# cd
[root@localhost ~]# cd
[root@localhost ~]# source /home/tidb/.bash_profile
-bash: /home/tidb/.bash_profile: 没有那个文件或目录
[root@localhost ~]# sh /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64/local_install.sh
Disable telemetry success
Successfully set mirror to /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64
Detected shell: bash
Shell profile: /root/.bash_profile
/root/.bash_profile has been modified to to add tiup to PATH
open a new terminal or source /root/.bash_profile to use it
Installed path: /root/.tiup/bin/tiup
===============================================
1. source /root/.bash_profile
2. Have a try: tiup playground
===============================================
[root@localhost ~]# sh /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64/local_install.sh
Disable telemetry success
Successfully set mirror to /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64
Detected shell: bash
Shell profile: /root/.bash_profile
/root/.bash_profile has been modified to to add tiup to PATH
open a new terminal or source /root/.bash_profile to use it
Installed path: /root/.tiup/bin/tiup
===============================================
1. source /root/.bash_profile
2. Have a try: tiup playground
===============================================
[root@localhost ~]#
[root@localhost ~]# source /root/.bash_profile
[root@localhost ~]# tar xf /tidb-community-toolkit-v8.5.1-linux-amd64.tar.gz
[root@localhost ~]# ls -ld /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64 /root/tidb-community-toolkit-v8.5.1-linux-amd64
drwxr-xr-x 3 root root 4096 1月 17 16:02 /root/tidb-community-toolkit-v8.5.1-linux-amd64
drwxr-xr-x 3 root root 4096 1月 17 16:00 /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64
[root@localhost ~]# cd /root/tidb-deploy/tidb-community-server-v8.5.1-linux-arm64
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# cp -rp keys ~/.tiup/
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup mirror merge ../tidb-community-toolkit-${version}-linux-amd64
Error: stat ../tidb-community-toolkit--linux-amd64: no such file or directory
[root@localhost tidb-community-server-v8.5.1-linux-arm64]#
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup mirror merge /root/tidb-community-toolkit-v8.5.1-linux-amd64
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup cluster template > topology.yaml
A new version of cluster is available: -> v1.16.1
To update this component: tiup update cluster
To update all components: tiup update --all
The component `cluster` version is not installed; downloading from repository.
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup cluster template > topology.yaml
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup cluster list
Name User Version Path PrivateKey
---- ---- ------- ---- ----------
[root@localhost tidb-community-server-v8.5.1-linux-arm64]# tiup cluster display tidb-test
Error: Cluster tidb-test not found
Verbose debug logs has been written to /root/.tiup/logs/tiup-cluster-debug-2025-04-22-22-29-17.log.
[root@localhost tidb-community-server-v8.5.1-linux-arm64]#
[root@localhost ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): zang010303
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in zang010303
Your public key has been saved in zang010303.pub
The key fingerprint is:
SHA256:niYxVopzm9Mt9SWaqoZnipyo3xHhjkaPDo+6CQq2Esw root@localhost.localdomain
The key's randomart image is:
+---[RSA 3072]----+
| |
| |
| . . |
| . o o |
|o . = * S . . . |
|.E = = B + + o |
|+o+ +.= * + . |
|*X.+..++ o |
|%+B o=... |
+----[SHA256]-----+
[root@localhost ~]# ssh-copy-id root@localhost
/usr/bin/ssh-copy-id: ERROR: No identities found
[root@localhost ~]#
[root@localhost ~]# rm zang010303 zang010303.pub
rm是否删除普通文件 'zang010303'y
rm是否删除普通文件 'zang010303.pub'y
[root@localhost ~]#
[root@localhost ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa
Your public key has been saved in /root/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:yk3sqXsspUNG9xcTX91JRdIy22h3ipk92ykp4F52F98 root@localhost.localdomain
The key's randomart image is:
+---[RSA 3072]----+
| o+B|
| . oo=|
| o B |
| ... o = +|
| . .S. O +.|
| .o=.o. = + +|
| oo+= .+ ..=E|
| +.o.o..oo..|
| o=.. . . |
+----[SHA256]-----+
[root@localhost ~]# ssh-copy-id root@localhost
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is SHA256:kWUu1O+Rl9nPUDs594h2gNsfUBLfb0WusbxxZZtTNnU.
Are you sure you want to continue connecting (yes/no/[fingerprint])?
[root@localhost ~]# ssh-copy-id root@localhost
\/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Authorized users only. All activities may be monitored and reported.
root@localhost's password:
Permission denied, please try again.
root@localhost's password:
Permission denied, please try again.
root@localhost's password:
root@localhost: Permission denied (publickey,gssapi-keyex,gssapi-with-mic,password).
[root@localhost ~]# ssh-copy-id root@localhost
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Authorized users only. All activities may be monitored and reported.
root@localhost's password:
Permission denied, please try again.
root@localhost's password:
Permission denied, please try again.
root@localhost's password:
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Authorized users only. All activities may be monitored and reported.
root@localhost's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@localhost'"
and check to make sure that only the key(s) you wanted were added.
[root@localhost ~]# source /root/.bash_profile
[root@localhost ~]# tiup playground
A new version of playground is available: -> v1.16.1
To update this component: tiup update playground
To update all components: tiup update --all
The component `playground` version is not installed; downloading from repository.
Note: Version constraint is resolved to v8.5.1. If you'd like to use other versions:
Use exact version: tiup playground v7.1.0
Use version range: tiup playground ^5
Use nightly: tiup playground nightly
The component `pd` version v8.5.1 is not installed; downloading from repository.
Start pd instance: v8.5.1
The component `tikv` version v8.5.1 is not installed; downloading from repository.
Start tikv instance: v8.5.1
The component `tidb` version v8.5.1 is not installed; downloading from repository.
Start tidb instance: v8.5.1
Waiting for tidb instances ready
127.0.0.1:4000 ... Done
The component `prometheus` version v8.5.1 is not installed; downloading from repository.
The component `tiflash` version v8.5.1 is not installed; downloading from repository.
Start tiflash instance: v8.5.1
Waiting for tiflash instances ready
127.0.0.1:3930 ... Done
🎉 TiDB Playground Cluster is started, enjoy!
Connect TiDB: mysql --comments --host 127.0.0.1 --port 4000 -u root
TiDB Dashboard: http://127.0.0.1:2379/dashboard
Grafana: http://127.0.0.1:3000
[root@localhost ~]# mysql --host 127.0.0.1 --port 4000 -u root
ERROR 2002 (HY000): Can't connect to MySQL server on '127.0.0.1' (115)
[root@localhost ~]# ^C
[root@localhost ~]# ps -ef | grep tidb
root 4012 3574 0 15:18 pts/0 00:00:00 grep tidb
[root@localhost ~]# ^C
[root@localhost ~]# tiup playground
Note: Version constraint is resolved to v8.5.1. If you'd like to use other versions:
Use exact version: tiup playground v7.1.0
Use version range: tiup playground ^5
Use nightly: tiup playground nightly
Start pd instance: v8.5.1
Start tikv instance: v8.5.1
Start tidb instance: v8.5.1
Waiting for tidb instances ready
127.0.0.1:4000 ... Done
Start tiflash instance: v8.5.1
Waiting for tiflash instances ready
127.0.0.1:3930 ... Done
🎉 TiDB Playground Cluster is started, enjoy!
Connect TiDB: mysql --comments --host 127.0.0.1 --port 4000 -u root
TiDB Dashboard: http://127.0.0.1:2379/dashboard
Grafana: http://127.0.0.1:3000
^CGot signal interrupt (Component: playground ; PID: 4049)
Playground receive signal: interrupt
Wait tiflash(4609) to quit...
Wait grafana(4603) to quit...
Wait prometheus(4522) to quit...
Wait ng-monitoring(4523) to quit...
Grafana quit
prometheus quit
ng-monitoring quit
tiflash quit
Wait tidb(4063) to quit...
tidb quit
Wait tikv(4062) to quit...
tikv quit
Wait pd(4061) to quit...
pd quit
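The playground above exposes a MySQL-compatible endpoint on 127.0.0.1:4000 (the production cluster deployed later listens on 10.42.0.1:4000). A minimal JDBC connectivity check for the Java backend, assuming MySQL Connector/J is on the classpath and the playground's default empty root password:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Minimal sketch: connect to the TiDB playground started above via the MySQL protocol.
public class TidbConnectTest {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:mysql://127.0.0.1:4000/test?useSSL=false&serverTimezone=Asia/Shanghai";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT tidb_version()")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}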

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,490 @@
Authorized users only. All activities may be monitored and reported.
Activate the web console with: systemctl enable --now cockpit.socket
Last login: Sun Apr 27 17:57:43 2025 from 10.42.0.12
[root@localhost ~]# tiup cluster display tidb-cluster
Cluster type: tidb
Cluster name: tidb-cluster
Cluster version: v8.5.1
Deploy user: tidb
SSH type: builtin
Dashboard URL: http://10.42.0.1:2379/dashboard
Grafana URL: http://10.42.0.1:3000
ID Role Host Ports OS/Arch Status Data Dir Deploy Dir
-- ---- ---- ----- ------- ------ -------- ----------
10.42.0.1:9093 alertmanager 10.42.0.1 9093/9094 linux/aarch64 Up /tidb-data/alertmanager-9093 /tidb-deploy/alertmanager-9093
10.42.0.1:3000 grafana 10.42.0.1 3000 linux/aarch64 Up - /tidb-deploy/grafana-3000
10.42.0.1:2379 pd 10.42.0.1 2379/2380 linux/aarch64 Up|L|UI /tidb-data/pd-2379 /tidb-deploy/pd-2379
10.42.0.1:9090 prometheus 10.42.0.1 9090/12020 linux/aarch64 Up /tidb-data/prometheus-9090 /tidb-deploy/prometheus-9090
10.42.0.1:4000 tidb 10.42.0.1 4000/10080 linux/aarch64 Up - /tidb-deploy/tidb-4000
10.42.0.1:20160 tikv 10.42.0.1 20160/20180 linux/aarch64 Up /tidb-data/tikv-20160 /tidb-deploy/tikv-20160
Total nodes: 6
[root@localhost ~]# ps -ef | grep tidb
tidb 2005 1 0 13:46 ? 00:00:00 bin/alertmanager/alertmanager --config.file=conf/alertmanager.yml --storage.path=/tidb-data/alertmanager-9093 --data.retention=120h --log.level=info --web.listen-address=0.0.0.0:9093 --web.external-url=http://10.42.0.1:9093 --cluster.peer=10.42.0.1:9094 --cluster.listen-address=10.42.0.1:9094
tidb 2006 1 0 13:46 ? 00:00:00 bin/blackbox_exporter/blackbox_exporter --web.listen-address=:9115 --log.level=info --config.file=conf/blackbox.yml
tidb 2007 1 5 13:46 ? 00:00:14 bin/bin/grafana-server --homepath=/tidb-deploy/grafana-3000/bin --config=/tidb-deploy/grafana-3000/conf/grafana.ini
tidb 2009 1 0 13:46 ? 00:00:01 bin/node_exporter/node_exporter --web.listen-address=:9100 --collector.tcpstat --collector.mountstats --collector.meminfo_numa --collector.buddyinfo --collector.vmstat.fields=^.* --log.level=info
tidb 2010 1 22 13:46 ? 00:00:54 bin/pd-server --name=pd-10.42.0.1-2379 --client-urls=http://0.0.0.0:2379 --advertise-client-urls=http://10.42.0.1:2379 --peer-urls=http://0.0.0.0:2380 --advertise-peer-urls=http://10.42.0.1:2380 --data-dir=/tidb-data/pd-2379 --initial-cluster=pd-10.42.0.1-2379=http://10.42.0.1:2380 --config=conf/pd.toml --log-file=/tidb-deploy/pd-2379/log/pd.log
tidb 2011 1 24 13:46 ? 00:00:58 bin/prometheus/prometheus --config.file=/tidb-deploy/prometheus-9090/conf/prometheus.yml --web.listen-address=:9090 --web.external-url=http://10.42.0.1:9090/ --web.enable-admin-api --log.level=info --storage.tsdb.path=/tidb-data/prometheus-9090 --storage.tsdb.retention=30d
tidb 2013 1 6 13:46 ? 00:00:16 bin/tidb-server -P 4000 --status=10080 --host=0.0.0.0 --advertise-address=10.42.0.1 --store=tikv --initialize-insecure --path=10.42.0.1:2379 --log-slow-query=/tidb-deploy/tidb-4000/log/tidb_slow_query.log --config=conf/tidb.toml --log-file=/tidb-deploy/tidb-4000/log/tidb.log
tidb 2014 1 10 13:46 ? 00:00:24 bin/tikv-server --addr 0.0.0.0:20160 --advertise-addr 10.42.0.1:20160 --status-addr 0.0.0.0:20180 --advertise-status-addr 10.42.0.1:20180 --pd 10.42.0.1:2379 --data-dir /tidb-data/tikv-20160 --config conf/tikv.toml --log-file /tidb-deploy/tikv-20160/log/tikv.log
tidb 2018 2005 0 13:46 ? 00:00:00 /bin/bash /tidb-deploy/alertmanager-9093/scripts/run_alertmanager.sh
tidb 2019 2018 0 13:46 ? 00:00:00 tee -i -a /tidb-deploy/alertmanager-9093/log/alertmanager.log
tidb 2024 2006 0 13:46 ? 00:00:00 /bin/bash /tidb-deploy/monitor-9100/scripts/run_blackbox_exporter.sh
tidb 2026 2009 0 13:46 ? 00:00:00 /bin/bash /tidb-deploy/monitor-9100/scripts/run_node_exporter.sh
tidb 2027 2024 0 13:46 ? 00:00:00 tee -i -a /tidb-deploy/monitor-9100/log/blackbox_exporter.log
tidb 2028 2026 0 13:46 ? 00:00:00 tee -i -a /tidb-deploy/monitor-9100/log/node_exporter.log
tidb 2030 2011 0 13:46 ? 00:00:00 /bin/bash scripts/ng-wrapper.sh
tidb 2031 2011 0 13:46 ? 00:00:00 /bin/bash /tidb-deploy/prometheus-9090/scripts/run_prometheus.sh
tidb 2032 2031 0 13:46 ? 00:00:00 tee -i -a /tidb-deploy/prometheus-9090/log/prometheus.log
tidb 2035 2030 1 13:46 ? 00:00:04 bin/ng-monitoring-server --config /tidb-deploy/prometheus-9090/conf/ngmonitoring.toml
root 4537 4346 0 13:51 pts/0 00:00:00 grep tidb
[root@localhost ~]# systemctl status redis
● redis.service - Redis In-Memory Data Store
Loaded: loaded (/etc/systemd/system/redis.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2025-04-29 13:46:59 CST; 4min 11s ago
Process: 1981 ExecStart=/media/redis/redis-5.0.5/src/redis-server /media/redis/redis-5.0.5/redis.conf >
Main PID: 1989 (redis-server)
Tasks: 4
Memory: 13.8M
CGroup: /system.slice/redis.service
└─1989 /media/redis/redis-5.0.5/src/redis-server 127.0.0.1:6379
4月 29 13:46:59 localhost.localdomain systemd[1]: Starting Redis In-Memory Data Store...
4月 29 13:46:59 localhost.localdomain systemd[1]: Started Redis In-Memory Data Store.
lines 1-12/12 (END)
[root@localhost ~]# rm /media/tao_iot/dist
rm: 无法删除 '/media/tao_iot/dist': 是一个目录
[root@localhost ~]# systemctl status nginx
● nginx.service - nginx service
Loaded: loaded (/usr/lib/systemd/system/nginx.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2025-04-29 13:46:59 CST; 7min ago
Process: 1980 ExecStart=/usr/local/nginx/sbin/nginx (code=exited, status=0/SUCCESS)
Main PID: 1988 (nginx)
Tasks: 2
Memory: 7.5M
CGroup: /system.slice/nginx.service
├─1988 nginx: master process /usr/local/nginx/sbin/nginx
└─1993 nginx: worker process
4月 29 13:46:59 localhost.localdomain systemd[1]: Starting nginx service...
4月 29 13:46:59 localhost.localdomain systemd[1]: Started nginx service.
[root@localhost ~]# rm -r /media/tao_iot/dist
rm是否进入目录'/media/tao_iot/dist'? y
rm是否删除普通文件 '/media/tao_iot/dist/favicon.ico'y
rm是否进入目录'/media/tao_iot/dist/html'? y
rm是否删除普通文件 '/media/tao_iot/dist/html/ie.html'y
rm是否删除普通文件 '/media/tao_iot/dist/html/ie.html.gz'y
rm是否删除目录 '/media/tao_iot/dist/html'y
rm是否删除普通文件 '/media/tao_iot/dist/index.html'y
rm是否删除普通文件 '/media/tao_iot/dist/index.html.gz'y
rm是否进入目录'/media/tao_iot/dist/model'? y
rm是否删除普通文件 '/media/tao_iot/dist/model/daxingjichang.mtl'y
rm是否删除普通文件 '/media/tao_iot/dist/model/daxingjichang.obj'y
rm是否删除普通文件 '/media/tao_iot/dist/model/jiaodongjichang.mtl'y
rm是否删除普通文件 '/media/tao_iot/dist/model/jiaodongjichang.obj'y
rm是否进入目录'/media/tao_iot/dist/model/maps'? y
rm是否删除普通文件 '/media/tao_iot/dist/model/maps/3d66Model-1176892-files-21.JPG'y
rm是否删除普通文件 '/media/tao_iot/dist/model/maps/3d66Model-1176892-files-23.jpg'y
rm是否删除普通文件 '/media/tao_iot/dist/model/maps/3d66Model-1176892-files-3.jpg'y
rm是否删除普通文件 '/media/tao_iot/dist/model/maps/wenli2.png'y
rm是否删除目录 '/media/tao_iot/dist/model/maps'y
rm是否删除目录 '/media/tao_iot/dist/model'y
rm是否删除普通文件 '/media/tao_iot/dist/robots.txt'y
rm是否进入目录'/media/tao_iot/dist/static'? y
rm是否进入目录'/media/tao_iot/dist/static/css'? y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/app.94234889.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/app.94234889.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-05d75466.9dad4dc5.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-05d75466.9dad4dc5.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-082d8c34.49e9b769.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-082d8c34.49e9b769.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-10f4b0e8.772dd061.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-10f4b0e8.772dd061.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-17ee1abe.ecea2c5f.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-17ee1abe.ecea2c5f.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-18f96050.03c3a260.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-2025613a.d63b57ad.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-26395caf.470fc1fa.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-26395caf.470fc1fa.css.gz'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-291854d3.da0f15ef.css'y
rm是否删除普通文件 '/media/tao_iot/dist/static/css/chunk-291854d3.da0f15ef.css.gz'^C
[root@localhost ~]# rm -rf /media/tao_iot/dist
[root@localhost ~]# rm -rf /media/tao_iot/haiwei-admin.jar
[root@localhost ~]# mkdir -p /media/tao_iot/dist
[root@localhost ~]# unzip -oq dist.zip -d /media/tao_iot/dist
unzip: cannot find or open dist.zip, dist.zip.zip or dist.zip.ZIP.
[root@localhost ~]# cd /media/tao_iot
[root@localhost tao_iot]# unzip -oq dist.zip -d /media/tao_iot/dist
[root@localhost tao_iot]# systemctl status nginx
● nginx.service - nginx service
Loaded: loaded (/usr/lib/systemd/system/nginx.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2025-04-29 13:46:59 CST; 29min ago
Process: 1980 ExecStart=/usr/local/nginx/sbin/nginx (code=exited, status=0/SUCCESS)
Main PID: 1988 (nginx)
Tasks: 2
Memory: 9.5M
CGroup: /system.slice/nginx.service
├─1988 nginx: master process /usr/local/nginx/sbin/nginx
└─1993 nginx: worker process
4月 29 13:46:59 localhost.localdomain systemd[1]: Starting nginx service...
4月 29 13:46:59 localhost.localdomain systemd[1]: Started nginx service.
[root@localhost tao_iot]# systemctl daemon-reload
[root@localhost tao_iot]# cat /usr/local/nginx/conf/nginx.conf
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
#gzip on;
# Enable gzip compression
gzip on;
# Compression threshold: only responses larger than 1K are compressed; usually no need to change
gzip_min_length 1k;
# Compression buffers
gzip_buffers 16 64K;
# Compression protocol version, default 1.1; use 1.0 if the front end is squid 2.5
gzip_http_version 1.1;
# Compression level 1-10: the higher the number, the better the compression and the longer it takes
gzip_comp_level 3;
# File types to compress
gzip_types text/plain application/x-javascript text/css application/xml application/javascript;
# Related to caching services such as Squid; when on, adds "Vary: Accept-Encoding" to the response header
gzip_vary on;
# IE6 does not handle gzip well, so do not gzip for it
gzip_disable "MSIE [1-6]\.";
server {
listen 6061;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root /media/tao_iot/dist;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
location /prod-api/ {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:8020/;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
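The /prod-api/ location in the configuration above forwards requests to the Spring Boot backend on 127.0.0.1:8020 (the trailing slash on proxy_pass strips the /prod-api/ prefix) and passes the original client address along in X-Real-IP / X-Forwarded-For. A sketch of reading those headers on the Java side, assuming a Spring Boot 2.x / javax.servlet stack; the controller and mapping are illustrative, not part of the project:

import javax.servlet.http.HttpServletRequest;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Illustrative controller: would be reachable as /prod-api/whoami through the proxy above.
@RestController
public class WhoAmIController {
    @GetMapping("/whoami")
    public String whoami(HttpServletRequest request) {
        // X-Real-IP is set by the proxy_set_header directives; fall back to the socket address when absent.
        String realIp = request.getHeader("X-Real-IP");
        return realIp != null ? realIp : request.getRemoteAddr();
    }
}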
[root@localhost tao_iot]# /usr/local/nginx/sbin/nginx -s reload # reload the configuration
[root@localhost tao_iot]# ps aux | grep nginx
root 1988 0.0 0.0 20224 3648 ? Ss 13:46 0:00 nginx: master process /usr/local/nginx/sbin/nginx
nobody 5920 0.0 0.0 32256 4736 ? S 14:21 0:00 nginx: worker process
root 6244 0.0 0.0 214144 1600 pts/0 S+ 14:22 0:00 grep nginx
[root@localhost tao_iot]# ls -la /media/tao_iot/dist/index.html
-rw-r--r-- 1 root root 16667 4月 29 13:49 /media/tao_iot/dist/index.html
[root@localhost tao_iot]# getenforce
Disabled
[root@localhost tao_iot]# vi /etc/systemd/system/haiwei-admin.service
E325: ATTENTION
Found a swap file by the name "/etc/systemd/system/.haiwei-admin.service.swp"
owned by: root dated: 日 4月 27 17:57:40 2025
file name: /etc/systemd/system/haiwei-admin.service
modified: YES
user name: root host name: localhost.localdomain
process ID: 14448
While opening file "/etc/systemd/system/haiwei-admin.service"
dated: 二 4月 29 14:25:17 2025
NEWER than swap file!
(1) Another program may be editing the same file. If this is the case,
be careful not to end up with two different instances of the same
file when making changes. Quit, or continue with caution.
(2) An edit session for this file crashed.
If this is the case, use ":recover" or "vim -r /etc/systemd/system/haiwei-admin.service"
to recover the changes (see ":help recovery").
If you did this already, delete the swap file "/etc/systemd/system/.haiwei-admin.service.swp"
to avoid this message.
"/etc/systemd/system/haiwei-admin.service" 15L, 277C
Press ENTER or type command to continue
[root@localhost tao_iot]# vi /etc/systemd/system/haiwei-admin.service
[root@localhost tao_iot]# rm /etc/systemd/system/.haiwei-admin.service.swp
rm是否删除普通文件 '/etc/systemd/system/.haiwei-admin.service.swp'y
[root@localhost tao_iot]# vi /etc/systemd/system/haiwei-admin.service
[root@localhost tao_iot]# vi /etc/systemd/system/haiwei-admin.service
[root@localhost tao_iot]# chmod 644 /etc/systemd/system/haiwei-admin.service
[root@localhost tao_iot]# systemctl daemon-reload
[root@localhost tao_iot]# systemctl enable haiwei-admin.service
Created symlink /etc/systemd/system/multi-user.target.wants/haiwei-admin.service → /etc/systemd/system/haiwei-admin.service.
[root@localhost tao_iot]# systemctl status haiwei-admin.service
● haiwei-admin.service - Haiwei Admin Service
Loaded: loaded (/etc/systemd/system/haiwei-admin.service; enabled; vendor preset: disabled)
Active: inactive (dead)
[root@localhost tao_iot]# systemctl start haiwei-admin.service
[root@localhost tao_iot]# systemctl status haiwei-admin.service
● haiwei-admin.service - Haiwei Admin Service
Loaded: loaded (/etc/systemd/system/haiwei-admin.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2025-04-29 14:28:55 CST; 4s ago
Main PID: 6399 (java)
Tasks: 112
Memory: 481.6M
CGroup: /system.slice/haiwei-admin.service
└─6399 /usr/bin/java -jar /media/tao_iot/haiwei-admin.jar
4月 29 14:28:57 localhost.localdomain java[6399]: // \ \ `-. \_ __\ /__ _/ .-` / / >
4月 29 14:28:57 localhost.localdomain java[6399]: // ========`-.____`-.___\_____/___.-`____.-'=====>
4月 29 14:28:57 localhost.localdomain java[6399]: // `=---=' >
4月 29 14:28:57 localhost.localdomain java[6399]: // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^>
4月 29 14:28:57 localhost.localdomain java[6399]: // 佛祖保佑 永不宕机 永无BUG >
4月 29 14:28:57 localhost.localdomain java[6399]: //////////////////////////////////////////////////////>
4月 29 14:28:57 localhost.localdomain java[6399]: 14:28:57.724 [main] INFO c.o.RuoYiApplication - [logS>
4月 29 14:28:57 localhost.localdomain java[6399]: 14:28:57.731 [main] DEBUG c.o.RuoYiApplication - [logS>
4月 29 14:28:57 localhost.localdomain java[6399]: 14:28:57.732 [background-preinit] INFO o.h.v.i.util.V>
4月 29 14:28:57 localhost.localdomain java[6399]: 14:28:57.733 [main] INFO c.o.RuoYiApplication - [logS>
[root@localhost tao_iot]# cat /usr/local/nginx/conf/nginx.conf
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
#gzip on;
# Enable gzip compression
gzip on;
# Compression threshold: only responses larger than 1K are compressed; usually no need to change
gzip_min_length 1k;
# Compression buffers
gzip_buffers 16 64K;
# Compression protocol version, default 1.1; use 1.0 if the front end is squid 2.5
gzip_http_version 1.1;
# Compression level 1-10: the higher the number, the better the compression and the longer it takes
gzip_comp_level 3;
# File types to compress
gzip_types text/plain application/x-javascript text/css application/xml application/javascript;
# Related to caching services such as Squid; when on, adds "Vary: Accept-Encoding" to the response header
gzip_vary on;
# IE6 does not handle gzip well, so do not gzip for it
gzip_disable "MSIE [1-6]\.";
server {
listen 6061;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root /media/tao_iot/dist;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
location /prod-api/ {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://127.0.0.1:8020/;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
[root@localhost tao_iot]#
[root@localhost nginx]# /usr/local/nginx/sbin/nginx -t # check the configuration for syntax errors
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@localhost nginx]# /usr/local/nginx/sbin/nginx -s reload # reload the configuration
[root@localhost nginx]#