Compare commits

...

12 Commits

Author  SHA1        Message                                                                        Date
        6136a6685f  update                                                                         2024-01-08 09:32:10 +08:00
Jane    58df49f91c  update                                                                         2024-01-02 19:38:11 +08:00
        fc6d88a783  update                                                                         2023-12-28 19:41:53 +08:00
        ff6153349b  update                                                                         2023-12-28 19:04:20 +08:00
        e74207d372  update                                                                         2023-12-28 09:32:38 +08:00
        fa88f9a88a  update                                                                         2023-12-27 19:23:12 +08:00
Jane    c674440585  update                                                                         2023-12-26 20:32:33 +08:00
Jane    c639590744  update                                                                         2023-12-26 09:28:45 +08:00
Jane    a805c85b73  update                                                                         2023-12-26 09:06:44 +08:00
        c35a6151bc  Merge remote-tracking branch 'origin/develop-20231222' into develop-20231222  2023-12-25 20:33:55 +08:00
        62637fcd7f  update                                                                         2023-12-25 20:16:15 +08:00
Jane    0bcb7cdf15  update                                                                         2023-12-25 11:20:12 +08:00
73 changed files with 835 additions and 194 deletions

View File

@@ -20,14 +20,14 @@ eureka:
# register using the IP address
prefer-ip-address: true
# external IP address
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka
# expose monitoring endpoints
management:
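
For context, the eureka.* properties above are picked up automatically once a Spring Cloud Netflix Eureka client starter is on the classpath; a minimal sketch of a service entry point that registers under this configuration (package and class names are illustrative, not part of this change):

package cn.datax.demo;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;

// Registers this service with the Eureka server configured under service-url.defaultZone
// (http://192.168.1.217:8610/eureka after this change).
@SpringBootApplication
@EnableDiscoveryClient
public class DemoServiceApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoServiceApplication.class, args);
    }
}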

View File

@@ -24,6 +24,10 @@ spring:
url: ${common.mysql.master.url}
username: ${common.mysql.master.username}
password: ${common.mysql.master.password}
servlet:
multipart:
max-file-size: 50MB
mybatis-plus:
mapper-locations: classpath*:mapper/*Mapper.xml
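
The new spring.servlet.multipart.max-file-size limit is enforced by Spring before any controller code runs; a hedged sketch of translating the resulting exception into a clean HTTP 413 (the advice class is illustrative, not part of this change):

import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.RestControllerAdvice;
import org.springframework.web.multipart.MaxUploadSizeExceededException;

// Uploads beyond the configured 50MB limit never reach the controller;
// this advice maps the framework exception to a predictable API response.
@RestControllerAdvice
public class UploadLimitAdvice {
    @ExceptionHandler(MaxUploadSizeExceededException.class)
    public ResponseEntity<String> handleOversizeUpload(MaxUploadSizeExceededException e) {
        return ResponseEntity.status(HttpStatus.PAYLOAD_TOO_LARGE).body("file exceeds the 50MB upload limit");
    }
}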

View File

@@ -12,10 +12,10 @@ eureka:
# register using the IP address
prefer-ip-address: true
# external IP address
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: false
fetch-registry: false
instance-info-replication-interval-seconds: 30
service-url:
defaultZone: http://192.168.1.169:8610/eureka/
defaultZone: http://192.168.1.217:8610/eureka/

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-standard-service/data-standard-service.jar --server.port=8825 > data-standard-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-standard-service/data-standard-service.jar --server.port=8825 > data-standard-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-visual-service/data-visual-service.jar --server.port=8827 > data-visual-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-visual-service/data-visual-service.jar --server.port=8827 > data-visual-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./email-service/email-service.jar --server.port=8812 > email-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./email-service/email-service.jar --server.port=8812 > email-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./file-service/file-service.jar --server.port=8811 > data-market-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./file-service/file-service.jar --server.port=8811 > data-market-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms1024m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./gateway/gateway.jar --server.port=9538 > gateway.log 2>&1 &
nohup java -jar -Xms512m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./gateway/gateway.jar --server.port=9538 > gateway.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./quartz-service/quartz-service.jar --server.port=8813 > quartz-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./quartz-service/quartz-service.jar --server.port=8813 > quartz-service.log 2>&1 &

View File

@@ -1,7 +1,2 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-compare-service.jar --server.port=8096 --spring.profiles.active=dev > data-compare-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-compare-service.jar --server.port=8096 --spring.profiles.active=dev > data-compare-service.log 2>&1 &

View File

@@ -1,9 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service-integration.jar --server.port=8824 --spring.profiles.active=dev > data-market-service-integration.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service-integration.jar --server.port=8824 --spring.profiles.active=dev > data-market-service-integration.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service.jar --server.port=8822 --spring.profiles.active=dev > data-market-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service.jar --server.port=8822 --spring.profiles.active=dev > data-market-service.log 2>&1 &

View File

@@ -1,7 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-metadata-service.jar --server.port=8820 --spring.profiles.active=dev > data-metadata-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-metadata-service.jar --server.port=8820 --spring.profiles.active=dev > data-metadata-service.log 2>&1 &

View File

@@ -1,7 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-system-service.jar --server.port=8810 --spring.profiles.active=dev > data-system-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-system-service.jar --server.port=8810 --spring.profiles.active=dev > data-system-service.log 2>&1 &

View File

@@ -1,7 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./service-data-dts.jar --server.port=8810 --spring.profiles.active=dev > service-data-dts.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./service-data-dts.jar --server.port=8810 --spring.profiles.active=dev > service-data-dts.log 2>&1 &

View File

@@ -1,7 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./system-service.jar --server.port=8000 --spring.profiles.active=dev > system-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./system-service.jar --server.port=8000 --spring.profiles.active=dev > system-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service-mapping/data-market-service-mapping.jar --server.port=8823 > data-market-service-mapping.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-market-service-mapping/data-market-service-mapping.jar --server.port=8823 > data-market-service-mapping.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-masterdata-service/data-masterdata-service.jar --server.port=8828 > data-masterdata-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-masterdata-service/data-masterdata-service.jar --server.port=8828 > data-masterdata-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-metadata-service-console/data-metadata-service-console.jar --server.port=8821 > data-metadata-service-console.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-metadata-service-console/data-metadata-service-console.jar --server.port=8821 > data-metadata-service-console.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms128m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-quality-service/data-quality-service.jar --server.port=8826 > data-quality-service.log 2>&1 &
nohup java -jar -Xms128m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./data-quality-service/data-quality-service.jar --server.port=8826 > data-quality-service.log 2>&1 &

View File

@@ -1,8 +1,3 @@
#!/bin/sh
source /etc/profile
cd $(dirname $0)
nohup java -jar -Xms1024m -Xmx2048m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./eureka/eureka.jar --server.port=8610 > eureka.log 2>&1 &
nohup java -jar -Xms512m -Xmx512m -XX:PermSize=128M -XX:MaxPermSize=256M -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 ./eureka/eureka.jar --server.port=8610 > eureka.log 2>&1 &

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -33,11 +33,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -0,0 +1,87 @@
package cn.datax.service.data.metadata.api.entity;
import com.baomidou.mybatisplus.annotation.*;
import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.Data;
import lombok.experimental.Accessors;
import java.io.Serializable;
import java.time.LocalDateTime;
/**
* <p>
* Data source information table
* </p>
*
* @author AllDataDC
* @date 2022-11-14
*/
@Data
@Accessors(chain = true)
@TableName(value = "offline_data_file", autoResultMap = true)
public class OfflineDataFileEntity implements Serializable {
private static final long serialVersionUID=1L;
@TableId(value = "id", type = IdType.ASSIGN_ID)
private String id;
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
@TableField(value = "create_time", fill = FieldFill.INSERT)
private LocalDateTime createTime;
/**
* Creator
*/
@TableField(value = "create_by", fill = FieldFill.INSERT)
private String createBy;
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
@TableField(value = "switch_time")
private LocalDateTime switchTime;
@TableField(value = "remark")
private String remark;
@TableField(value = "offline_type")
private String offlineType;
@TableField(value = "is_switch")
private String isSwitch;
/**
* Original file name
*/
@TableField(value = "original_file_name")
private String originalFileName;
/**
* File name
*/
@TableField(value = "file_name")
private String fileName;
/**
* File size
*/
@TableField(value = "file_size")
private Long fileSize;
/**
* Access path
*/
@TableField(value = "file_path")
private String filePath;
/**
* File type
*/
@TableField(value = "content_type")
private String contentType;
/**
* File source
*/
@TableField(value = "file_type")
private String fileType;
}
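
create_time and create_by are marked FieldFill.INSERT, which MyBatis-Plus only populates when a MetaObjectHandler is registered; a sketch under that assumption (the handler below is illustrative and not part of this change):

import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import org.apache.ibatis.reflection.MetaObject;
import org.springframework.stereotype.Component;
import java.time.LocalDateTime;

// Fills the FieldFill.INSERT columns of OfflineDataFileEntity when a row is inserted.
@Component
public class AuditMetaObjectHandler implements MetaObjectHandler {
    @Override
    public void insertFill(MetaObject metaObject) {
        this.strictInsertFill(metaObject, "createTime", LocalDateTime.class, LocalDateTime.now());
        this.strictInsertFill(metaObject, "createBy", String.class, "system"); // real code would resolve the current user
    }

    @Override
    public void updateFill(MetaObject metaObject) {
        // this entity declares no FieldFill.UPDATE columns
    }
}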

View File

@@ -0,0 +1,22 @@
package cn.datax.service.data.metadata.api.query;
import cn.datax.common.base.BaseQueryParams;
import lombok.Data;
import lombok.EqualsAndHashCode;
/**
* <p>
* Data source information table query entity
* </p>
*
* @author AllDataDC
* @date 2022-11-14
*/
@Data
@EqualsAndHashCode(callSuper = true)
public class OfflineDataFileQuery extends BaseQueryParams {
private static final long serialVersionUID=1L;
private String fileName;
}

View File

@@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@@ -29,12 +29,13 @@ import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.http.HttpServletResponse;
import java.io.OutputStream;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.io.OutputStream;
/**
* <p>
@@ -255,4 +256,16 @@ public class MetadataSourceController extends BaseController {
metadataSourceService.refreshMetadata();
return R.ok();
}
@PostMapping("/getMetadatablood")
public R getMetadatablood(@RequestBody Map<String, String> params) {
List<Map<String, Object>> list = metadataSourceService.getMetadatablood(params.get("sourceId"), params.get("tableId"));
return R.ok().setData(list);
}
@PostMapping("/upload/{type}")
public R upload(@RequestParam("file") MultipartFile file, @PathVariable String type) {
metadataSourceService.uploadFile(file, type);
return R.ok();
}
}
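
A hedged sketch of exercising the new upload endpoint from a MockMvc test; the file contents are made up, and the controller's base request mapping (not shown in this diff) would need to be prepended to the path:

import org.springframework.mock.web.MockMultipartFile;
import org.springframework.test.web.servlet.MockMvc;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.multipart;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

class MetadataSourceUploadSketch {
    // Posts an in-memory CSV to /upload/{type}.
    void uploadCsv(MockMvc mockMvc) throws Exception {
        MockMultipartFile file = new MockMultipartFile(
                "file", "offline.csv", "text/csv", "id,name\n1,a".getBytes());
        mockMvc.perform(multipart("/upload/{type}", "csv").file(file))
                .andExpect(status().isOk());
    }
}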

View File

@@ -0,0 +1,60 @@
package cn.datax.service.data.metadata.controller;
import cn.datax.common.base.BaseController;
import cn.datax.common.core.JsonPage;
import cn.datax.common.core.R;
import cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity;
import cn.datax.service.data.metadata.api.query.OfflineDataFileQuery;
import cn.datax.service.data.metadata.service.OfflineDataFileService;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;
@RestController
@RequestMapping("/offline")
public class OfflineDataFileController extends BaseController {
@Autowired
private OfflineDataFileService offlineDataFileService;
/**
* Query a record by ID
*
* @param id
* @return
*/
@ApiOperation(value = "获取详细信息", notes = "根据url的id来获取详细信息")
@ApiImplicitParam(name = "id", value = "ID", required = true, dataType = "String", paramType = "path")
@GetMapping("/{id}")
public R getOfflineDataFileById(@PathVariable String id) {
OfflineDataFileEntity offlineDataFileEntity = offlineDataFileService.getOfflineDataFileById(id);
return R.ok().setData(offlineDataFileEntity);
}
/**
* Paged query
*
* @return
*/
@GetMapping("/page")
public R getOfflineDataFilePage(OfflineDataFileQuery offlineDataFileQuery) {
QueryWrapper<OfflineDataFileEntity> queryWrapper = new QueryWrapper<>();
queryWrapper.like(StrUtil.isNotBlank(offlineDataFileQuery.getFileName()), "s.file_name", offlineDataFileQuery.getFileName());
IPage<OfflineDataFileEntity> page = offlineDataFileService.pageWithAuth(new Page<>(offlineDataFileQuery.getPageNum(), offlineDataFileQuery.getPageSize()), queryWrapper);
List<OfflineDataFileEntity> collect = page.getRecords();
JsonPage<OfflineDataFileEntity> jsonPage = new JsonPage<>(page.getCurrent(), page.getSize(), page.getTotal(), collect);
return R.ok().setData(jsonPage);
}
}

View File

@@ -0,0 +1,24 @@
package cn.datax.service.data.metadata.dao;
import cn.datax.common.base.BaseDao;
import cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.core.toolkit.Constants;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import java.io.Serializable;
import java.util.List;
@Mapper
public interface OfflineDataFileDao extends BaseDao<OfflineDataFileEntity> {
@Override
OfflineDataFileEntity selectById(Serializable id);
@Override
List<OfflineDataFileEntity> selectList(@Param(Constants.WRAPPER) Wrapper<OfflineDataFileEntity> queryWrapper);
<E extends IPage<OfflineDataFileEntity>> E selectPageWithAuth(E page, @Param(Constants.WRAPPER) Wrapper<OfflineDataFileEntity> queryWrapper, @Param("roles") List<String> roles);
}
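
The custom selectPageWithAuth pairs with the ${ew.customSqlSegment} binding in the XML mapper further down; @Param(Constants.WRAPPER) is what lets MyBatis-Plus splice the wrapper's WHERE clause into the hand-written SQL. A hypothetical call, with made-up values:

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import java.util.Collections;

class OfflineDataFileDaoSketch {
    // First page of 10 files whose name contains "report", restricted to the caller's roles.
    // The "s." prefix matches the table alias used in OfflineDataFileDao.xml.
    void pageExample(OfflineDataFileDao offlineDataFileDao) {
        Page<OfflineDataFileEntity> page = new Page<>(1, 10);
        QueryWrapper<OfflineDataFileEntity> qw = new QueryWrapper<>();
        qw.like("s.file_name", "report");
        offlineDataFileDao.selectPageWithAuth(page, qw, Collections.singletonList("analyst-role-id"));
    }
}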

View File

@@ -6,11 +6,15 @@ import cn.datax.common.database.core.DbColumn;
import cn.datax.common.database.core.DbTable;
import cn.datax.service.data.metadata.api.dto.MetadataSourceDto;
import cn.datax.service.data.metadata.api.entity.MetadataSourceEntity;
import cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity;
import com.aspose.words.Document;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import org.apache.hc.core5.http.io.entity.FileEntity;
import org.springframework.web.multipart.MultipartFile;
import java.util.List;
import java.util.Map;
/**
* <p>
@@ -51,4 +55,9 @@ public interface MetadataSourceService extends BaseService<MetadataSourceEntity>
List<MetadataSourceEntity> getMetadataSourceList();
<E extends IPage<MetadataSourceEntity>> E pageWithAuth(E page, Wrapper<MetadataSourceEntity> queryWrapper);
List<Map<String, Object>> getMetadatablood(String datasourceId, String tableName);
OfflineDataFileEntity uploadFile(MultipartFile file, String type);
}

View File

@@ -0,0 +1,23 @@
package cn.datax.service.data.metadata.service;
import cn.datax.common.base.BaseService;
import cn.datax.service.data.metadata.api.entity.MetadataSourceEntity;
import cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
/**
* <p>
* Data source information table service class
* </p>
*
* @author AllDataDC
* @date 2022-11-14
*/
public interface OfflineDataFileService extends BaseService<OfflineDataFileEntity> {
OfflineDataFileEntity getOfflineDataFileById(String id);
<E extends IPage<OfflineDataFileEntity>> E pageWithAuth(E page, Wrapper<OfflineDataFileEntity> queryWrapper);
}

View File

@@ -12,32 +12,29 @@ import cn.datax.common.database.core.DbTable;
import cn.datax.common.exception.DataException;
import cn.datax.common.redis.service.RedisService;
import cn.datax.common.utils.SecurityUtil;
import cn.datax.common.utils.ThrowableUtil;
import cn.datax.service.data.market.api.entity.DataApiEntity;
import cn.datax.service.data.market.api.feign.DataApiServiceFeign;
import cn.datax.service.data.metadata.api.dto.DbSchema;
import cn.datax.service.data.metadata.api.dto.MetadataSourceDto;
import cn.datax.service.data.metadata.api.entity.MetadataAuthorizeEntity;
import cn.datax.service.data.metadata.api.entity.MetadataChangeRecordEntity;
import cn.datax.service.data.metadata.api.entity.MetadataColumnEntity;
import cn.datax.service.data.metadata.api.entity.MetadataSourceEntity;
import cn.datax.service.data.metadata.api.entity.MetadataTableEntity;
import cn.datax.service.data.metadata.api.entity.*;
import cn.datax.service.data.metadata.api.enums.DataLevel;
import cn.datax.service.data.metadata.api.enums.SyncStatus;
import cn.datax.service.data.metadata.async.AsyncTask;
import cn.datax.service.data.metadata.dao.MetadataAuthorizeDao;
import cn.datax.service.data.metadata.dao.MetadataChangeRecordDao;
import cn.datax.service.data.metadata.dao.MetadataColumnDao;
import cn.datax.service.data.metadata.dao.MetadataSourceDao;
import cn.datax.service.data.metadata.dao.MetadataTableDao;
import cn.datax.service.data.metadata.dao.*;
import cn.datax.service.data.metadata.mapstruct.MetadataSourceMapper;
import cn.datax.service.data.metadata.service.MetadataSourceService;
import cn.datax.service.data.metadata.service.MetadataTableService;
import cn.datax.service.data.quality.api.entity.CheckRuleEntity;
import cn.datax.service.data.quality.api.feign.QualityServiceFeign;
import cn.datax.service.data.standard.api.entity.ContrastEntity;
import cn.datax.service.data.standard.api.feign.StandardServiceFeign;
import cn.datax.service.data.visual.api.entity.DataSetEntity;
import cn.datax.service.data.visual.api.feign.VisualServiceFeign;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.system.SystemUtil;
import com.aspose.words.Document;
import com.aspose.words.MailMerge;
import com.aspose.words.net.System.Data.DataRelation;
@@ -55,15 +52,16 @@ import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -121,6 +119,12 @@ public class MetadataSourceServiceImpl extends BaseServiceImpl<MetadataSourceDao
@Autowired
private RedisTemplate<String, Object> redisTemplate;
@Autowired
private MetadataTableService metadataTableService;
@Autowired
private OfflineDataFileDao offlineDataFileDao;
@Override
@Transactional(rollbackFor = Exception.class)
public void saveMetadataSource(MetadataSourceDto metadataSourceDto) {
@@ -395,4 +399,88 @@ public class MetadataSourceServiceImpl extends BaseServiceImpl<MetadataSourceDao
Map<String, List<MetadataColumnEntity>> columnListMap = columnEntityList.stream().collect(Collectors.groupingBy(MetadataColumnEntity::getTableId));
redisTemplate.opsForHash().putAll(columnKey, columnListMap);
}
@Override
public List<Map<String, Object>> getMetadatablood(String datasourceId, String tableId) {
Map<String, Map<String, Object>> columns = new LinkedHashMap<>();
Map<String, Object> resultMap = new LinkedHashMap<>();
List<Map<String, Object>> rows = new ArrayList<>();
try {
MetadataSourceEntity dataSource = super.getById(datasourceId);
DbQuery dbQuery = this.getDbQuery(datasourceId);
Connection connection = dbQuery.getConnection();
Statement statement = connection.createStatement();
String sql = "SELECT TABLE_NAME,COLUMN_NAME,CONSTRAINT_NAME,REFERENCED_TABLE_NAME,REFERENCED_COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE WHERE TABLE_SCHEMA = '" + dataSource.getDbSchema().getDbName() + "' AND REFERENCED_TABLE_NAME IS NOT NULL";
if(StrUtil.isNotEmpty(tableId)){
MetadataTableEntity metadataTableEntity = metadataTableService.getMetadataTableById(tableId);
sql += " AND table_name = '" + metadataTableEntity.getTableName() + "'";
}
ResultSet resultSet = statement.executeQuery(sql);
ResultSetMetaData metaData = resultSet.getMetaData();
int columnCount = metaData.getColumnCount();
for (int i = 0; i < columnCount; i++) {
Map<String,Object> map = new HashMap<>();
map.put("name",metaData.getColumnName(i+1));
map.put("type",metaData.getColumnTypeName(i+1));
columns.put(metaData.getColumnName(i+1),map);
}
while(resultSet.next()){
Map<String, Object> row = new LinkedHashMap<>();
for (int i = 0; i < columnCount; i++) {
Object object = resultSet.getObject(metaData.getColumnLabel(i+1));
if (metaData.getColumnTypeName(i+1).equals("DATETIME") || metaData.getColumnTypeName(i+1).equals("TIMESTAMP") || metaData.getColumnTypeName(i+1).equals("DATE")) {
row.put(metaData.getColumnName(i+1),resultSet.getString(metaData.getColumnLabel(i+1)));
} else {
row.put(metaData.getColumnName(i+1),object);
}
}
rows.add(row);
}
resultSet.close(); // close the ResultSet
statement.close(); // close the Statement
connection.close(); // close the Connection
// note: resultMap is assembled here but only rows is returned to the caller
resultMap.put("columns", columns);
resultMap.put("rows", rows);
} catch (Exception e) {
// swallowing the exception would hide lookup failures; at minimum print it
e.printStackTrace();
}
return rows;
}
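// A note on the query above: the schema and table names are concatenated straight into the SQL,
// which is injection-prone if table metadata is ever user-controlled. A hedged sketch of the same
// INFORMATION_SCHEMA lookup with bound parameters (helper is illustrative, not part of this change;
// it also needs java.sql.PreparedStatement imported, and the caller closes the statement):
private ResultSet findForeignKeys(Connection conn, String schema, String tableName) throws Exception {
    String sql = "SELECT TABLE_NAME,COLUMN_NAME,CONSTRAINT_NAME,REFERENCED_TABLE_NAME,REFERENCED_COLUMN_NAME"
            + " FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE"
            + " WHERE TABLE_SCHEMA = ? AND REFERENCED_TABLE_NAME IS NOT NULL"
            + (tableName != null ? " AND TABLE_NAME = ?" : "");
    PreparedStatement ps = conn.prepareStatement(sql);
    ps.setString(1, schema);
    if (tableName != null) {
        ps.setString(2, tableName);
    }
    return ps.executeQuery();
}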
@Override
public OfflineDataFileEntity uploadFile(MultipartFile file, String type) {
OfflineDataFileEntity fileEntity = new OfflineDataFileEntity();
fileEntity.setContentType(file.getContentType())
.setOriginalFileName(file.getOriginalFilename())
.setFileSize(file.getSize());
String fileName = file.getOriginalFilename();
fileEntity.setFileName(fileName);
uploadLocalFile(file, fileEntity);
// set the file source
fileEntity.setFileType("local");
fileEntity.setOfflineType(type);
fileEntity.setIsSwitch("djr");
// persist the file record to the database
offlineDataFileDao.insert(fileEntity);
return fileEntity;
}
protected void uploadLocalFile(MultipartFile file, OfflineDataFileEntity fileEntity) {
String localPath = System.getProperty(SystemUtil.USER_DIR) + File.separator + "home" + File.separator + "zoomlion" + File.separator + "uploadTemp";
File parentFile = new File(localPath);
if (!parentFile.exists()) {
if (!parentFile.mkdirs()) {
throw new RuntimeException("创建保存路径失败");
}
}
fileEntity.setFilePath(localPath + File.separator + fileEntity.getOriginalFileName());
File dest = new File(localPath + File.separator + fileEntity.getOriginalFileName());
try {
file.transferTo(dest);
} catch (IOException e) {
System.out.println("离线文件上传异常ex={}, StackTrace={}" + e.getMessage() + ThrowableUtil.getStackTrace(e));
}
}
}

View File

@@ -0,0 +1,50 @@
package cn.datax.service.data.metadata.service.impl;
import cn.datax.common.base.BaseServiceImpl;
import cn.datax.common.utils.SecurityUtil;
import cn.datax.service.data.metadata.api.entity.MetadataSourceEntity;
import cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity;
import cn.datax.service.data.metadata.dao.OfflineDataFileDao;
import cn.datax.service.data.metadata.service.OfflineDataFileService;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.List;
/**
* <p>
* Data source information table service implementation
* </p>
*
* @author AllDataDC
* @date 2022-11-14
*/
@Service
@Transactional(propagation = Propagation.SUPPORTS, readOnly = true, rollbackFor = Exception.class)
public class OfflineDataFileServiceImpl extends BaseServiceImpl<OfflineDataFileDao, OfflineDataFileEntity> implements OfflineDataFileService {
@Autowired
private OfflineDataFileDao offlineDataFileDao;
@Override
public OfflineDataFileEntity getOfflineDataFileById(String id) {
OfflineDataFileEntity offlineDataFileEntity = super.getById(id);
return offlineDataFileEntity;
}
@Override
public <E extends IPage<OfflineDataFileEntity>> E pageWithAuth(E page, Wrapper<OfflineDataFileEntity> queryWrapper) {
boolean admin = SecurityUtil.isAdmin();
List<String> roles = new ArrayList<>();
if (!admin) {
roles = SecurityUtil.getUserRoleIds();
}
return offlineDataFileDao.selectPageWithAuth(page, queryWrapper, roles);
}
}

View File

@@ -20,14 +20,14 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka
#jwt
jwt:
header: Authorization

View File

@@ -0,0 +1,72 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="cn.datax.service.data.metadata.dao.OfflineDataFileDao">
<!-- Common query result map -->
<resultMap id="BaseResultMap" type="cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity">
<result column="id" property="id" />
<result column="create_by" property="createBy" />
<result column="create_time" property="createTime" />
<result column="switch_time" property="switchTime" />
<result column="remark" property="remark" />
<result column="offline_type" property="offlineType" />
<result column="is_switch" property="isSwitch" />
<result column="original_file_name" property="originalFileName" />
<result column="file_name" property="fileName" />
<result column="file_size" property="fileSize" />
<result column="file_path" property="filePath" />
<result column="content_type" property="contentType" />
<result column="file_type" property="fileType" />
</resultMap>
<resultMap id="ExtendResultMap" type="cn.datax.service.data.metadata.api.entity.OfflineDataFileEntity" extends="BaseResultMap">
<result column="db_schema" property="dbSchema" typeHandler="com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler" />
</resultMap>
<!-- Common query result columns -->
<sql id="Base_Column_List">
id,
create_by,
create_time,
switch_time,
remark,
offline_type,
is_switch,
original_file_name, file_name, file_size, file_path, content_type, file_type
</sql>
<sql id="Source_Column_List">
${alias}.id,
${alias}.create_by,
${alias}.create_time,
${alias}.switch_time,
${alias}.remark,
${alias}.offline_type,
${alias}.is_switch,
${alias}.original_file_name, ${alias}.file_name, ${alias}.file_size, ${alias}.file_path, ${alias}.content_type, ${alias}.file_type
</sql>
<select id="selectById" resultMap="ExtendResultMap">
SELECT
<include refid="Source_Column_List"><property name="alias" value="s"/></include>
FROM offline_data_file s
WHERE 1=1 AND s.id = #{id}
</select>
<select id="selectList" resultMap="BaseResultMap">
SELECT
<include refid="Base_Column_List"></include>
FROM offline_data_file
${ew.customSqlSegment}
</select>
<select id="selectPageWithAuth" resultMap="BaseResultMap">
SELECT
<include refid="Source_Column_List"><property name="alias" value="s"/></include>
FROM offline_data_file s
<trim prefix="WHERE" prefixOverrides="WHERE |AND |OR ">
${ew.customSqlSegment}
</trim>
</select>
</mapper>

View File

@@ -9,6 +9,7 @@ import javax.validation.Valid;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
import java.io.Serializable;
import java.util.List;
/**
* <p>
@@ -61,4 +62,8 @@ public class CheckRuleDto implements Serializable {
private String status;
@ApiModelProperty(value = "备注")
private String remark;
private List<String> transferValue;
private String qualityType;
}

View File

@@ -0,0 +1,20 @@
package cn.datax.service.data.quality.api.dto;
import lombok.Data;
import java.io.Serializable;
/**
* @author ch
* @date 2023/12/25 17:18
*
* Regex validation
*/
@Data
public class Regular implements Serializable {
/**
* Regular expression
*/
private String regular;
}

View File

@@ -32,4 +32,11 @@ public class RuleConfig implements Serializable {
* Accuracy
*/
private Accuracy accuracy;
/**
* Regular expression
*/
private Regular regular;
}

View File

@@ -61,6 +61,10 @@ public class CheckReportEntity implements Serializable {
*/
private String checkBatch;
private String checkReportType;
private int checkTimeConsuming;
/**
* Rule name
*/

View File

@@ -7,6 +7,7 @@ import lombok.Data;
import lombok.experimental.Accessors;
import java.io.Serializable;
import java.util.Date;
/**
* <p>
@@ -34,6 +35,11 @@ public class ScheduleJobEntity implements Serializable {
*/
private String jobName;
/**
* Job type
*/
private String jobType;
/**
* bean名称
*/
@@ -58,4 +64,11 @@ public class ScheduleJobEntity implements Serializable {
* Status: 1 = running, 0 = paused
*/
private String status;
/**
* Last completion time
*/
private Date afterDate;
private String remark;
}

View File

@@ -7,6 +7,8 @@ public enum RuleItem {
Integrity("integrity_key", "验证表中必须出现的字段非空"),
Relevance("relevance_key", "验证关联性"),
Timeliness("timeliness_key", "验证及时性"),
Regular("regular_key","正则表达式"),
Consistent("consistent_key", "验证用户指定的字段枚举值是否合乎要求");
private final String code;

View File

@@ -23,4 +23,6 @@ public class CheckRuleQuery extends BaseQueryParams {
private String ruleSource;
private String ruleTable;
private String ruleColumn;
private String ruleType;
}

View File

@@ -25,6 +25,8 @@ public class CheckReportVo implements Serializable {
private LocalDateTime checkDate;
private String checkResult;
private Integer checkTotalCount;
private String checkReportType;
private int checkTimeConsuming;
private Integer checkErrorCount;
private String ruleName;
private String ruleType;

View File

@@ -3,6 +3,7 @@ package cn.datax.service.data.quality.api.vo;
import lombok.Data;
import java.io.Serializable;
import java.util.Date;
/**
* <p>
@@ -20,8 +21,10 @@ public class ScheduleJobVo implements Serializable {
private String id;
private String status;
private String jobName;
private String jobType;
private String beanName;
private String methodName;
private String methodParams;
private String cronExpression;
private Date afterDate; // last completion time
}

View File

@@ -84,6 +84,11 @@
<artifactId>data-standard-service-api</artifactId>
<version>0.4.x</version>
</dependency>
<dependency>
<groupId>com.platform</groupId>
<artifactId>data-metadata-service</artifactId>
<version>0.4.x</version>
</dependency>
</dependencies>
<build>

View File

@@ -47,7 +47,7 @@ public class StartedUpRunner implements ApplicationRunner {
List<ScheduleJobEntity> list = scheduleJobService.list(Wrappers.<ScheduleJobEntity>lambdaQuery().eq(ScheduleJobEntity::getStatus, DataConstant.TrueOrFalse.TRUE.getKey()));
if (CollUtil.isNotEmpty(list)) {
list.forEach(job -> {
SchedulingRunnable task = new SchedulingRunnable(job.getId(), job.getBeanName(), job.getMethodName(), job.getMethodParams());
SchedulingRunnable task = new SchedulingRunnable(job.getId(), job.getBeanName(), job.getMethodName(), job.getMethodParams(), job.getJobType());
cronTaskRegistrar.addCronTask(task, job.getCronExpression());
});
}

View File

@@ -78,8 +78,6 @@ public class CheckReportController extends BaseController {
queryWrapper.like(StrUtil.isNotBlank(checkReportQuery.getRuleSource()), "r.rule_source", checkReportQuery.getRuleSource());
queryWrapper.like(StrUtil.isNotBlank(checkReportQuery.getRuleTable()), "r.rule_table", checkReportQuery.getRuleTable());
queryWrapper.like(StrUtil.isNotBlank(checkReportQuery.getRuleColumn()), "r.rule_column", checkReportQuery.getRuleColumn());
// pin the query to the unique check report
queryWrapper.apply("c.check_batch = r.last_check_batch");
IPage<CheckReportEntity> page = checkReportService.page(new Page<>(checkReportQuery.getPageNum(), checkReportQuery.getPageSize()), queryWrapper);
List<CheckReportVo> collect = page.getRecords().stream().map(checkReportMapper::toVO).collect(Collectors.toList());
JsonPage<CheckReportVo> jsonPage = new JsonPage<>(page.getCurrent(), page.getSize(), page.getTotal(), collect);

View File

@@ -85,6 +85,16 @@ public class CheckRuleController extends BaseController {
queryWrapper.like(StrUtil.isNotBlank(checkRuleQuery.getRuleSource()), "r.rule_source", checkRuleQuery.getRuleSource());
queryWrapper.like(StrUtil.isNotBlank(checkRuleQuery.getRuleTable()), "r.rule_table", checkRuleQuery.getRuleTable());
queryWrapper.like(StrUtil.isNotBlank(checkRuleQuery.getRuleColumn()), "r.rule_column", checkRuleQuery.getRuleColumn());
// filter for structural-conformance checks
if("jg".equals(checkRuleQuery.getRuleType())){
queryWrapper.in("t.code","table_jc","length_jc","null_jc","pk_jc","fk_jc");
} else if ("gl".equals(checkRuleQuery.getRuleType())){
// relevance
queryWrapper.in("t.code","relevance");
} else {
queryWrapper.in("t.code","unique","integrity","accuracy","consistent","regular","timeliness");
}
// queryWrapper.in("t.code","")
IPage<CheckRuleEntity> page = checkRuleService.page(new Page<>(checkRuleQuery.getPageNum(), checkRuleQuery.getPageSize()), queryWrapper);
List<CheckRuleVo> collect = page.getRecords().stream().map(checkRuleMapper::toVO).collect(Collectors.toList());
JsonPage<CheckRuleVo> jsonPage = new JsonPage<>(page.getCurrent(), page.getSize(), page.getTotal(), collect);
@@ -104,6 +114,7 @@ public class CheckRuleController extends BaseController {
return R.ok().setData(checkRuleMapper.toVO(checkRuleEntity));
}
/**
* Update
* @param checkRule
@@ -145,4 +156,15 @@ public class CheckRuleController extends BaseController {
checkRuleService.deleteCheckRuleBatch(ids);
return R.ok();
}
@GetMapping("/listSourceIdBytable/{id}/ruleId/{ruleId}")
public R listSourceIdBytable(@PathVariable String id,@PathVariable String ruleId) {
QueryWrapper<CheckRuleEntity> queryWrapper = new QueryWrapper<>();
queryWrapper.eq("rule_source_id",id);
queryWrapper.eq("rule_item_id",ruleId);
List<CheckRuleEntity> checkRuleList = checkRuleService.list(queryWrapper);
return R.ok().setData(checkRuleMapper.toVO(checkRuleList));
}
}

View File

@@ -1,10 +1,12 @@
package cn.datax.service.data.quality.controller;
import cn.datax.common.base.BaseController;
import cn.datax.common.core.JsonPage;
import cn.datax.common.core.R;
import cn.datax.common.validate.ValidationGroups;
import cn.datax.service.data.quality.api.entity.ScheduleJobEntity;
import cn.datax.service.data.quality.api.vo.ScheduleJobVo;
import cn.datax.service.data.quality.api.query.ScheduleJobQuery;
import cn.datax.service.data.quality.api.vo.ScheduleJobVo;
import cn.datax.service.data.quality.mapstruct.ScheduleJobMapper;
import cn.datax.service.data.quality.service.ScheduleJobService;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
@@ -15,10 +17,10 @@ import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import cn.datax.common.base.BaseController;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
@@ -74,6 +76,61 @@ public class ScheduleJobController extends BaseController {
return R.ok().setData(jsonPage);
}
/**
* Create
* @param scheduleJob
* @return
*/
@ApiOperation(value = "添加信息", notes = "根据checkRule对象添加信息")
@ApiImplicitParam(name = "scheduleJob", value = "详细实体scheduleJob", required = true, dataType = "scheduleJob")
@PostMapping()
public R saveScheduleJob(@RequestBody @Validated({ValidationGroups.Insert.class}) ScheduleJobEntity scheduleJob) {
ScheduleJobEntity scheduleJobEntity = scheduleJobService.saveScheduleJob(scheduleJob);
return R.ok().setData(scheduleJobEntity);
}
/**
* Update
* @param scheduleJob
* @return
*/
@ApiOperation(value = "修改信息", notes = "根据url的id来指定修改对象并根据传过来的信息来修改详细信息")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "ID", required = true, dataType = "String", paramType = "path"),
@ApiImplicitParam(name = "scheduleJob", value = "详细实体scheduleJob", required = true, dataType = "scheduleJob")
})
@PutMapping("/{id}")
public R updateScheduleJob(@PathVariable String id, @RequestBody @Validated({ValidationGroups.Update.class}) ScheduleJobEntity scheduleJob) {
ScheduleJobEntity scheduleJobEntity = scheduleJobService.updateScheduleJob(scheduleJob);
return R.ok().setData(scheduleJobEntity);
}
/**
* Delete
* @param id
* @return
*/
@ApiOperation(value = "删除", notes = "根据url的id来指定删除对象")
@ApiImplicitParam(name = "id", value = "ID", required = true, dataType = "String", paramType = "path")
@DeleteMapping("/{id}")
public R deleteScheduleJobById(@PathVariable String id) {
scheduleJobService.deleteScheduleJobById(id);
return R.ok();
}
/**
* Batch delete
* @param ids
* @return
*/
@ApiOperation(value = "批量删除", notes = "根据url的ids来批量删除对象")
@ApiImplicitParam(name = "ids", value = "ID集合", required = true, dataType = "List", paramType = "path")
@DeleteMapping("/batch/{ids}")
public R deleteCheckRuleBatch(@PathVariable List<String> ids) {
scheduleJobService.deleteScheduleJobBatch(ids);
return R.ok();
}
/**
* Pause a job
* @param id
@@ -110,6 +167,11 @@ public class ScheduleJobController extends BaseController {
@PostMapping("/run/{id}")
public R runScheduleJobById(@PathVariable("id") String id) {
scheduleJobService.runScheduleJobById(id);
QueryWrapper<ScheduleJobEntity> queryWrapper = new QueryWrapper<>();
queryWrapper.eq("id", id);
ScheduleJobEntity updatedEntity = new ScheduleJobEntity();
updatedEntity.setAfterDate(new Date());
scheduleJobService.update(updatedEntity,queryWrapper);
return R.ok();
}
}

View File

@@ -25,11 +25,14 @@ public class SchedulingRunnable implements Runnable {
private String params;
public SchedulingRunnable(String id, String beanName, String methodName, String params) {
private String jobType;
public SchedulingRunnable(String id, String beanName, String methodName, String params, String jobType) {
this.id = id;
this.beanName = beanName;
this.methodName = methodName;
this.params = params;
this.jobType = jobType;
}
@Override
@@ -47,6 +50,7 @@ public class SchedulingRunnable implements Runnable {
}
batch = DateUtil.format(LocalDateTime.now(), DatePattern.PURE_DATETIME_PATTERN);
map.put("batch", batch);
map.put("jobType", jobType);
ReflectionUtils.makeAccessible(method);
method.invoke(target, map);
} catch (Exception ex) {
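
Putting the pieces together: the jobType passed to this constructor is copied into the params map handed to the scheduled method, which is how QualityTask (further down) can read map.get("jobType") when stamping reports. A hypothetical construction, with made-up job ID, bean, and method names:

// Schedules a job whose reports will carry jobType "quality"; the cron fires daily at 02:00.
SchedulingRunnable task = new SchedulingRunnable("job-1", "qualityTask", "dataQuality", null, "quality");
cronTaskRegistrar.addCronTask(task, "0 0 2 * * ?");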

View File

@@ -0,0 +1,39 @@
package cn.datax.service.data.quality.schedule.rules;
import cn.datax.common.database.constants.DbType;
import java.util.Map;
/**
* @author ch
* @date 2023/12/25 18:14
*/
public class RegularRule implements RuleItem {
private static final String REGULAR = "regular";
@Override
public String parse(DbType dbType, String table, String column, Map<String, Object> map) {
final StringBuilder builder = new StringBuilder();
switch (dbType) {
case ORACLE:
case ORACLE_12C:
case MYSQL:
// MySQL: run the regex via REGEXP
builder.append("SELECT SUM(CASE WHEN ").append(column).append(" REGEXP ").append("'"+map.get(REGULAR)+"'").append(" != '' THEN 0 ELSE 1 END),").append(" COUNT(*) FROM ").append(table);
break;
case MARIADB:
case SQL_SERVER:
case SQL_SERVER2008:
case POSTGRE_SQL:
case OTHER:
default:
break;
}
return builder.toString();
}
@Override
public String code() {
return "timeliness_key";
}
}
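
A quick illustration of what parse() emits for MySQL; the table, column, and pattern below are made up. The first aggregate counts rows that fail the pattern (the SUM term is 1 only when REGEXP does not match), the second counts all rows:

import cn.datax.common.database.constants.DbType;
import java.util.HashMap;
import java.util.Map;

public class RegularRuleDemo {
    public static void main(String[] args) {
        Map<String, Object> cfg = new HashMap<>();
        cfg.put("regular", "^1[0-9]{10}$"); // e.g. an 11-digit phone number
        String sql = new RegularRule().parse(DbType.MYSQL, "user_info", "phone", cfg);
        System.out.println(sql);
        // SELECT SUM(CASE WHEN phone REGEXP '^1[0-9]{10}$' != '' THEN 0 ELSE 1 END), COUNT(*) FROM user_info
    }
}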

View File

@@ -1,5 +1,7 @@
package cn.datax.service.data.quality.schedule.rules;
import cn.datax.service.data.quality.api.dto.Regular;
import java.util.HashMap;
import java.util.Map;
@@ -13,6 +15,7 @@ public class RuleItemRegistry {
this.rule_item_map.put("integrity_key", new IntegrityRule());
this.rule_item_map.put("relevance_key", new RelevanceRule());
this.rule_item_map.put("timeliness_key", new TimelinessRule());
this.rule_item_map.put("regular_key", new RegularRule());
this.rule_item_map.put("accuracy_key_length", new AccuracyLengthRule());
}

View File

@@ -65,6 +65,8 @@ public class QualityTask {
tasks.add(task);
});
List<Future<CheckReportEntity>> futures;
long checkConsumeTime = 0L;
long startTime = System.currentTimeMillis();
try {
futures = threadPoolExecutor.invokeAll(tasks);
// process the results returned by the worker threads
@@ -76,13 +78,18 @@
} catch (Exception e) {
e.printStackTrace();
}
checkConsumeTime = System.currentTimeMillis() - startTime;
// shut down the thread pool
threadPoolExecutor.shutdown();
// build the check reports
long finalCheckConsumeTime = checkConsumeTime; // total elapsed time for the whole batch, stamped on every report
result.forEach(s -> {
// insert records whose check passed
String status = StrUtil.isBlank(s.getCheckResult()) ? DataConstant.TrueOrFalse.TRUE.getKey() : DataConstant.TrueOrFalse.FALSE.getKey();
if (StrUtil.isBlank(s.getCheckResult())) {
s.setCheckTimeConsuming((int) finalCheckConsumeTime);
s.setCheckResult(DataConstant.TrueOrFalse.TRUE.getKey());
s.setCheckReportType((String) map.get("jobType"));
s.setCheckBatch((String) map.get("batch"));
checkReportService.save(s);
// update the latest check batch number

View File

@@ -1,8 +1,12 @@
package cn.datax.service.data.quality.service;
import cn.datax.service.data.quality.api.dto.CheckRuleDto;
import cn.datax.service.data.quality.api.entity.CheckRuleEntity;
import cn.datax.service.data.quality.api.entity.ScheduleJobEntity;
import cn.datax.common.base.BaseService;
import java.util.List;
/**
* <p>
* Data quality monitoring job table service class
@@ -15,6 +19,14 @@ public interface ScheduleJobService extends BaseService<ScheduleJobEntity> {
ScheduleJobEntity getScheduleJobById(String id);
ScheduleJobEntity saveScheduleJob(ScheduleJobEntity scheduleJob);
ScheduleJobEntity updateScheduleJob(ScheduleJobEntity scheduleJob);
void deleteScheduleJobById(String id);
void deleteScheduleJobBatch(List<String> ids);
void pauseScheduleJobById(String id);
void resumeScheduleJobById(String id);

View File

@@ -4,12 +4,8 @@ import cn.datax.common.base.BaseServiceImpl;
import cn.datax.common.core.RedisConstant;
import cn.datax.common.database.constants.DbType;
import cn.datax.common.redis.service.RedisService;
import cn.datax.service.data.quality.api.dto.Accuracy;
import cn.datax.service.data.quality.api.dto.CheckRuleDto;
import cn.datax.service.data.quality.api.dto.Consistent;
import cn.datax.service.data.quality.api.dto.Relevance;
import cn.datax.service.data.quality.api.dto.RuleConfig;
import cn.datax.service.data.quality.api.dto.Timeliness;
import cn.datax.service.data.metadata.api.entity.MetadataTableEntity;
import cn.datax.service.data.quality.api.dto.*;
import cn.datax.service.data.quality.api.entity.CheckRuleEntity;
import cn.datax.service.data.quality.api.enums.RuleItem;
import cn.datax.service.data.quality.dao.CheckRuleDao;
@@ -49,25 +45,49 @@ public class CheckRuleServiceImpl extends BaseServiceImpl<CheckRuleDao, CheckRul
@Autowired
private RedisService redisService;
/*@Autowired
private MetadataTableDao metadataTableDao;*/
private static final String BIND_GB_CODE = "gb_code";
private static final String BIND_GB_NAME = "gb_name";
@Override
@Transactional(rollbackFor = Exception.class)
public CheckRuleEntity saveCheckRule(CheckRuleDto checkRuleDto) {
// structural check
if("jg".equals(checkRuleDto.getQualityType())){
CheckRuleEntity checkRule = null;
for (int i = 0; i < checkRuleDto.getTransferValue().size(); i++) {
String currentString = checkRuleDto.getTransferValue().get(i);
QueryWrapper<MetadataTableEntity> queryWrapper = new QueryWrapper<>();
queryWrapper.eq("source_id", checkRuleDto.getRuleSourceId());
queryWrapper.eq("table_name",currentString);
//MetadataTableEntity metadataTableEntity = metadataTableDao.selectOne(queryWrapper);
checkRule = checkRuleMapper.toEntity(checkRuleDto);
checkRule.setRuleTable(currentString);
//checkRule.setRuleTableId(metadataTableEntity.getId());
checkRuleDao.insert(checkRule);
}
return checkRule;
}else {
CheckRuleEntity checkRule = checkRuleMapper.toEntity(checkRuleDto);
String sql = parseSql(checkRule);
checkRule.setRuleSql(sql);
checkRuleDao.insert(checkRule);
return checkRule;
}
}
@Override
@Transactional(rollbackFor = Exception.class)
public CheckRuleEntity updateCheckRule(CheckRuleDto checkRuleDto) {
CheckRuleEntity checkRule = checkRuleMapper.toEntity(checkRuleDto);
if(!"jg".equals(checkRuleDto.getQualityType())){
String sql = parseSql(checkRule);
checkRule.setRuleSql(sql);
}
checkRuleDao.updateById(checkRule);
return checkRule;
}
@@ -95,6 +115,7 @@ public class CheckRuleServiceImpl extends BaseServiceImpl<CheckRuleDao, CheckRul
return checkRuleDao.selectOne(new QueryWrapper<CheckRuleEntity>().eq("rule_source_id", sourceId).last("limit 1"));
}
private String parseSql(CheckRuleEntity checkRule) {
RuleConfig ruleConfig = checkRule.getRuleConfig();
Map<String, Object> map = new HashMap<>();
@@ -132,6 +153,11 @@ public class CheckRuleServiceImpl extends BaseServiceImpl<CheckRuleDao, CheckRul
Accuracy accuracy = ruleConfig.getAccuracy();
map.put("max_length", accuracy.getMaxLength());
break;
// regular expression
case Regular:
Regular regular = ruleConfig.getRegular();
map.put("regular", regular.getRegular());
break;
default:
return null;
}

View File

@@ -1,19 +1,22 @@
package cn.datax.service.data.quality.service.impl;
import cn.datax.common.base.BaseServiceImpl;
import cn.datax.common.core.DataConstant;
import cn.datax.service.data.quality.api.entity.CheckRuleEntity;
import cn.datax.service.data.quality.api.entity.ScheduleJobEntity;
import cn.datax.service.data.quality.dao.ScheduleJobDao;
import cn.datax.service.data.quality.mapstruct.ScheduleJobMapper;
import cn.datax.service.data.quality.schedule.CronTaskRegistrar;
import cn.datax.service.data.quality.schedule.SchedulingRunnable;
import cn.datax.service.data.quality.service.ScheduleJobService;
import cn.datax.service.data.quality.mapstruct.ScheduleJobMapper;
import cn.datax.service.data.quality.dao.ScheduleJobDao;
import cn.datax.common.base.BaseServiceImpl;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.util.List;
/**
* <p>
* Data quality monitoring job table service implementation
@@ -29,6 +32,9 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJobDao, Sche
@Autowired
private ScheduleJobDao scheduleJobDao;
@Autowired
private ScheduleJobMapper scheduleJobMapper;
@Autowired
private CronTaskRegistrar cronTaskRegistrar;
@@ -38,10 +44,36 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJobDao, Sche
return scheduleJobEntity;
}
@Override
@Transactional(rollbackFor = Exception.class)
public ScheduleJobEntity saveScheduleJob(ScheduleJobEntity scheduleJob) {
scheduleJobDao.insert(scheduleJob);
return scheduleJob;
}
@Override
@Transactional(rollbackFor = Exception.class)
public ScheduleJobEntity updateScheduleJob(ScheduleJobEntity scheduleJob) {
scheduleJobDao.updateById(scheduleJob);
return scheduleJob;
}
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteScheduleJobById(String id) {
scheduleJobDao.deleteById(id);
}
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteScheduleJobBatch(List<String> ids) {
scheduleJobDao.deleteBatchIds(ids);
}
@Override
public void pauseScheduleJobById(String id) {
ScheduleJobEntity scheduleJobEntity = super.getById(id);
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams());
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams(), scheduleJobEntity.getJobType());
cronTaskRegistrar.removeCronTask(task);
scheduleJobEntity.setStatus(DataConstant.TrueOrFalse.FALSE.getKey());
scheduleJobDao.updateById(scheduleJobEntity);
@@ -50,7 +82,7 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJobDao, Sche
@Override
public void resumeScheduleJobById(String id) {
ScheduleJobEntity scheduleJobEntity = super.getById(id);
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams());
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams(), scheduleJobEntity.getJobType());
cronTaskRegistrar.addCronTask(task, scheduleJobEntity.getCronExpression());
scheduleJobEntity.setStatus(DataConstant.TrueOrFalse.TRUE.getKey());
scheduleJobDao.updateById(scheduleJobEntity);
@ -60,7 +92,7 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJobDao, Sche
@Async("taskExecutor")
public void runScheduleJobById(String id) {
ScheduleJobEntity scheduleJobEntity = super.getById(id);
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams());
SchedulingRunnable task = new SchedulingRunnable(id, scheduleJobEntity.getBeanName(), scheduleJobEntity.getMethodName(), scheduleJobEntity.getMethodParams(), scheduleJobEntity.getJobType());
task.run();
}
}
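
Every construction site now passes scheduleJobEntity.getJobType() as a fifth argument. Since pauseScheduleJobById builds a fresh SchedulingRunnable and hands it to cronTaskRegistrar.removeCronTask, removal presumably matches tasks by value, so the new field should take part in equals/hashCode. A sketch of the assumed shape; the field types and the equality contract are assumptions, not the real class:

import java.util.Objects;

public class SchedulingRunnableSketch implements Runnable {

    private final String jobId;
    private final String beanName;
    private final String methodName;
    private final String methodParams;
    private final String jobType; // the new fifth constructor argument

    public SchedulingRunnableSketch(String jobId, String beanName, String methodName,
                                    String methodParams, String jobType) {
        this.jobId = jobId;
        this.beanName = beanName;
        this.methodName = methodName;
        this.methodParams = methodParams;
        this.jobType = jobType;
    }

    @Override
    public void run() {
        // The real class resolves beanName from the Spring context and invokes
        // methodName(methodParams) reflectively; elided in this sketch.
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof SchedulingRunnableSketch)) return false;
        SchedulingRunnableSketch that = (SchedulingRunnableSketch) o;
        return Objects.equals(jobId, that.jobId)
                && Objects.equals(beanName, that.beanName)
                && Objects.equals(methodName, that.methodName)
                && Objects.equals(methodParams, that.methodParams)
                && Objects.equals(jobType, that.jobType); // must line up with removeCronTask lookups
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, beanName, methodName, methodParams, jobType);
    }
}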

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka
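
The same pair of edits recurs in every service config below: the Eureka host moves from 192.168.1.169 to 192.168.1.217, and registry-fetch-interval-seconds is set to 3 so clients refresh their registry cache every 3 seconds instead of the 30-second default, trading extra polling traffic for faster visibility of instance changes; instance-info-replication-interval-seconds stays at 30.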

View File

@ -11,6 +11,7 @@
<result column="check_total_count" property="checkTotalCount" />
<result column="check_error_count" property="checkErrorCount" />
<result column="check_batch" property="checkBatch" />
<result column="check_report_type" property="checkReportType" />
</resultMap>
<resultMap id="ExtendResultMap" type="cn.datax.service.data.quality.api.entity.CheckReportEntity" extends="BaseResultMap">
@ -24,12 +25,12 @@
<!-- Common query result columns -->
<sql id="Base_Column_List">
id,
check_rule_id, check_date, check_result, check_total_count, check_error_count, check_batch
check_rule_id, check_date, check_result, check_total_count, check_error_count, check_batch, check_report_type, check_time_consuming
</sql>
<sql id="Report_Column_List">
${alias}.id,
${alias}.check_rule_id, ${alias}.check_date, ${alias}.check_result, ${alias}.check_total_count, ${alias}.check_error_count, ${alias}.check_batch
${alias}.check_rule_id, ${alias}.check_date, ${alias}.check_result, ${alias}.check_total_count, ${alias}.check_error_count, ${alias}.check_batch, ${alias}.check_report_type, ${alias}.check_time_consuming
</sql>
<select id="selectPage" resultMap="ExtendResultMap">
@ -51,6 +52,8 @@
<result column="rule_level_id" property="ruleLevelId" />
<result column="rule_level_name" property="ruleLevelName" />
<result column="check_error_count" property="checkErrorCount" />
<result column="check_report_type" property="checkReportType" />
<result column="check_time_consuming" property="checkTimeConsuming" />
</resultMap>
<resultMap id="ExtendReportResultMap" type="cn.datax.service.data.quality.api.entity.DataReportEntity" extends="ReportResultMap">
@ -60,6 +63,8 @@
<result column="rule_column_name" property="ruleColumnName" />
<result column="rule_column_comment" property="ruleColumnComment" />
<result column="check_total_count" property="checkTotalCount" />
<result column="check_report_type" property="checkReportType" />
<result column="check_time_consuming" property="checkTimeConsuming" />
</resultMap>
<select id="getReportBySource" resultMap="ReportResultMap">
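
Note that Base_Column_List now selects check_time_consuming, but BaseResultMap above only maps check_report_type; the extra column is populated only if underscore-to-camelCase auto-mapping is enabled, so an explicit <result column="check_time_consuming" property="checkTimeConsuming" /> (as the two report maps have) would be safer. Either way the entity needs matching properties; a sketch with types guessed from the column names:

// Assumed additions to CheckReportEntity (the real entity lives in
// cn.datax.service.data.quality.api.entity; types are guesses).
public class CheckReportEntitySketch {
    private String checkReportType;   // check_report_type
    private Long checkTimeConsuming;  // check_time_consuming, e.g. elapsed ms per check run

    public String getCheckReportType() { return checkReportType; }
    public void setCheckReportType(String checkReportType) { this.checkReportType = checkReportType; }
    public Long getCheckTimeConsuming() { return checkTimeConsuming; }
    public void setCheckTimeConsuming(Long checkTimeConsuming) { this.checkTimeConsuming = checkTimeConsuming; }
}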

View File

@ -7,17 +7,20 @@
<result column="id" property="id" />
<result column="status" property="status" />
<result column="job_name" property="jobName" />
<result column="job_type" property="jobType" />
<result column="bean_name" property="beanName" />
<result column="method_name" property="methodName" />
<result column="method_params" property="methodParams" />
<result column="cron_expression" property="cronExpression" />
<result column="after_date" property="afterDate" />
<result column="remark" property="remark" />
</resultMap>
<!-- Common query result columns -->
<sql id="Base_Column_List">
id,
status,
job_name, bean_name, method_name, method_params, cron_expression
job_name, job_type, bean_name, method_name, method_params, cron_expression, after_date, remark
</sql>
</mapper>
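
The widened column list implies three new properties on ScheduleJobEntity. A sketch with types guessed from the column names; jobType is the value threaded through SchedulingRunnable above, and the meaning of after_date is an assumption:

import java.util.Date;

// Assumed additions to ScheduleJobEntity matching the new columns.
public class ScheduleJobEntitySketch {
    private String jobType;   // job_type: kind of quality job, passed to SchedulingRunnable
    private Date afterDate;   // after_date: assumed earliest-fire timestamp
    private String remark;    // remark: free-form description

    public String getJobType() { return jobType; }
    public void setJobType(String jobType) { this.jobType = jobType; }
    public Date getAfterDate() { return afterDate; }
    public void setAfterDate(Date afterDate) { this.afterDate = afterDate; }
    public String getRemark() { return remark; }
    public void setRemark(String remark) { this.remark = remark; }
}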

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -22,11 +22,11 @@ eureka:
# Register using the IP address
prefer-ip-address: true
# External IP address
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka

View File

@ -63,7 +63,7 @@ public class AuthorizationController {
public ResponseEntity<Object> login(@Validated @RequestBody AuthUserDto authUser, HttpServletRequest request) throws Exception {
// Decrypt the password
String password = RsaUtils.decryptByPrivateKey(RsaProperties.privateKey, authUser.getPassword());
// Fetch the captcha code
/* // Fetch the captcha code
String code = (String) redisUtils.get(authUser.getUuid());
// Clear the captcha code
redisUtils.del(authUser.getUuid());
@ -72,7 +72,7 @@ public class AuthorizationController {
}
if (StringUtils.isBlank(authUser.getCode()) || !authUser.getCode().equalsIgnoreCase(code)) {
throw new BadRequestException("验证码错误");
}
}*/
UsernamePasswordAuthenticationToken authenticationToken =
new UsernamePasswordAuthenticationToken(authUser.getUsername(), password);
Authentication authentication = authenticationManagerBuilder.getObject().authenticate(authenticationToken);
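
Commenting the captcha check out disables it in every environment until someone edits the source again. A property-driven toggle keeps the protection one config change away; a sketch under the assumption of a captcha.enabled property, where the flag name and the standalone helper are hypothetical and the comparison mirrors the commented-out block:

// Flag would be injected in the controller, e.g. @Value("${captcha.enabled:true}") (assumed name).
public class CaptchaToggleSketch {

    static void verifyCaptcha(boolean captchaEnabled, String submittedCode, String storedCode) {
        if (!captchaEnabled) {
            return; // dev/test profiles skip the check without deleting the code path
        }
        // Stored code must exist and match case-insensitively, as in the commented block.
        if (storedCode == null || submittedCode == null || !submittedCode.equalsIgnoreCase(storedCode)) {
            throw new IllegalArgumentException("invalid captcha"); // BadRequestException("验证码错误") in the real controller
        }
    }
}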

View File

@ -46,11 +46,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka/
defaultZone: http://192.168.1.217:8610/eureka/

View File

@ -20,11 +20,11 @@ eureka:
instance:
lease-renewal-interval-in-seconds: 20
prefer-ip-address: true
ip-address: 192.168.1.169
ip-address: 192.168.1.217
client:
register-with-eureka: true
fetch-registry: true
instance-info-replication-interval-seconds: 30
registry-fetch-interval-seconds: 3
service-url:
defaultZone: http://192.168.1.169:8610/eureka
defaultZone: http://192.168.1.217:8610/eureka