docs: optimize Dubbo comments

2.X
AprilWind 8 months ago
parent 075a41cdb0
commit e09bbc249f

@ -49,6 +49,9 @@ import static org.apache.dubbo.common.constants.LoggerCodeConstants.PROXY_FAILED
import static org.apache.dubbo.common.utils.StringUtils.replace;
import static org.apache.dubbo.metadata.report.support.Constants.*;
/**
* Abstract base implementation of {@link MetadataReport}: keeps a local disk cache of reported
* metadata and supports synchronous/asynchronous reporting, periodic re-publication and retry.
*/
public abstract class AbstractMetadataReport implements MetadataReport {
protected static final String DEFAULT_ROOT = "dubbo";
@ -58,18 +61,17 @@ public abstract class AbstractMetadataReport implements MetadataReport {
// Log output
protected final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
// Local disk cache, where the special key value.registries records the list of metadata centers, and the others are
// the list of notified service providers
// Local disk cache: the special key "registries" records the list of metadata centers; the other entries are the notified service providers
final Properties properties = new Properties();
private final ExecutorService reportCacheExecutor =
Executors.newFixedThreadPool(1, new NamedThreadFactory("DubboSaveMetadataReport", true));
Executors.newFixedThreadPool(1, new NamedThreadFactory("DubboSaveMetadataReport", true));
final Map<MetadataIdentifier, Object> allMetadataReports = new ConcurrentHashMap<>(4);
private final AtomicLong lastCacheChanged = new AtomicLong();
final Map<MetadataIdentifier, Object> failedReports = new ConcurrentHashMap<>(4);
private URL reportURL;
boolean syncReport;
// Local disk cache file
// Local disk cache file
File file;
private AtomicBoolean initialized = new AtomicBoolean(false);
public MetadataReportRetry metadataReportRetry;
@ -79,29 +81,30 @@ public abstract class AbstractMetadataReport implements MetadataReport {
private final boolean reportDefinition;
protected ApplicationModel applicationModel;
/**
* Create the metadata report for the given metadata center address and initialize the local
* file cache, the retry handler and (optionally) the daily re-publication timer.
*
* @param reportServerURL metadata report server URL
*/
public AbstractMetadataReport(URL reportServerURL) {
setUrl(reportServerURL);
applicationModel = reportServerURL.getOrDefaultApplicationModel();
boolean localCacheEnabled = reportServerURL.getParameter(REGISTRY_LOCAL_FILE_CACHE_ENABLED, true);
// Start file save timer
// Start the file save timer
String defaultFilename = System.getProperty("user.home") + DUBBO_METADATA
+ reportServerURL.getApplication()
+ "-" + replace(reportServerURL.getAddress(), ":", "-")
+ CACHE;
+ reportServerURL.getApplication() + "-" + replace(reportServerURL.getAddress(), ":", "-") + CACHE;
String filename = reportServerURL.getParameter(FILE_KEY, defaultFilename);
File file = null;
if (localCacheEnabled && ConfigUtils.isNotEmpty(filename)) {
file = new File(filename);
if (!file.exists()
&& file.getParentFile() != null
&& !file.getParentFile().exists()) {
if (!file.exists() && file.getParentFile() != null && !file.getParentFile().exists()) {
if (!file.getParentFile().mkdirs()) {
throw new IllegalArgumentException("Invalid service store file " + file
+ ", cause: Failed to create directory " + file.getParentFile() + "!");
+ ", cause: Failed to create directory " + file.getParentFile() + "!");
}
}
// if this file exists, firstly delete it.
// If the cache file already exists, delete it first
if (!initialized.getAndSet(true) && file.exists()) {
file.delete();
}
@ -110,24 +113,33 @@ public abstract class AbstractMetadataReport implements MetadataReport {
loadProperties();
syncReport = reportServerURL.getParameter(SYNC_REPORT_KEY, false);
metadataReportRetry = new MetadataReportRetry(
reportServerURL.getParameter(RETRY_TIMES_KEY, DEFAULT_METADATA_REPORT_RETRY_TIMES),
reportServerURL.getParameter(RETRY_PERIOD_KEY, DEFAULT_METADATA_REPORT_RETRY_PERIOD));
// cycle report the data switch
reportServerURL.getParameter(RETRY_TIMES_KEY, DEFAULT_METADATA_REPORT_RETRY_TIMES),
reportServerURL.getParameter(RETRY_PERIOD_KEY, DEFAULT_METADATA_REPORT_RETRY_PERIOD));
// Switch for the periodic (cycle) metadata report
if (reportServerURL.getParameter(CYCLE_REPORT_KEY, DEFAULT_METADATA_REPORT_CYCLE_REPORT)) {
reportTimerScheduler = Executors.newSingleThreadScheduledExecutor(
new NamedThreadFactory("DubboMetadataReportTimer", true));
reportTimerScheduler.scheduleAtFixedRate(
this::publishAll, calculateStartTime(), ONE_DAY_IN_MILLISECONDS, TimeUnit.MILLISECONDS);
reportTimerScheduler = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("DubboMetadataReportTimer", true));
reportTimerScheduler.scheduleAtFixedRate(this::publishAll, calculateStartTime(), ONE_DAY_IN_MILLISECONDS, TimeUnit.MILLISECONDS);
}
this.reportMetadata = reportServerURL.getParameter(REPORT_METADATA_KEY, false);
this.reportDefinition = reportServerURL.getParameter(REPORT_DEFINITION_KEY, true);
}
/**
* Get the metadata report server URL.
*
* @return the metadata report URL
*/
public URL getUrl() {
return reportURL;
}
/**
* Set the metadata report server URL.
*
* @param url the metadata report URL, must not be null
*/
protected void setUrl(URL url) {
if (url == null) {
throw new IllegalArgumentException("metadataReport url == null");
@ -135,6 +147,11 @@ public abstract class AbstractMetadataReport implements MetadataReport {
this.reportURL = url;
}
/**
* Persist the in-memory properties to the local cache file.
*
* @param version cache change version; stale versions are skipped
*/
private void doSaveProperties(long version) {
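// Skip stale requests: a newer cache change has already been recorded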
if (version < lastCacheChanged.get()) {
return;
@ -142,21 +159,21 @@ public abstract class AbstractMetadataReport implements MetadataReport {
if (file == null) {
return;
}
// Save
// Save operation
try {
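// Guard the cache file with a companion ".lock" file and a FileLock so concurrent processes do not overwrite each other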
File lockfile = new File(file.getAbsolutePath() + ".lock");
if (!lockfile.exists()) {
lockfile.createNewFile();
}
try (RandomAccessFile raf = new RandomAccessFile(lockfile, "rw");
FileChannel channel = raf.getChannel()) {
FileChannel channel = raf.getChannel()) {
FileLock lock = channel.tryLock();
if (lock == null) {
throw new IOException(
"Can not lock the metadataReport cache file " + file.getAbsolutePath()
+ ", ignore and retry later, maybe multi java process use the file, please config: dubbo.metadata.file=xxx.properties");
"Can not lock the metadataReport cache file " + file.getAbsolutePath()
+ ", ignore and retry later, maybe multi java process use the file, please config: dubbo.metadata.file=xxx.properties");
}
// Save
// Save
try {
if (!file.exists()) {
file.createNewFile();
@ -164,12 +181,10 @@ public abstract class AbstractMetadataReport implements MetadataReport {
Properties tmpProperties;
if (!syncReport) {
// When syncReport = false, properties.setProperty and properties.store are called from the same
// thread(reportCacheExecutor), so deep copy is not required
// When syncReport = false, properties.setProperty and properties.store are called from the same thread (reportCacheExecutor), so no deep copy is required
tmpProperties = properties;
} else {
// Using store method and setProperty method of the this.properties will cause lock contention
// under multi-threading, so deep copy a new container
// Calling store and setProperty on this.properties from multiple threads would cause lock contention, so deep-copy into a new container
tmpProperties = new Properties();
Set<Map.Entry<Object, Object>> entries = properties.entrySet();
for (Map.Entry<Object, Object> entry : entries) {
@ -190,15 +205,14 @@ public abstract class AbstractMetadataReport implements MetadataReport {
} else {
reportCacheExecutor.execute(new SaveProperties(lastCacheChanged.incrementAndGet()));
}
logger.warn(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Failed to save service store file, cause: " + e.getMessage(),
e);
logger.warn(COMMON_UNEXPECTED_EXCEPTION, "", "",
"Failed to save service store file, cause: " + e.getMessage(), e);
}
}
/**
* Load the local disk cache file into the in-memory properties.
*/
void loadProperties() {
if (file != null && file.exists()) {
try (InputStream in = new FileInputStream(file)) {
@ -212,6 +226,14 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
/**
* Update the local properties cache and schedule saving it to disk.
*
* @param metadataIdentifier identifier of the metadata entry
* @param value metadata content
* @param add true to add/update the entry, false to remove it
* @param sync true to save synchronously, false to save on the cache executor
*/
private void saveProperties(MetadataIdentifier metadataIdentifier, String value, boolean add, boolean sync) {
if (file == null) {
return;
@ -219,14 +241,19 @@ public abstract class AbstractMetadataReport implements MetadataReport {
try {
if (add) {
// Add the metadata entry to properties
properties.setProperty(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY), value);
} else {
// Remove the specified metadata entry
properties.remove(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
}
// Bump the cache change version
long version = lastCacheChanged.incrementAndGet();
if (sync) {
// Save the properties to file synchronously
new SaveProperties(version).run();
} else {
// Save the properties to file asynchronously on the cache executor
reportCacheExecutor.execute(new SaveProperties(version));
}
@ -240,6 +267,9 @@ public abstract class AbstractMetadataReport implements MetadataReport {
return getUrl().toString();
}
/**
* {@code Runnable} task that persists the properties snapshot for a captured cache version.
*/
private class SaveProperties implements Runnable {
private long version;
@ -253,9 +283,14 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
/**
* Store provider metadata, synchronously or via the report cache executor depending on syncReport.
*
* @param providerMetadataIdentifier provider metadata identifier
* @param serviceDefinition service definition to store
*/
@Override
public void storeProviderMetadata(
MetadataIdentifier providerMetadataIdentifier, ServiceDefinition serviceDefinition) {
public void storeProviderMetadata(MetadataIdentifier providerMetadataIdentifier, ServiceDefinition serviceDefinition) {
if (syncReport) {
storeProviderMetadataTask(providerMetadataIdentifier, serviceDefinition);
} else {
@ -263,95 +298,135 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
private void storeProviderMetadataTask(
MetadataIdentifier providerMetadataIdentifier, ServiceDefinition serviceDefinition) {
/**
* Store the provider metadata and publish a metrics event for the operation.
*
* @param providerMetadataIdentifier provider metadata identifier
* @param serviceDefinition service definition to store
*/
private void storeProviderMetadataTask(MetadataIdentifier providerMetadataIdentifier, ServiceDefinition serviceDefinition) {
// Convert the metadata operation into a service subscribe event
MetadataEvent metadataEvent = MetadataEvent.toServiceSubscribeEvent(applicationModel, providerMetadataIdentifier.getUniqueServiceName());
MetadataEvent metadataEvent = MetadataEvent.toServiceSubscribeEvent(
applicationModel, providerMetadataIdentifier.getUniqueServiceName());
// Post the metadata event to the metrics event bus and run the store logic as its callback
MetricsEventBus.post(
metadataEvent,
() -> {
boolean result = true;
try {
if (logger.isInfoEnabled()) {
logger.info("store provider metadata. Identifier : " + providerMetadataIdentifier
+ "; definition: " + serviceDefinition);
}
allMetadataReports.put(providerMetadataIdentifier, serviceDefinition);
failedReports.remove(providerMetadataIdentifier);
String data = JsonUtils.toJson(serviceDefinition);
doStoreProviderMetadata(providerMetadataIdentifier, data);
saveProperties(providerMetadataIdentifier, data, true, !syncReport);
} catch (Exception e) {
// retry again. If failed again, throw exception.
failedReports.put(providerMetadataIdentifier, serviceDefinition);
metadataReportRetry.startRetryTask();
logger.error(
PROXY_FAILED_EXPORT_SERVICE,
"",
"",
"Failed to put provider metadata " + providerMetadataIdentifier + " in "
+ serviceDefinition + ", cause: " + e.getMessage(),
e);
result = false;
metadataEvent,
() -> {
boolean result = true;
try {
// Log: storing provider metadata
if (logger.isInfoEnabled()) {
logger.info("store provider metadata. Identifier : " + providerMetadataIdentifier
+ "; definition: " + serviceDefinition);
}
return result;
},
aBoolean -> aBoolean);
// Put the service definition into the all-reports cache and clear any previous failure
allMetadataReports.put(providerMetadataIdentifier, serviceDefinition);
failedReports.remove(providerMetadataIdentifier);
// Serialize the service definition to JSON and store it in the metadata store
String data = JsonUtils.toJson(serviceDefinition);
doStoreProviderMetadata(providerMetadataIdentifier, data);
// Persist the change to the local properties cache
saveProperties(providerMetadataIdentifier, data, true, !syncReport);
} catch (Exception e) {
// On failure: record the failed report, start the retry task and log the error
failedReports.put(providerMetadataIdentifier, serviceDefinition);
metadataReportRetry.startRetryTask();
logger.error(PROXY_FAILED_EXPORT_SERVICE, "", "",
"Failed to put provider metadata " + providerMetadataIdentifier + " in "
+ serviceDefinition + ", cause: " + e.getMessage(),
e);
result = false;
}
return result;
},
aBoolean -> aBoolean);
}
/**
* Store consumer metadata, synchronously or on the report cache executor thread.
*
* @param consumerMetadataIdentifier consumer metadata identifier
* @param serviceParameterMap consumer service parameters
*/
@Override
public void storeConsumerMetadata(
MetadataIdentifier consumerMetadataIdentifier, Map<String, String> serviceParameterMap) {
public void storeConsumerMetadata(MetadataIdentifier consumerMetadataIdentifier, Map<String, String> serviceParameterMap) {
if (syncReport) {
storeConsumerMetadataTask(consumerMetadataIdentifier, serviceParameterMap);
} else {
reportCacheExecutor.execute(
() -> storeConsumerMetadataTask(consumerMetadataIdentifier, serviceParameterMap));
reportCacheExecutor.execute(() -> storeConsumerMetadataTask(consumerMetadataIdentifier, serviceParameterMap));
}
}
protected void storeConsumerMetadataTask(
MetadataIdentifier consumerMetadataIdentifier, Map<String, String> serviceParameterMap) {
/**
* Store the consumer metadata and persist it to the local cache.
*
* @param consumerMetadataIdentifier consumer metadata identifier
* @param serviceParameterMap consumer service parameters
*/
protected void storeConsumerMetadataTask(MetadataIdentifier consumerMetadataIdentifier, Map<String, String> serviceParameterMap) {
try {
// Log: storing consumer metadata
if (logger.isInfoEnabled()) {
logger.info("store consumer metadata. Identifier : " + consumerMetadataIdentifier + "; definition: "
+ serviceParameterMap);
+ serviceParameterMap);
}
// Put the parameter map into the all-reports cache and clear any previous failure
allMetadataReports.put(consumerMetadataIdentifier, serviceParameterMap);
failedReports.remove(consumerMetadataIdentifier);
// Serialize the parameter map to JSON and store it in the metadata store
String data = JsonUtils.toJson(serviceParameterMap);
doStoreConsumerMetadata(consumerMetadataIdentifier, data);
// Persist the change to the local properties cache
saveProperties(consumerMetadataIdentifier, data, true, !syncReport);
} catch (Exception e) {
// retry again. If failed again, throw exception.
// On failure: record the failed report, start the retry task and log the error
failedReports.put(consumerMetadataIdentifier, serviceParameterMap);
metadataReportRetry.startRetryTask();
logger.error(
PROXY_FAILED_EXPORT_SERVICE,
"",
"",
"Failed to put consumer metadata " + consumerMetadataIdentifier + "; " + serviceParameterMap
+ ", cause: " + e.getMessage(),
e);
PROXY_FAILED_EXPORT_SERVICE,
"",
"",
"Failed to put consumer metadata " + consumerMetadataIdentifier + "; " + serviceParameterMap
+ ", cause: " + e.getMessage(),
e);
}
}
/**
* Shut down the executors and release the retry handler.
*/
@Override
public void destroy() {
// Shut down the report cache executor
if (reportCacheExecutor != null) {
reportCacheExecutor.shutdown();
}
// Shut down the periodic report scheduler
if (reportTimerScheduler != null) {
reportTimerScheduler.shutdown();
}
// Destroy the metadata report retry handler and clear the reference
if (metadataReportRetry != null) {
metadataReportRetry.destroy();
metadataReportRetry = null;
}
}
/**
* Save service metadata, synchronously or asynchronously depending on syncReport.
*
* @param metadataIdentifier service metadata identifier
* @param url metadata URL to save
*/
@Override
public void saveServiceMetadata(ServiceMetadataIdentifier metadataIdentifier, URL url) {
if (syncReport) {
@ -361,6 +436,11 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
/**
* Remove service metadata, synchronously or asynchronously depending on syncReport.
*
* @param metadataIdentifier service metadata identifier to remove
*/
@Override
public void removeServiceMetadata(ServiceMetadataIdentifier metadataIdentifier) {
if (syncReport) {
@ -370,28 +450,51 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
/**
* Get the exported URLs for the given service metadata identifier (falling back to the local cache is still a TODO).
*
* @param metadataIdentifier service metadata identifier
* @return list of exported URLs
*/
@Override
public List<String> getExportedURLs(ServiceMetadataIdentifier metadataIdentifier) {
// TODO, fallback to local cache
// TODO: fall back to local cache
return doGetExportedURLs(metadataIdentifier);
}
/**
* Save the subscribed URLs, synchronously or asynchronously depending on syncReport.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @param urls subscribed URLs
*/
@Override
public void saveSubscribedData(SubscriberMetadataIdentifier subscriberMetadataIdentifier, Set<String> urls) {
if (syncReport) {
doSaveSubscriberData(subscriberMetadataIdentifier, JsonUtils.toJson(urls));
} else {
reportCacheExecutor.execute(
() -> doSaveSubscriberData(subscriberMetadataIdentifier, JsonUtils.toJson(urls)));
reportCacheExecutor.execute(() -> doSaveSubscriberData(subscriberMetadataIdentifier, JsonUtils.toJson(urls)));
}
}
/**
* Get the subscribed URLs for the given subscriber.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @return list of subscribed URLs
*/
@Override
public List<String> getSubscribedURLs(SubscriberMetadataIdentifier subscriberMetadataIdentifier) {
String content = doGetSubscribedURLs(subscriberMetadataIdentifier);
return JsonUtils.toJavaList(content, String.class);
}
/**
* Determine the protocol (side) of the given URL.
*
* @param url the URL
* @return the side parameter if present, otherwise the URL protocol
*/
String getProtocol(URL url) {
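// Prefer the explicit side parameter (provider/consumer); fall back to the URL protocol when it is absent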
String protocol = url.getSide();
protocol = protocol == null ? url.getProtocol() : protocol;
@ -399,33 +502,52 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
/**
* @return if need to continue
* Retry reporting all previously failed metadata.
*
* @return true if there were no failed reports left to retry, false otherwise
*/
public boolean retry() {
return doHandleMetadataCollection(failedReports);
}
/**
* Whether service definitions should be reported.
*
* @return true to report definitions, false otherwise
*/
@Override
public boolean shouldReportDefinition() {
return reportDefinition;
}
/**
* Whether metadata should be reported.
*
* @return true to report metadata, false otherwise
*/
@Override
public boolean shouldReportMetadata() {
return reportMetadata;
}
/**
* Re-report every entry in the given metadata collection.
*
* @param metadataMap metadata entries keyed by identifier
* @return true if the map was empty (nothing to retry), false otherwise
*/
private boolean doHandleMetadataCollection(Map<MetadataIdentifier, Object> metadataMap) {
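// An empty map means there is nothing left to retry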
if (metadataMap.isEmpty()) {
return true;
}
Iterator<Map.Entry<MetadataIdentifier, Object>> iterable =
metadataMap.entrySet().iterator();
Iterator<Map.Entry<MetadataIdentifier, Object>> iterable = metadataMap.entrySet().iterator();
while (iterable.hasNext()) {
Map.Entry<MetadataIdentifier, Object> item = iterable.next();
if (PROVIDER_SIDE.equals(item.getKey().getSide())) {
// Provider-side metadata is stored as a full service definition
this.storeProviderMetadata(item.getKey(), (FullServiceDefinition) item.getValue());
} else if (CONSUMER_SIDE.equals(item.getKey().getSide())) {
// Consumer-side metadata is stored as a parameter map
this.storeConsumerMetadata(item.getKey(), (Map) item.getValue());
}
}
@ -433,7 +555,8 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
/**
* not private. just for unittest.
* Publish all cached metadata reports again.
*/
void publishAll() {
logger.info("start to publish all metadata.");
@ -441,9 +564,14 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
/**
* between 2:00 am to 6:00 am, the time is random.
*
* Calculation steps:
* 1. Take the current time in milliseconds.
* 2. Reset the calendar to today at 00:00:00.000.
* 3. Compute the remaining milliseconds until the next midnight.
* 4. Add a random offset of two to six hours.
*
* @return the initial delay in milliseconds before the first daily publish
*/
long calculateStartTime() {
Calendar calendar = Calendar.getInstance();
@ -454,62 +582,100 @@ public abstract class AbstractMetadataReport implements MetadataReport {
calendar.set(Calendar.MILLISECOND, 0);
long subtract = calendar.getTimeInMillis() + ONE_DAY_IN_MILLISECONDS - nowMill;
return subtract
+ (FOUR_HOURS_IN_MILLISECONDS / 2)
+ ThreadLocalRandom.current().nextInt(FOUR_HOURS_IN_MILLISECONDS);
+ (FOUR_HOURS_IN_MILLISECONDS / 2)
+ ThreadLocalRandom.current().nextInt(FOUR_HOURS_IN_MILLISECONDS);
}
/**
* MetadataReportRetry: schedules retries of failed metadata reports at a fixed delay.
*/
class MetadataReportRetry {
protected final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
final ScheduledExecutorService retryExecutor =
Executors.newScheduledThreadPool(0, new NamedThreadFactory("DubboMetadataReportRetryTimer", true));
/**
* Scheduled executor used to run the retry task.
*/
final ScheduledExecutorService retryExecutor = Executors.newScheduledThreadPool(0, new NamedThreadFactory("DubboMetadataReportRetryTimer", true));
/**
* Future of the currently scheduled retry task, if any.
*/
volatile ScheduledFuture retryScheduledFuture;
/**
* Number of retry attempts executed so far.
*/
final AtomicInteger retryCounter = new AtomicInteger(0);
// retry task schedule period
/**
* Retry task schedule period in milliseconds.
*/
long retryPeriod;
// if no failed report, wait how many times to run retry task.
/**
* If there are no failed reports, how many runs to wait before cancelling the retry task.
*/
int retryTimesIfNonFail = 600;
/**
* Maximum number of retry attempts.
*/
int retryLimit;
/**
* Create a retry handler.
*
* @param retryTimes maximum number of retries
* @param retryPeriod retry period in milliseconds
*/
public MetadataReportRetry(int retryTimes, int retryPeriod) {
this.retryPeriod = retryPeriod;
this.retryLimit = retryTimes;
}
/**
* Start the retry task if it is not already scheduled.
*/
void startRetryTask() {
if (retryScheduledFuture == null) {
synchronized (retryCounter) {
if (retryScheduledFuture == null) {
retryScheduledFuture = retryExecutor.scheduleWithFixedDelay(
() -> {
// Check and connect to the metadata
try {
int times = retryCounter.incrementAndGet();
logger.info("start to retry task for metadata report. retry times:" + times);
if (retry() && times > retryTimesIfNonFail) {
cancelRetryTask();
}
if (times > retryLimit) {
cancelRetryTask();
}
} catch (Throwable t) { // Defensive fault tolerance
logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Unexpected error occur at failed retry, cause: " + t.getMessage(),
t);
() -> {
// Check and connect to the metadata
try {
int times = retryCounter.incrementAndGet();
logger.info("start to retry task for metadata report. retry times:" + times);
// Run the retry; if nothing failed and the idle-run threshold is exceeded, cancel the task
if (retry() && times > retryTimesIfNonFail) {
cancelRetryTask();
}
// Cancel the task once the retry limit is exceeded
if (times > retryLimit) {
cancelRetryTask();
}
},
500,
retryPeriod,
TimeUnit.MILLISECONDS);
} catch (Throwable t) { // Defensive fault tolerance
logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Unexpected error occur at failed retry, cause: " + t.getMessage(),
t);
}
},
500,
retryPeriod,
TimeUnit.MILLISECONDS);
}
}
}
}
/**
* Cancel the scheduled retry task and shut down the retry executor.
*/
void cancelRetryTask() {
if (retryScheduledFuture != null) {
retryScheduledFuture.cancel(false);
@ -517,12 +683,17 @@ public abstract class AbstractMetadataReport implements MetadataReport {
retryExecutor.shutdown();
}
/**
* Destroy the retry handler by cancelling the retry task.
*/
void destroy() {
cancelRetryTask();
}
/**
* @deprecated only for test
* Get the retry executor.
*
* @deprecated only for unit test use
*/
@Deprecated
ScheduledExecutorService getRetryExecutor() {
@ -530,6 +701,13 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
}
/**
* Encode the subscribed URLs and delegate to the string-based save; empty URL lists are ignored.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @param urls subscribed URLs to encode and save
*/
private void doSaveSubscriberData(SubscriberMetadataIdentifier subscriberMetadataIdentifier, List<String> urls) {
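// Nothing to persist when the subscriber URL list is empty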
if (CollectionUtils.isEmpty(urls)) {
return;
@ -541,25 +719,66 @@ public abstract class AbstractMetadataReport implements MetadataReport {
doSaveSubscriberData(subscriberMetadataIdentifier, encodedUrlList);
}
protected abstract void doStoreProviderMetadata(
MetadataIdentifier providerMetadataIdentifier, String serviceDefinitions);
/**
* Store provider metadata in the backing metadata store.
*
* @param providerMetadataIdentifier provider metadata identifier
* @param serviceDefinitions service definition JSON
*/
protected abstract void doStoreProviderMetadata(MetadataIdentifier providerMetadataIdentifier, String serviceDefinitions);
protected abstract void doStoreConsumerMetadata(
MetadataIdentifier consumerMetadataIdentifier, String serviceParameterString);
/**
* Store consumer metadata in the backing metadata store.
*
* @param consumerMetadataIdentifier consumer metadata identifier
* @param serviceParameterString consumer parameters JSON
*/
protected abstract void doStoreConsumerMetadata(MetadataIdentifier consumerMetadataIdentifier, String serviceParameterString);
/**
* Save service metadata in the backing metadata store.
*
* @param metadataIdentifier service metadata identifier
* @param url metadata URL
*/
protected abstract void doSaveMetadata(ServiceMetadataIdentifier metadataIdentifier, URL url);
/**
* Remove service metadata from the backing metadata store.
*
* @param metadataIdentifier service metadata identifier
*/
protected abstract void doRemoveMetadata(ServiceMetadataIdentifier metadataIdentifier);
/**
* Get the exported URLs stored for the given service metadata identifier.
*
* @param metadataIdentifier service metadata identifier
* @return list of exported URLs
*/
protected abstract List<String> doGetExportedURLs(ServiceMetadataIdentifier metadataIdentifier);
protected abstract void doSaveSubscriberData(
SubscriberMetadataIdentifier subscriberMetadataIdentifier, String urlListStr);
/**
* Save subscriber data in the backing metadata store.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @param urlListStr subscribed URLs as a JSON string
*/
protected abstract void doSaveSubscriberData(SubscriberMetadataIdentifier subscriberMetadataIdentifier, String urlListStr);
/**
* Get the subscribed URLs stored for the given subscriber.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @return subscribed URLs as a JSON string
*/
protected abstract String doGetSubscribedURLs(SubscriberMetadataIdentifier subscriberMetadataIdentifier);
/**
* @deprecated only for unit test
* Get the report cache executor.
*
* @return the report cache executor
* @deprecated only for unit test use
*/
@Deprecated
protected ExecutorService getReportCacheExecutor() {
@ -567,7 +786,10 @@ public abstract class AbstractMetadataReport implements MetadataReport {
}
/**
* @deprecated only for unit test
* Get the metadata report retry handler.
*
* @return the retry handler
* @deprecated only for unit test use
*/
@Deprecated
protected MetadataReportRetry getMetadataReportRetry() {

@ -16,6 +16,7 @@
*/
package org.apache.dubbo.metadata.store.redis;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.config.configcenter.ConfigItem;
import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
@ -28,40 +29,17 @@ import org.apache.dubbo.metadata.MappingChangedEvent;
import org.apache.dubbo.metadata.MappingListener;
import org.apache.dubbo.metadata.MetadataInfo;
import org.apache.dubbo.metadata.ServiceNameMapping;
import org.apache.dubbo.metadata.report.identifier.BaseMetadataIdentifier;
import org.apache.dubbo.metadata.report.identifier.KeyTypeEnum;
import org.apache.dubbo.metadata.report.identifier.MetadataIdentifier;
import org.apache.dubbo.metadata.report.identifier.ServiceMetadataIdentifier;
import org.apache.dubbo.metadata.report.identifier.SubscriberMetadataIdentifier;
import org.apache.dubbo.metadata.report.identifier.*;
import org.apache.dubbo.metadata.report.support.AbstractMetadataReport;
import org.apache.dubbo.rpc.RpcException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import redis.clients.jedis.JedisPubSub;
import redis.clients.jedis.Transaction;
import redis.clients.jedis.*;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.util.JedisClusterCRC16;
import static org.apache.dubbo.common.constants.CommonConstants.CLUSTER_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.CYCLE_REPORT_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.DEFAULT_TIMEOUT;
import static org.apache.dubbo.common.constants.CommonConstants.GROUP_CHAR_SEPARATOR;
import static org.apache.dubbo.common.constants.CommonConstants.QUEUES_KEY;
import static org.apache.dubbo.common.constants.CommonConstants.TIMEOUT_KEY;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import static org.apache.dubbo.common.constants.CommonConstants.*;
import static org.apache.dubbo.common.constants.LoggerCodeConstants.TRANSPORT_FAILED_RESPONSE;
import static org.apache.dubbo.metadata.MetadataConstants.META_DATA_STORE_TAG;
import static org.apache.dubbo.metadata.ServiceNameMapping.DEFAULT_MAPPING_GROUP;
@ -69,31 +47,40 @@ import static org.apache.dubbo.metadata.ServiceNameMapping.getAppNames;
import static org.apache.dubbo.metadata.report.support.Constants.DEFAULT_METADATA_REPORT_CYCLE_REPORT;
/**
* RedisMetadataReport
* RedisMetadataReport stores and reads Dubbo metadata in Redis, supporting standalone and cluster mode.
*/
public class RedisMetadataReport extends AbstractMetadataReport {
private static final String REDIS_DATABASE_KEY = "database";
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RedisMetadataReport.class);
// protected , for test
// Protected JedisPool instance, exposed for tests
protected JedisPool pool;
// Redis cluster node set
private Set<HostAndPort> jedisClusterNodes;
private int timeout;
private String password;
private final String root;
// Map of mapping data listeners
private final ConcurrentHashMap<String, MappingDataListener> mappingDataListenerMap = new ConcurrentHashMap<>();
private SetParams jedisParams = SetParams.setParams();
/**
* Create a RedisMetadataReport from the report URL, initializing either a cluster client or a standalone pool.
*
* @param url metadata report URL
*/
public RedisMetadataReport(URL url) {
super(url);
timeout = url.getParameter(TIMEOUT_KEY, DEFAULT_TIMEOUT);
password = url.getPassword();
this.root = url.getGroup(DEFAULT_ROOT);
// Set the default periodic report time
if (url.getParameter(CYCLE_REPORT_KEY, DEFAULT_METADATA_REPORT_CYCLE_REPORT)) {
// ttl default is twice the cycle-report time
// TTL defaults to twice the cycle-report period
jedisParams.ex(ONE_DAY_IN_MILLISECONDS * 2);
}
// Check whether cluster mode is configured
if (url.getParameter(CLUSTER_KEY, false)) {
jedisClusterNodes = new HashSet<>();
List<URL> urls = url.getBackupUrls();
@ -101,31 +88,61 @@ public class RedisMetadataReport extends AbstractMetadataReport {
jedisClusterNodes.add(new HostAndPort(tmpUrl.getHost(), tmpUrl.getPort()));
}
} else {
// Redis database index for standalone mode, default 0
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
pool = new JedisPool(new JedisPoolConfig(), url.getHost(), url.getPort(), timeout, password, database);
}
}
/**
* Store provider metadata in Redis.
*
* @param providerMetadataIdentifier provider metadata identifier
* @param serviceDefinitions service definition JSON
*/
@Override
protected void doStoreProviderMetadata(MetadataIdentifier providerMetadataIdentifier, String serviceDefinitions) {
this.storeMetadata(providerMetadataIdentifier, serviceDefinitions);
}
/**
* Store consumer metadata in Redis.
*
* @param consumerMetadataIdentifier consumer metadata identifier
* @param value consumer parameters JSON
*/
@Override
protected void doStoreConsumerMetadata(MetadataIdentifier consumerMetadataIdentifier, String value) {
this.storeMetadata(consumerMetadataIdentifier, value);
}
/**
* Save service metadata (the encoded URL) in Redis.
*
* @param serviceMetadataIdentifier service metadata identifier
* @param url metadata URL to encode and store
*/
@Override
protected void doSaveMetadata(ServiceMetadataIdentifier serviceMetadataIdentifier, URL url) {
this.storeMetadata(serviceMetadataIdentifier, URL.encode(url.toFullString()));
}
/**
* Remove service metadata from Redis.
*
* @param serviceMetadataIdentifier service metadata identifier
*/
@Override
protected void doRemoveMetadata(ServiceMetadataIdentifier serviceMetadataIdentifier) {
this.deleteMetadata(serviceMetadataIdentifier);
}
/**
* Get the exported URLs stored in Redis for the given service metadata identifier.
*
* @param metadataIdentifier service metadata identifier
* @return list of decoded exported URLs
*/
@Override
protected List<String> doGetExportedURLs(ServiceMetadataIdentifier metadataIdentifier) {
String content = getMetadata(metadataIdentifier);
@ -135,21 +152,45 @@ public class RedisMetadataReport extends AbstractMetadataReport {
return new ArrayList<>(Arrays.asList(URL.decode(content)));
}
/**
* Save subscriber data in Redis.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @param urlListStr subscribed URLs as a JSON string
*/
@Override
protected void doSaveSubscriberData(SubscriberMetadataIdentifier subscriberMetadataIdentifier, String urlListStr) {
this.storeMetadata(subscriberMetadataIdentifier, urlListStr);
}
/**
* Get the subscribed URLs stored in Redis for the given subscriber.
*
* @param subscriberMetadataIdentifier subscriber metadata identifier
* @return subscribed URLs as a JSON string
*/
@Override
protected String doGetSubscribedURLs(SubscriberMetadataIdentifier subscriberMetadataIdentifier) {
return this.getMetadata(subscriberMetadataIdentifier);
}
/**
* Get the service definition stored for the given metadata identifier.
*
* @param metadataIdentifier metadata identifier
* @return the stored service definition
*/
@Override
public String getServiceDefinition(MetadataIdentifier metadataIdentifier) {
return this.getMetadata(metadataIdentifier);
}
/**
* Store a metadata value, dispatching to standalone or cluster mode.
*
* @param metadataIdentifier metadata identifier
* @param v value to store
*/
private void storeMetadata(BaseMetadataIdentifier metadataIdentifier, String v) {
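// A non-null pool means standalone Redis; otherwise fall through to the cluster client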
if (pool != null) {
storeMetadataStandalone(metadataIdentifier, v);
@ -158,6 +199,12 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Store a metadata value in a Redis cluster.
*
* @param metadataIdentifier metadata identifier
* @param v value to store
*/
private void storeMetadataInCluster(BaseMetadataIdentifier metadataIdentifier, String v) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -170,6 +217,12 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Store a metadata value in a standalone Redis instance.
*
* @param metadataIdentifier metadata identifier
* @param v value to store
*/
private void storeMetadataStandalone(BaseMetadataIdentifier metadataIdentifier, String v) {
try (Jedis jedis = pool.getResource()) {
jedis.set(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY), v, jedisParams);
@ -180,6 +233,11 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Delete a metadata value, dispatching to standalone or cluster mode.
*
* @param metadataIdentifier metadata identifier to delete
*/
private void deleteMetadata(BaseMetadataIdentifier metadataIdentifier) {
if (pool != null) {
deleteMetadataStandalone(metadataIdentifier);
@ -188,6 +246,11 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Delete a metadata value from a Redis cluster.
*
* @param metadataIdentifier metadata identifier to delete
*/
private void deleteMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -199,6 +262,11 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Delete a metadata value from a standalone Redis instance.
*
* @param metadataIdentifier metadata identifier to delete
*/
private void deleteMetadataStandalone(BaseMetadataIdentifier metadataIdentifier) {
try (Jedis jedis = pool.getResource()) {
jedis.del(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
@ -209,6 +277,12 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Get a metadata value, dispatching to standalone or cluster mode.
*
* @param metadataIdentifier metadata identifier
* @return the stored value, or null if absent
*/
private String getMetadata(BaseMetadataIdentifier metadataIdentifier) {
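// Same dispatch as storeMetadata: standalone when a pool exists, cluster otherwise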
if (pool != null) {
return getMetadataStandalone(metadataIdentifier);
@ -217,6 +291,12 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Get a metadata value from a Redis cluster.
*
* @param metadataIdentifier metadata identifier
* @return the stored value, or null if absent
*/
private String getMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -228,6 +308,12 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Get a metadata value from a standalone Redis instance.
*
* @param metadataIdentifier metadata identifier
* @return the stored value, or null if absent
*/
private String getMetadataStandalone(BaseMetadataIdentifier metadataIdentifier) {
try (Jedis jedis = pool.getResource()) {
return jedis.get(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
@ -239,15 +325,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* Store class and application names using Redis hashes
* key: default 'dubbo:mapping'
* field: class (serviceInterface)
* value: application_names
* @param serviceInterface field(class)
* @param defaultMappingGroup {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
* @param newConfigContent new application_names
* @param ticket previous application_names
* @return
* Store the service interface to application-name mapping using Redis hashes.
* <p>
* key: default 'dubbo:mapping'
* field: serviceInterface (the class name)
* value: application_names
*
* @param serviceInterface service interface used as the hash field
* @param defaultMappingGroup {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
* @param newConfigContent new application_names value
* @param ticket previously read application_names, used as the CAS ticket
* @return true if the mapping was stored successfully
*/
@Override
public boolean registerServiceAppMapping(
@ -265,6 +353,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Store a mapping entry in Redis.
* For standalone Redis this delegates to {@link #storeMappingStandalone(String, String, String, String)},
* for cluster mode to {@link #storeMappingInCluster(String, String, String, String)}.
*
* @param key Redis hash key
* @param field hash field (service interface)
* @param value application names
* @param ticket previously read value, used for CAS
* @return true if the mapping was stored successfully
*/
private boolean storeMapping(String key, String field, String value, String ticket) {
if (pool != null) {
return storeMappingStandalone(key, field, value, ticket);
@ -274,8 +373,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* use 'watch' to implement cas.
* Find information about slot distribution by key.
* Store a mapping entry in a Redis cluster, using Redis WATCH and MULTI on the slot that owns the key to implement CAS.
*
* @param key Redis hash key
* @param field hash field (service interface)
* @param value application names
* @param ticket previously read value, used for CAS
* @return true if the mapping was stored successfully
* @throws RpcException if the Redis operation fails
*/
private boolean storeMappingInCluster(String key, String field, String value, String ticket) {
try (JedisCluster jedisCluster =
@ -304,8 +412,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* use 'watch' to implement cas.
* Find information about slot distribution by key.
* Store a mapping entry in a standalone Redis instance, using 'watch' to implement CAS.
*
* @param key Redis hash key
* @param field Redis hash field (service interface)
* @param value application names
* @param ticket previously read value, used for CAS
* @return true if the mapping was stored successfully
*/
private boolean storeMappingStandalone(String key, String field, String value, String ticket) {
try (Jedis jedis = pool.getResource()) {
@ -330,36 +444,48 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* build mapping key
* @param defaultMappingGroup {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
* @return
* Build the Redis key under which service-to-application mappings are stored
* (the configured root joined with the mapping group).
*
* @param defaultMappingGroup {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
* @return the mapping key
*/
private String buildMappingKey(String defaultMappingGroup) {
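// e.g. with the default root "dubbo" and the default mapping group this yields "dubbo:mapping"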
return this.root + GROUP_CHAR_SEPARATOR + defaultMappingGroup;
}
/**
* build pub/sub key
* Build the Redis pub/sub channel key used to broadcast mapping changes
* (the mapping key joined with the queues suffix).
*
* @return the pub/sub channel key
*/
private String buildPubSubKey() {
return buildMappingKey(DEFAULT_MAPPING_GROUP) + GROUP_CHAR_SEPARATOR + QUEUES_KEY;
}
/**
* get content and use content to complete cas
* @param serviceKey class
* @param group {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
*
* Read the current mapping content; the content is also used as the ticket for the subsequent CAS update.
*
* @param serviceKey service interface (hash field)
* @param group mapping group, e.g. {@link ServiceNameMapping#DEFAULT_MAPPING_GROUP}
* @return a ConfigItem whose content and ticket are the current application_names
*/
@Override
public ConfigItem getConfigItem(String serviceKey, String group) {
String key = buildMappingKey(group);
String content = getMappingData(key, serviceKey);
return new ConfigItem(content, content);
}
/**
* get current application_names
* Get the current application_names from the Redis hash, dispatching to standalone or cluster mode.
*
* @param key Redis hash key
* @param field hash field (service interface)
* @return current application_names, or null if absent
*/
private String getMappingData(String key, String field) {
if (pool != null) {
@ -369,6 +495,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Get the mapping data from a Redis cluster.
*
* @param key Redis hash key
* @param field hash field (service interface)
* @return current application_names, or null if absent
* @throws RpcException if the Redis operation fails
*/
private String getMappingDataInCluster(String key, String field) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -380,6 +514,13 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Get the mapping data from a standalone Redis instance.
*
* @param key Redis hash key
* @param field hash field (service interface)
* @return current application_names; an RpcException is thrown if the operation fails
*/
private String getMappingDataStandalone(String key, String field) {
try (Jedis jedis = pool.getResource()) {
return jedis.hget(key, field);
@ -391,7 +532,10 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* remove listener. If have no listener,thread will dead
* Remove the mapping listener; once no listeners remain, the listening thread terminates.
*
* @param serviceKey service key the listener was registered for
* @param listener listener to remove
*/
@Override
public void removeServiceAppMappingListener(String serviceKey, MappingListener listener) {
@ -406,8 +550,13 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* Start a thread and subscribe to {@link this#buildPubSubKey()}.
* Notify {@link MappingListener} if there is a change in the 'application_names' message.
* Starts a listening thread subscribed to {@link this#buildPubSubKey()} and notifies the
* {@link MappingListener} whenever the 'application_names' message changes.
*
* @param serviceKey service key to look up
* @param listener listener to notify on mapping changes
* @param url consumer URL
* @return the set of application names currently mapped to the service
*/
@Override
public Set<String> getServiceAppMapping(String serviceKey, MappingListener listener, URL url) {
@ -421,45 +570,82 @@ public class RedisMetadataReport extends AbstractMetadataReport {
return this.getServiceAppMapping(serviceKey, url);
}
/**
* Get the application names currently mapped to the given service key.
*
* @param serviceKey service key to look up
* @param url consumer URL
* @return the set of mapped application names
*/
@Override
public Set<String> getServiceAppMapping(String serviceKey, URL url) {
String key = buildMappingKey(DEFAULT_MAPPING_GROUP);
return getAppNames(getMappingData(key, serviceKey));
}
/**
* Get the application-level metadata for the given subscriber identifier.
*
* @param identifier subscriber metadata identifier
* @param instanceMetadata instance metadata of the subscribed application
* @return the deserialized MetadataInfo
*/
@Override
public MetadataInfo getAppMetadata(SubscriberMetadataIdentifier identifier, Map<String, String> instanceMetadata) {
String content = this.getMetadata(identifier);
return JsonUtils.toJavaObject(content, MetadataInfo.class);
}
/**
* Publish application-level metadata to Redis.
*
* @param identifier subscriber metadata identifier
* @param metadataInfo metadata to publish
*/
@Override
public void publishAppMetadata(SubscriberMetadataIdentifier identifier, MetadataInfo metadataInfo) {
this.storeMetadata(identifier, metadataInfo.getContent());
}
/**
* Remove previously published application-level metadata from Redis.
*
* @param identifier subscriber metadata identifier
* @param metadataInfo metadata being removed
*/
@Override
public void unPublishAppMetadata(SubscriberMetadataIdentifier identifier, MetadataInfo metadataInfo) {
this.deleteMetadata(identifier);
}
// for test
// For testing
public MappingDataListener getMappingDataListener() {
return mappingDataListenerMap.get(buildPubSubKey());
}
/**
* Listen for changes in the 'application_names' message and notify the listener.
* Subscriber that listens for 'application_names' change messages and notifies the registered listeners.
*/
class NotifySub extends JedisPubSub {
private final Map<String, Set<MappingListener>> listeners = new ConcurrentHashMap<>();
/**
* Register a mapping listener for the given key.
*
* @param key service key
* @param listener listener to register
*/
public void addListener(String key, MappingListener listener) {
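// Create the listener set for this key on first use, then add the listener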
Set<MappingListener> listenerSet = listeners.computeIfAbsent(key, k -> new ConcurrentHashSet<>());
listenerSet.add(listener);
}
/**
* Remove a mapping listener registered for the given service key.
*
* @param serviceKey service key
* @param listener listener to remove
*/
public void removeListener(String serviceKey, MappingListener listener) {
Set<MappingListener> listenerSet = this.listeners.get(serviceKey);
if (listenerSet != null) {
@ -470,10 +656,21 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Whether any listeners are currently registered.
*
* @return true if there are no listeners, false otherwise
*/
public Boolean isEmpty() {
return this.listeners.isEmpty();
}
/**
* Handle a mapping change message and notify the listeners registered for the key.
*
* @param key service key that changed
* @param msg message payload containing the updated application names
*/
@Override
public void onMessage(String key, String msg) {
logger.info("sub from redis:" + key + " message:" + msg);
@ -486,11 +683,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Handle a pattern-matched message by delegating to {@link #onMessage(String, String)}.
*
* @param pattern matched pattern
* @param key service key that changed
* @param msg message payload
*/
@Override
public void onPMessage(String pattern, String key, String msg) {
onMessage(key, msg);
}
/**
* Called when a pattern subscription is established.
*
* @param pattern subscribed pattern
* @param subscribedChannels number of subscribed channels
*/
@Override
public void onPSubscribe(String pattern, int subscribedChannels) {
super.onPSubscribe(pattern, subscribedChannels);
@ -498,7 +708,7 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
/**
* Subscribe application names change message.
* Listening thread that subscribes to the pub/sub channel and dispatches change messages to NotifySub.
*/
class MappingDataListener extends Thread {
@ -508,14 +718,27 @@ public class RedisMetadataReport extends AbstractMetadataReport {
// for test
protected volatile boolean running = true;
/**
* Create a listener for the given pub/sub channel path.
*
* @param path pub/sub channel to subscribe to
*/
public MappingDataListener(String path) {
this.path = path;
}
/**
* Get the NotifySub instance used by this listener.
*
* @return the NotifySub
*/
public NotifySub getNotifySub() {
return notifySub;
}
/**
* Main loop of the listening thread: subscribe to the channel and keep listening while running.
*/
@Override
public void run() {
while (running) {
@ -540,6 +763,9 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
* Stop the listening thread.
*/
public void shutdown() {
try {
running = false;

@ -16,27 +16,44 @@ import java.net.InetAddress;
*/
public class CustomBeanFactoryPostProcessor implements BeanFactoryPostProcessor, Ordered {
@Override
public int getOrder() {
return Ordered.HIGHEST_PRECEDENCE;
}
/**
* Order of this BeanFactoryPostProcessor: highest precedence so it runs before other post-processors.
*
* @return the order value
*/
@Override
public int getOrder() {
return Ordered.HIGHEST_PRECEDENCE;
}
/**
* Resolve the first non-loopback host address via Spring's InetUtils and register it for Dubbo.
*
* @param beanFactory the configurable bean factory
* @throws BeansException if bean processing fails
*/
@Override
public void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException {
// Get the InetUtils bean, used to resolve the host IP address
InetUtils inetUtils = beanFactory.getBean(InetUtils.class);
String ip = "127.0.0.1";
// Find the first non-loopback address
InetAddress address = inetUtils.findFirstNonLoopbackAddress();
if (address != null) {
if (address instanceof Inet6Address) {
// Handle IPv6 addresses
String ipv6AddressString = address.getHostAddress();
if (ipv6AddressString.contains("%")) {
// Strip the scope ID if present
ipv6AddressString = ipv6AddressString.substring(0, ipv6AddressString.indexOf("%"));
}
ip = ipv6AddressString;
} else {
// Handle IPv4 addresses
ip = inetUtils.findFirstNonLoopbackHostInfo().getIpAddress();
}
}
// Set the system property DUBBO_IP_TO_REGISTRY to the resolved IP address
System.setProperty("DUBBO_IP_TO_REGISTRY", ip);
}
}

@ -16,6 +16,9 @@ import org.springframework.context.annotation.PropertySource;
@PropertySource(value = "classpath:common-dubbo.yml", factory = YmlPropertySourceFactory.class)
public class DubboConfiguration {
/**
* Custom Dubbo IP injection: registers the post-processor that resolves the correct host IP for registration (avoids wrong-IP issues).
*/
@Bean
public BeanFactoryPostProcessor customBeanFactoryPostProcessor() {
return new CustomBeanFactoryPostProcessor();

@ -11,8 +11,18 @@ import lombok.AllArgsConstructor;
public enum RequestLogEnum {
/**
* info param full
* info: basic invocation information only
*/
INFO, PARAM, FULL;
INFO,
/**
* param: invocation information plus request parameters
*/
PARAM,
/**
* full: invocation information plus request parameters and response
*/
FULL;
}

@ -1,17 +1,28 @@
package org.dromara.common.dubbo.filter;
import org.dromara.common.core.utils.SpringUtils;
import org.dromara.common.dubbo.enumd.RequestLogEnum;
import org.dromara.common.dubbo.properties.DubboCustomProperties;
import lombok.extern.slf4j.Slf4j;
import org.apache.dubbo.common.constants.CommonConstants;
import org.apache.dubbo.common.extension.Activate;
import org.apache.dubbo.rpc.*;
import org.apache.dubbo.rpc.service.GenericService;
import org.dromara.common.core.utils.SpringUtils;
import org.dromara.common.dubbo.enumd.RequestLogEnum;
import org.dromara.common.dubbo.properties.DubboCustomProperties;
import org.dromara.common.json.utils.JsonUtils;
/**
* dubbo log filter
* Dubbo service invocation log filter.
* <p>
* Implemented as a Dubbo Filter SPI, activated on both the provider and the consumer side.
* <p>
* Main behaviour:
* - Detects whether the current call runs on the Provider or the Consumer side
* - Logs invocation details, parameters and elapsed time according to the configured level
* <p>
* Uses SpringUtils to obtain the DubboCustomProperties configuration bean.
* <p>
* Uses Lombok @Slf4j for logging.
*
* @author Lion Li
*/
@ -19,33 +30,48 @@ import org.dromara.common.json.utils.JsonUtils;
@Activate(group = {CommonConstants.PROVIDER, CommonConstants.CONSUMER}, order = Integer.MAX_VALUE)
public class DubboRequestFilter implements Filter {
/**
* Intercept the Dubbo invocation via the Filter SPI and log request and response details when request logging is enabled.
*
* @param invoker the Dubbo invoker being called
* @param invocation the current invocation
* @return the invocation result
* @throws RpcException if the underlying invocation fails
*/
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
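// Look up the current log configuration from the Spring context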
DubboCustomProperties properties = SpringUtils.getBean(DubboCustomProperties.class);
// If request logging is disabled, invoke the service directly and return the result
if (!properties.getRequestLog()) {
// Logging disabled: skip the log logic
return invoker.invoke(invocation);
}
// Determine whether this side is the Provider or the Consumer
String client = CommonConstants.PROVIDER;
if (RpcContext.getServiceContext().isConsumerSide()) {
client = CommonConstants.CONSUMER;
}
// Build the base log message
String baselog = "Client[" + client + "],InterfaceName=[" + invocation.getInvoker().getInterface().getSimpleName() + "],MethodName=[" + invocation.getMethodName() + "]";
// Log with different levels of detail depending on the configured log level
if (properties.getLogLevel() == RequestLogEnum.INFO) {
log.info("DUBBO - 服务调用: {}", baselog);
} else {
log.info("DUBBO - 服务调用: {},Parameter={}", baselog, invocation.getArguments());
}
// Record the invocation start time
long startTime = System.currentTimeMillis();
// Execute the actual service invocation
Result result = invoker.invoke(invocation);
// Invocation elapsed time
// Compute the elapsed time of the invocation
long elapsed = System.currentTimeMillis() - startTime;
// If an exception occurred, log it
// If an exception occurred and the call is a generic service invocation, log the error
if (result.hasException() && invoker.getInterface().equals(GenericService.class)) {
log.error("DUBBO - 服务异常: {},Exception={}", baselog, result.getException());
} else {
// Log the service response according to the configured log level
if (properties.getLogLevel() == RequestLogEnum.INFO) {
log.info("DUBBO - 服务响应: {},SpendTime=[{}ms]", baselog, elapsed);
} else if (properties.getLogLevel() == RequestLogEnum.FULL) {

@ -15,8 +15,14 @@ import org.springframework.cloud.context.config.annotation.RefreshScope;
@ConfigurationProperties(prefix = "dubbo.custom")
public class DubboCustomProperties {
/**
* Whether to record Dubbo request logs.
*/
private Boolean requestLog;
/**
* Request log detail level (info / param / full).
*/
private RequestLogEnum logLevel;
}
