Business Gateway - Divide (Traffic-Splitting) Plugin


    The divide plugin described here is very similar to nginx's traffic-forwarding capability; in other words, it acts as a reverse proxy.

    Background

    Although nginx's traffic forwarding is powerful, some business changes can make the nginx configuration sprawl until it becomes hard to keep up with. For example, an app may evolve into many business lines as the business grows: hotels, flights, dining, local transportation. Behind these business lines are usually different departments and different engineering teams, each exposing its own services for the app to integrate with. If every new service requires another nginx configuration change, the configuration grows heavy and hard to maintain over time. We therefore position nginx as a pure traffic gateway that knows nothing about specific businesses and is responsible only for domain-based request forwarding, while routing to the various services of the backend business lines is handled by the business gateway.

    There is also a need to group services logically: for example, routing important requests to a group of high-performance servers, while requests from less critical, edge businesses go to another group with lower specs. Likewise, our gray-release (canary) environment needs traffic allocated according to certain rules.

    Approach

    Capture the traffic in WebFlux, forward the request to the corresponding backend service, and finally return the response to the client.

    Implementation

    Capturing the traffic

    Implement a custom WebHandler:

    @Override
    public Mono<Void> handle(final ServerWebExchange exchange) {
        // Build a fresh plugin chain for this request and start executing it.
        return new DefaultDiabloPluginChain(plugins).execute(exchange);
    }
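
    For WebFlux to send every request through this handler, it must be registered under the bean name "webHandler", which is the name WebHttpHandlerBuilder resolves when assembling the handler chain. Below is a minimal sketch of such a registration; DiabloWebHandlerConfiguration and DiabloWebHandler are illustrative names standing in for whatever class hosts the handle method above, not names taken from the project.

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.web.server.WebHandler;

    @Configuration
    public class DiabloWebHandlerConfiguration {

        // "webHandler" is the bean name WebFlux looks up when building the HttpHandler,
        // so this handler receives every incoming request.
        @Bean("webHandler")
        public WebHandler diabloWebHandler(final List<DiabloPlugin> plugins) {
            // Sort the plugins by the order each one declares so the chain runs them deterministically.
            List<DiabloPlugin> sorted = plugins.stream()
                    .sorted(Comparator.comparingInt(DiabloPlugin::getOrder))
                    .collect(Collectors.toList());
            return new DiabloWebHandler(sorted);
        }
    }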

    Implement a plugin chain of responsibility that invokes the plugins one by one and finally returns the response.

    
    private static class DefaultDiabloPluginChain implements DiabloPluginChain {
    
            // Index of the next plugin to run. A new chain is created per request in
            // handle(), so no synchronization is needed.
            private int index;
    
            private final List<DiabloPlugin> plugins;
    
            DefaultDiabloPluginChain(final List<DiabloPlugin> plugins) {
                this.plugins = plugins;
            }
    
            @Override
            public Mono<Void> execute(final ServerWebExchange exchange) {
    
                if (this.index < plugins.size()) {
                    DiabloPlugin plugin = plugins.get(this.index++);
                    try {
                        return plugin.execute(exchange, this);
                    } catch (Exception ex) {
                        log.error("DefaultDiabloPluginChain.execute, traceId: {}, uri: {}, error:{}", exchange.getAttribute(Constants.CLIENT_RESPONSE_TRACE_ID), exchange.getRequest().getURI().getPath(), Throwables.getStackTraceAsString(ex));
    
                        throw ex;
                    }
                } else {
                    return Mono.empty(); // complete
                }
            }
        }
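
    A plugin does its own work and then hands control back to the chain by returning chain.execute(exchange); returning without calling the chain short-circuits the remaining plugins. A rough sketch of what a plugin could look like is shown below. LoggingPlugin is a hypothetical example, and any other methods DiabloPlugin may declare (named(), skip(), pluginType(), as seen in DividePlugin further down) are omitted for brevity.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.springframework.web.server.ServerWebExchange;
    import reactor.core.publisher.Mono;

    public class LoggingPlugin implements DiabloPlugin {

        private static final Logger log = LoggerFactory.getLogger(LoggingPlugin.class);

        @Override
        public Mono<Void> execute(final ServerWebExchange exchange, final DiabloPluginChain chain) {
            // Do this plugin's own work first ...
            log.info("incoming request, uri: {}", exchange.getRequest().getURI().getPath());
            // ... then delegate so the next plugin in the chain runs.
            return chain.execute(exchange);
        }

        @Override
        public int getOrder() {
            // A small order value so this plugin runs early in the chain.
            return 0;
        }
    }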
    
    
    

    Divide plugin implementation

    
    public class DividePlugin extends AbstractDiabloPlugin {
    
        private final UpstreamCacheManager upstreamCacheManager;
    
        private final WebClient webClient;
    
        public DividePlugin(final LocalCacheManager localCacheManager, final UpstreamCacheManager upstreamCacheManager, final WebClient webClient) {
            super(localCacheManager);
            this.upstreamCacheManager = upstreamCacheManager;
            this.webClient = webClient;
        }
    
        @Override
        protected Mono<Void> doExecute(final ServerWebExchange exchange, final DiabloPluginChain chain, final SelectorData selector, final RuleData rule) {
            final RequestDTO requestDTO = exchange.getAttribute(Constants.REQUESTDTO);
            final String traceId = exchange.getAttribute(Constants.CLIENT_RESPONSE_TRACE_ID);
            final DivideRuleHandle ruleHandle = GsonUtils.getInstance().fromJson(rule.getHandle(), DivideRuleHandle.class);
    
            String ruleId = rule.getId();
            final List<DivideUpstream> upstreamList = upstreamCacheManager.findUpstreamListByRuleId(ruleId);
            if (CollectionUtils.isEmpty(upstreamList)) {
    
                log.warn("DividePlugin.doExecute upstreamList is empty, traceId: {}, uri: {}, ruleName:{}", traceId, exchange.getRequest().getURI().getPath(), rule.getName());
                exchange.getResponse().setStatusCode(HttpStatus.SERVICE_UNAVAILABLE);
                return chain.execute(exchange);
            }
    
            final String ip = Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress();
    
            DivideUpstream divideUpstream =
                    LoadBalanceUtils.selector(upstreamList, ruleHandle.getLoadBalance(), ip);
    
            if (Objects.isNull(divideUpstream)) {
    
                log.warn("DividePlugin.doExecute divideUpstream is empty, traceId: {}, uri: {}, loadBalance:{}, ruleName:{}, upstreamSize: {}", traceId, exchange.getRequest().getURI().getPath(), ruleHandle.getLoadBalance(), rule.getName(), upstreamList.size());
                exchange.getResponse().setStatusCode(HttpStatus.SERVICE_UNAVAILABLE);
                return chain.execute(exchange);
            }
    
            if (exchange.getAttributeOrDefault(Constants.GATEWAY_ALREADY_ROUTED_ATTR, false)) {
                log.warn("DividePlugin.doExecute alread routed, traceId: {}, uri: {}, ruleName:{}", traceId, exchange.getRequest().getURI().getPath(), rule.getName());
    
                exchange.getResponse().setStatusCode(HttpStatus.FORBIDDEN);
                return chain.execute(exchange);
            }
            exchange.getAttributes().put(Constants.GATEWAY_ALREADY_ROUTED_ATTR, true);
    
            exchange.getAttributes().put(Constants.GATEWAY_CONTEXT_UPSTREAM_HOST, divideUpstream.getUpstreamHost());
            exchange.getAttributes().put(Constants.GATEWAY_CONTEXT_RULE_ID, ruleId);
    
            HttpCommand command = new HttpCommand(exchange, chain,
                    requestDTO, divideUpstream, webClient, ruleHandle.getTimeout());
            return command.doHttpInvoke();
        }
    
        public SelectorData filterSelector(final List<SelectorData> selectors, final ServerWebExchange exchange) {
            return selectors.stream()
                            .filter(selector -> selector.getEnabled() && filterCustomSelector(selector, exchange))
                            .findFirst().orElse(null);
        }
    
        private Boolean filterCustomSelector(final SelectorData selector, final ServerWebExchange exchange) {
            if (selector.getType() == SelectorTypeEnum.CUSTOM_FLOW.getCode()) {
    
                List<ConditionData> conditionList = selector.getConditionList();
                if (CollectionUtils.isEmpty(conditionList)) {
                    return false;
                }
    
                // The admin console initially defines the condition key as Host with the '=' operator
                if (MatchModeEnum.AND.getCode() == selector.getMatchMode()) {
                    ConditionData conditionData = conditionList.get(0);
                    return Objects.equals(exchange.getRequest().getHeaders().getFirst("Host"), conditionData.getParamValue().trim());
                } else {
                    return conditionList.stream().anyMatch(c -> Objects.equals(exchange.getRequest().getHeaders().getFirst("Host"), c.getParamValue().trim()));
                }
    
            }
            return true;
        }
    
        @Override
        public String named() {
            return PluginEnum.DIVIDE.getName();
        }
    
        @Override
        public Boolean skip(final ServerWebExchange exchange) {
            final RequestDTO body = exchange.getAttribute(Constants.REQUESTDTO);
            return !Objects.equals(Objects.requireNonNull(body).getRpcType(), RpcTypeEnum.HTTP.getName());
        }
    
        @Override
        public PluginTypeEnum pluginType() {
            return PluginTypeEnum.FUNCTION;
        }
    
        @Override
        public int getOrder() {
            return PluginEnum.DIVIDE.getCode();
        }
    
    }
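
    LoadBalanceUtils.selector picks one DivideUpstream from the candidates according to the rule's load-balance strategy and the caller's IP. The project's implementation is not shown here; the sketch below illustrates two common strategies (uniform random and IP hash), and the strategy name "hash" is an assumption rather than a value taken from the project.

    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    public final class LoadBalanceUtils {

        private LoadBalanceUtils() {
        }

        public static DivideUpstream selector(final List<DivideUpstream> upstreamList,
                                              final String loadBalance, final String ip) {
            if (upstreamList == null || upstreamList.isEmpty()) {
                return null;
            }
            if ("hash".equals(loadBalance)) {
                // Pin the same client IP to the same upstream instance.
                int index = (ip.hashCode() & Integer.MAX_VALUE) % upstreamList.size();
                return upstreamList.get(index);
            }
            // Default: pick an upstream uniformly at random.
            return upstreamList.get(ThreadLocalRandom.current().nextInt(upstreamList.size()));
        }
    }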
    
    

    WebClient

    Forwarding the HTTP request relies mainly on WebClient, which provides a reactive API. The details are encapsulated in the HttpCommand helper class; the core code is as follows:

    public Mono<Void> doHttpInvoke() {
    
            URI uri = buildRealURL(divideUpstream, exchange);
            traceId = exchange.getAttribute(Constants.CLIENT_RESPONSE_TRACE_ID);
            if (uri == null) {
                log.warn("HttpCommand.doNext real url is null, traceId: {}, uri: {}", traceId, exchange.getRequest().getURI().getPath());
                exchange.getResponse().setStatusCode(HttpStatus.SERVICE_UNAVAILABLE);
                return chain.execute(exchange).then(Mono.defer(() -> Mono.empty()));
            }
        // TODO: enable later when there is time; note the trace id is not cleared afterwards
        // IssRpcContext.commitParams(IssRpcContextParamKey.TRACE_ID, traceId);
    
            if (requestDTO.getHttpMethod().equals(HttpMethodEnum.GET.getName())) {
    
                return webClient.get().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .exchange()
                                // doOnError only logs here; by default the exception is still propagated downstream
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient get execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.POST.getName())) {
    
                return webClient.post().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .contentType(buildMediaType())
                                .body(BodyInserters.fromDataBuffers(exchange.getRequest().getBody()))
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient post execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.OPTIONS.getName())) {
                return webClient.options().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient options execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.HEAD.getName())) {
                return webClient.head().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient head execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.PUT.getName())) {
    
                return webClient.put().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .contentType(buildMediaType())
                                .body(BodyInserters.fromDataBuffers(exchange.getRequest().getBody()))
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient put execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
    
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.DELETE.getName())) {
    
                return webClient.delete().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient delete execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
    
            } else if (requestDTO.getHttpMethod().equals(HttpMethodEnum.PATCH.getName())) {
                return webClient.patch().uri(f -> uri)
                                .headers(httpHeaders -> {
                                    httpHeaders.add(Constants.TRACE_ID, traceId);
                                    httpHeaders.addAll(exchange.getRequest().getHeaders());
                                })
                                .contentType(buildMediaType())
                                .body(BodyInserters.fromDataBuffers(exchange.getRequest().getBody()))
                                .exchange()
                                .doOnError(e -> log.error("HttpCommand.doHttpInvoke Failed to webClient patch execute, traceId: {}, uri: {}, cause:{}", traceId, uri, Throwables.getStackTraceAsString(e)))
                                .timeout(Duration.ofMillis(timeout))
                                .flatMap(this::doNext);
            }
    
            log.warn("HttpCommand doHttpInvoke Waring no match doHttpInvoke end, traceId: {}, httpMethod: {}, uri: {}", traceId, requestDTO.getHttpMethod(), uri.getPath());
    
            return Mono.empty();
        }
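
    Every branch above ends with flatMap(this::doNext), which receives the upstream ClientResponse and writes it back to the caller. The project's doNext is not included in this excerpt; the sketch below shows one way that step could be implemented, streaming the upstream body straight through to the gateway response and then letting the remaining plugins run. The exchange and chain fields follow the code above; everything else is an assumption.

    private Mono<Void> doNext(final ClientResponse res) {
        ServerHttpResponse response = exchange.getResponse();
        // Copy status and headers from the upstream response onto the gateway response.
        response.setStatusCode(res.statusCode());
        response.getHeaders().putAll(res.headers().asHttpHeaders());
        // Stream the upstream body through to the client, then continue the plugin chain.
        return response.writeWith(res.bodyToFlux(DataBuffer.class))
                       .then(chain.execute(exchange));
    }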
    

    The gateway project is open source

    Everything above is based on one small module of the business gateway; for more detail, see the diablo project.
