当前位置: 首页 > news >正文

实战 Springboot2 集成Redis 哨兵模式、集群模式、缓存管理、Lettuce拓扑刷新

redis搭建集群模式、Cluster模式(6节点,3主3从集群模式,添加删除节点)_redis cluster节点带数据增减-CSDN博客

Linux部署Redis哨兵集群 一主两从三哨兵(这里使用Redis6,其它版本类似)_linux redis集群模式部署-CSDN博客

配置yaml
redis:
  redis-configs:
    redis-order:
      type: sentinel # standalone | cluster | sentinel
      hostAndPort: 192.168.132.1:16379,192.168.132.1:16380,192.168.132.1:16381
      masterName: mymaster
      password: dyj1
      username:
      database: 15
      timeout: 10000
      pool:
        max-idle: 8
        min-idle: 0
        max-active: 8
        max-wait: 10000
#    redis-pay:
#        type: standalone
#        hostAndPort: localhost:6380
#        database: 14
#        timeout: 10000
#        pool:
#          max-idle: 8 # 连接池中的最大空闲连接 默认 8
#          min-idle: 0 # 连接池中的最小空闲连接 默认 0
#          max-active: 8  # 连接池最大连接数(使用负值表示没有限制) 默认 8
#          max-wait: 10000 # 连接池最大阻塞等待时间(使用负值表示没有限制) 默认 -1
#    redis-order-cluster:
#        type: cluster
#        hostAndPort: xxx:6379,xxx:6379,xxx:6379
#        database: 15
#        timeout: 10000
#        max-redirects: 3
#        pool:
#          max-idle: 8
#          min-idle: 0
#          max-active: 8
#          max-wait: 10000
#    redis-pay-cluster:
#        type: cluster
#        hostAndPort: xxx:6379,xxx:6379,xxx:6379
#        database: 14
#        timeout: 10000
#        pool:
#          max-idle: 8
#          min-idle: 0
#          max-active: 8
#          max-wait: 10000
cache:
  caffeine:
    cache10M: "initialCapacity=20,maximumSize=100,expireAfterWrite=10m,recordStats"
    cache30s: "initialCapacity=20,maximumSize=100,expireAfterWrite=30s"
  redis:
    cache10M: "10"
    cache30s: "30"
redis连接池
package org.example.redis.config;import freemarker.template.utility.StringUtil;
import io.lettuce.core.ClientOptions;
import io.lettuce.core.cluster.ClusterClientOptions;
import io.lettuce.core.cluster.ClusterTopologyRefreshOptions;
import lombok.Data;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.*;
import org.springframework.data.redis.connection.lettuce.LettuceClientConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnection;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfiguration;
import org.springframework.data.util.Pair;

import javax.annotation.PostConstruct;
import java.time.Duration;
import java.util.*;

/**
 * Builds one {@link LettuceConnectionFactory} per entry of the {@code redis.redis-configs}
 * yaml section (see {@link RedisPoolConfig}) and registers it by logical name
 * (e.g. "redis-order") in {@link #redisConnectionFactors} for template builders to look up.
 * Supported deployment types: standalone, cluster, sentinel.
 */
@Configuration
@Data
@EnableConfigurationProperties({RedisPoolConfig.class})
public class RedisConnectionFactoryConfig {

    /** Registry of started factories keyed by logical config name. */
    public static Map<String, LettuceConnectionFactory> redisConnectionFactors = new HashMap<>();

    @Autowired
    private RedisPoolConfig redisPoolConfig;

    /**
     * Creates, starts and registers a connection factory for every configured instance.
     * Entries with an unknown type or an unparsable host list are skipped.
     */
    @PostConstruct
    public void init() {
        redisPoolConfig.getRedisConfigs().forEach((name, config) -> {
            LettuceConnectionFactory redisConnectionFactory = null;
            LettuceClientConfiguration clientConfig = getClientConfiguration(config);
            switch (config.getType()) {
                case "standalone":
                    RedisStandaloneConfiguration redisStandaloneConfiguration = createRedisStandaloneConfiguration(config);
                    if (redisStandaloneConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(redisStandaloneConfiguration, clientConfig);
                    }
                    break;
                case "cluster":
                    RedisClusterConfiguration redisClusterConfiguration = createRedisClusterConfiguration(config);
                    if (redisClusterConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(redisClusterConfiguration, clientConfig);
                    }
                    break;
                case "sentinel":
                    RedisSentinelConfiguration redisSentinelConfiguration = createRedisSentinelConfiguration(config);
                    if (redisSentinelConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(redisSentinelConfiguration, clientConfig);
                    }
                    break;
                default:
                    // BUGFIX: was "%d" — getType() is a String, so "%d" threw
                    // IllegalFormatConversionException instead of printing the message.
                    System.out.printf("Unknown type: %s\n", config.getType());
                    break;
            }
            if (null != redisConnectionFactory) {
                // Validate the connection when it is borrowed; a broken connection is
                // replaced with a fresh one instead of being handed to the caller.
                redisConnectionFactory.setValidateConnection(true);
                redisConnectionFactory.afterPropertiesSet(); // start() for spring-data-redis-3.X; afterPropertiesSet() for spring-data-redis-2.X
                redisConnectionFactors.putIfAbsent(name, redisConnectionFactory);
            }
        });
    }

    /**
     * Builds the pooled Lettuce client configuration. Cluster configs additionally get
     * adaptive + periodic topology refresh so the client notices failover/resharding.
     */
    private LettuceClientConfiguration getClientConfiguration(RedisPoolConfig.Config config) {
        GenericObjectPoolConfig<LettuceConnection> poolConfig = new GenericObjectPoolConfig<>();
        RedisPoolConfig.Config.PoolConfig pool = config.getPool();
        // ROBUSTNESS: the yaml "pool" section is optional — guard against a missing block
        // (the original dereferenced config.getPool() unconditionally and would NPE).
        if (pool != null) {
            if (StringUtils.isNotBlank(pool.getMaxActive())) {
                poolConfig.setMaxTotal(Integer.parseInt(pool.getMaxActive()));
            }
            if (StringUtils.isNotBlank(pool.getMaxWait())) {
                poolConfig.setMaxWait(Duration.ofMillis(Integer.parseInt(pool.getMaxWait())));
            }
            if (StringUtils.isNotBlank(pool.getMaxIdle())) {
                poolConfig.setMaxIdle(Integer.parseInt(pool.getMaxIdle()));
            }
            if (StringUtils.isNotBlank(pool.getMinIdle())) {
                poolConfig.setMinIdle(Integer.parseInt(pool.getMinIdle()));
            }
        }
        int timeout = -1;
        if (StringUtils.isNotBlank(config.getTimeout())) {
            timeout = Integer.parseInt(config.getTimeout());
        }
        if (StringUtils.equals(config.getType(), "cluster")) {
            // Adaptive cluster topology refresh with dynamic refresh sources.
            ClusterTopologyRefreshOptions clusterTopologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
                    // Enable all adaptive refresh triggers (MOVED/ASK redirects, reconnects, ...).
                    .enableAllAdaptiveRefreshTriggers()
                    // BUGFIX: removed the no-arg enableAdaptiveRefreshTrigger() call that followed:
                    // Lettuce rejects an empty trigger list with IllegalArgumentException, and all
                    // triggers are already enabled by enableAllAdaptiveRefreshTriggers().
                    // Periodic refresh — Lettuce default is 60 SECONDS.
                    .enablePeriodicRefresh(Duration.ofSeconds(5))
                    .build();
            ClusterClientOptions clusterClientOptions = ClusterClientOptions.builder()
                    .topologyRefreshOptions(clusterTopologyRefreshOptions)
                    // Commands are routed to the node owning the key's hash slot; a moved key
                    // triggers redirects (bounded by max-redirects) until the owner is found.
                    //.maxRedirects()
                    .build();
            return LettucePoolingClientConfiguration.builder()
                    .poolConfig(poolConfig)
                    //.readFrom(ReadFrom.SLAVE_PREFERRED)  // read/write split: write to master, read from replicas
                    .clientOptions(clusterClientOptions)
                    .build();
        }
        LettucePoolingClientConfiguration.LettucePoolingClientConfigurationBuilder builder =
                LettucePoolingClientConfiguration.builder().poolConfig(poolConfig);
        // ROBUSTNESS: only apply the shutdown timeout when a timeout was actually configured;
        // the original passed Duration.ofMillis(-1) when the yaml value was blank.
        if (timeout >= 0) {
            builder.shutdownTimeout(Duration.ofMillis(timeout));
        }
        return builder.build();
    }

    /** Parses the comma-separated sentinel list into a RedisSentinelConfiguration, or null if empty. */
    private RedisSentinelConfiguration createRedisSentinelConfiguration(RedisPoolConfig.Config config) {
        RedisSentinelConfiguration redisSentinelConfiguration = new RedisSentinelConfiguration();
        redisSentinelConfiguration.setMaster(config.getMasterName());
        List<Pair<String, Integer>> hostAndPorts = parseClusterHostAndPort(config.getHostAndPort());
        if (hostAndPorts.isEmpty()) {
            return null;
        }
        for (Pair<String, Integer> hostAndPort : hostAndPorts) {
            RedisNode.RedisNodeBuilder builder = RedisNode.newRedisNode()
                    .listeningAt(hostAndPort.getFirst(), hostAndPort.getSecond());
            redisSentinelConfiguration.addSentinel(builder.build());
        }
        setUsername(config, redisSentinelConfiguration);
        setPassword(config, redisSentinelConfiguration);
        setDatabase(config, redisSentinelConfiguration);
        return redisSentinelConfiguration;
    }

    /** Parses the comma-separated node list into a RedisClusterConfiguration, or null if empty. */
    private RedisClusterConfiguration createRedisClusterConfiguration(RedisPoolConfig.Config config) {
        List<Pair<String, Integer>> hostAndPorts = parseClusterHostAndPort(config.getHostAndPort());
        if (hostAndPorts.isEmpty()) {
            return null;
        }
        RedisClusterConfiguration redisClusterConfiguration = new RedisClusterConfiguration();
        for (Pair<String, Integer> hostAndPort : hostAndPorts) {
            RedisNode node = new RedisNode(hostAndPort.getFirst(), hostAndPort.getSecond());
            redisClusterConfiguration.addClusterNode(node);
        }
        setUsername(config, redisClusterConfiguration);
        setPassword(config, redisClusterConfiguration);
        setClusterConf(config, redisClusterConfiguration);
        return redisClusterConfiguration;
    }

    /** Parses a single "host:port" into a RedisStandaloneConfiguration, or null if malformed. */
    private RedisStandaloneConfiguration createRedisStandaloneConfiguration(RedisPoolConfig.Config config) {
        Pair<String, Integer> hostAndPort = parseHostAndPort(config.getHostAndPort());
        if (null == hostAndPort) {
            return null;
        }
        RedisStandaloneConfiguration redisStandaloneConfiguration = new RedisStandaloneConfiguration();
        redisStandaloneConfiguration.setHostName(hostAndPort.getFirst());
        redisStandaloneConfiguration.setPort(hostAndPort.getSecond());
        setUsername(config, redisStandaloneConfiguration);
        setPassword(config, redisStandaloneConfiguration);
        setDatabase(config, redisStandaloneConfiguration);
        return redisStandaloneConfiguration;
    }

    /** Applies the username only when configured (ACL users require Redis 6+). */
    private void setUsername(RedisPoolConfig.Config config, RedisConfiguration.WithPassword connectionFactory) {
        if (null != config.getUsername() && !config.getUsername().isEmpty()) {
            connectionFactory.setUsername(config.getUsername());
        }
    }

    /** Applies the password only when configured. */
    private void setPassword(RedisPoolConfig.Config config, RedisConfiguration.WithPassword connectionFactory) {
        if (null != config.getPassword() && !config.getPassword().isEmpty()) {
            connectionFactory.setPassword(config.getPassword());
        }
    }

    /** Applies the database index only when configured (not applicable to cluster mode). */
    private void setDatabase(RedisPoolConfig.Config config, RedisConfiguration.WithDatabaseIndex connectionFactory) {
        if (null != config.getDatabase() && !config.getDatabase().isEmpty()) {
            int database = Integer.parseInt(config.getDatabase());
            connectionFactory.setDatabase(database);
        }
    }

    /** Applies max-redirects for cluster mode only when configured. */
    private void setClusterConf(RedisPoolConfig.Config config, RedisClusterConfiguration redisClusterConfiguration) {
        if (null != config.getClusterMaxRedirects() && !config.getClusterMaxRedirects().isEmpty()) {
            int maxRedirects = Integer.parseInt(config.getClusterMaxRedirects());
            redisClusterConfiguration.setMaxRedirects(maxRedirects);
        }
    }

    /** Splits "h1:p1,h2:p2,..." into pairs, silently dropping malformed entries. */
    private List<Pair<String, Integer>> parseClusterHostAndPort(String hostAndPortStr) {
        String[] hosts = hostAndPortStr.split(",");
        List<Pair<String, Integer>> hostAndPorts = new ArrayList<>();
        for (String hostAndPort : hosts) {
            Pair<String, Integer> pair = parseHostAndPort(hostAndPort);
            if (null != pair) {
                hostAndPorts.add(pair);
            }
        }
        return hostAndPorts;
    }

    /** Parses a single "host:port"; returns null (after logging) when the format is invalid. */
    private Pair<String, Integer> parseHostAndPort(String hostAndPortStr) {
        String[] hostAndPort = hostAndPortStr.split(":");
        if (hostAndPort.length != 2) {
            System.out.printf("Invalid host and port: %s\n", hostAndPortStr);
            return null;
        }
        String host = hostAndPort[0].trim();
        String port = hostAndPort[1].trim();
        return Pair.of(host, Integer.parseInt(port));
    }
}
 缓存管理类
package org.example.redis.config;import com.github.benmanes.caffeine.cache.*;
import lombok.extern.slf4j.Slf4j;
import org.example.redis.listener.CaffeineCacheRemovalListener;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.caffeine.CaffeineCache;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.cache.interceptor.SimpleKeyGenerator;
import org.springframework.cache.support.SimpleCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.cache.RedisCacheWriter;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.StringRedisSerializer;

import javax.annotation.Resource;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Declares two cache managers driven by {@link CustomerCacheProperties}:
 * a Redis-backed manager (primary) with per-cache TTLs from cache.redis.*,
 * and a local Caffeine manager built from the CaffeineSpec strings in cache.caffeine.*.
 */
@Slf4j
@Configuration
@EnableCaching
@EnableConfigurationProperties({CustomerCacheProperties.class})
public class CacheManagerConfiguration {

    /** Cache names served by the Redis manager (keys into cache.redis.*). */
    public interface redisCacheKey {
        String cache10M = "cache10M";
        String cache30s = "cache30s";
    }

    /** Cache names served by the Caffeine manager (keys into cache.caffeine.*). */
    public interface caffeineCacheKey {
        String cache10M = "cache10M";
        String cache30s = "cache30s";
    }

    @Resource
    private CustomerCacheProperties customerCacheProperties;

    /** Bean names, for use in {@code @Cacheable(cacheManager = ...)}. */
    public interface CacheManagerNames {
        String REDIS_CACHE_MANAGER = "redisCacheManager";
        String LOCAL_CACHE_MANAGER = "localCacheManager";
    }

    @Bean(name = CacheManagerNames.REDIS_CACHE_MANAGER)
    @Primary
    public RedisCacheManager redisCacheManager(RedisConnectionFactory factory) {
        // One TTL-specific configuration per cache.redis.* entry (value = TTL in seconds).
        Map<String, RedisCacheConfiguration> ttlByCacheName = new HashMap<>();
        customerCacheProperties.getRedis().forEach((cacheName, seconds) ->
                ttlByCacheName.put(cacheName, RedisCacheConfiguration.defaultCacheConfig()
                        .entryTtl(Duration.ofSeconds(Integer.parseInt(seconds)))));
        // transactionAware(): cache writes are deferred until the surrounding transaction
        // commits, keeping cache and database consistent — at some performance cost, so
        // consider dropping it where strong consistency is not needed.
        return RedisCacheManager.RedisCacheManagerBuilder
                .fromConnectionFactory(factory)
                .cacheDefaults(cacheConfiguration())
                .withInitialCacheConfigurations(ttlByCacheName)
                .transactionAware()
                .build();
    }

    @Bean
    public RedisCacheConfiguration cacheConfiguration() {
        // Default entry layout: String keys, JSON values, null values rejected.
        StringRedisSerializer keySerializer = new StringRedisSerializer();
        GenericJackson2JsonRedisSerializer valueSerializer = new GenericJackson2JsonRedisSerializer();
        return RedisCacheConfiguration.defaultCacheConfig()
                .disableCachingNullValues()
                .serializeKeysWith(RedisSerializationContext.SerializationPair.fromSerializer(keySerializer))
                .serializeValuesWith(RedisSerializationContext.SerializationPair.fromSerializer(valueSerializer));
    }

    @Bean(name = CacheManagerNames.LOCAL_CACHE_MANAGER)
    public CacheManager caffeineCacheManager() {
        List<CaffeineCache> localCaches = new ArrayList<>();
        customerCacheProperties.getCaffeine().forEach((cacheName, spec) -> {
            // Each cache.caffeine.* value is a CaffeineSpec string, e.g.
            // "initialCapacity=20,maximumSize=100,expireAfterWrite=10m,recordStats".
            Caffeine<Object, Object> builder = Caffeine.from(CaffeineSpec.parse(spec));
            builder.removalListener(new CaffeineCacheRemovalListener());
            // Log every write/delete that passes through the cache.
            builder.writer(new CacheWriter<Object, Object>() {
                @Override
                public void write(Object key, Object value) {
                    log.info("CacheManager write key={}", key);
                }

                @Override
                public void delete(Object key, Object value, RemovalCause cause) {
                    log.info("CacheManager delete key={}, cause={}", key, cause);
                }
            });
            localCaches.add(new CaffeineCache(cacheName, builder.build()));
        });
        SimpleCacheManager cacheManager = new SimpleCacheManager();
        cacheManager.setCaches(localCaches);
        return cacheManager;
    }
}
 创建不同的template
package org.example.redis.config;import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Data;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Primary;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;import static org.example.redis.config.RedisConnectionFactoryConfig.redisConnectionFactors;@Configuration
@Data
@DependsOn("redisConnectionFactoryConfig")
public class RestTemplateConfig {@Bean(name = "orderStringRedisTemplate")@Primarypublic StringRedisTemplate orderStringRedisTemplate() {return buildStringRedisTemplate(redisConnectionFactors.get("redis-order"));}//
//    public RedisTemplate<String, Object> buildObjRedisTemplate(RedisConnectionFactory factory) {
//
//        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
//        redisTemplate.setConnectionFactory(factory);
//
//        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = jackson2JsonRedisSerializer();
//        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
//        redisTemplate.setKeySerializer(new StringRedisSerializer());
//
//        redisTemplate.setHashKeySerializer(new StringRedisSerializer());
//        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);
//        redisTemplate.afterPropertiesSet();
//        return redisTemplate;
//    }public StringRedisTemplate buildStringRedisTemplate(RedisConnectionFactory factory) {StringRedisTemplate redisTemplate = new StringRedisTemplate(factory);Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = jackson2JsonRedisSerializer();redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);redisTemplate.setKeySerializer(new StringRedisSerializer());redisTemplate.setHashKeySerializer(new StringRedisSerializer());redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);redisTemplate.afterPropertiesSet();return redisTemplate;}private Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer() {Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer<>(Object.class);ObjectMapper om = new ObjectMapper();// 指定要序列化的域,field,get和set,以及修饰符范围,ANY是都有包括private和publicom.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);// 指定序列化输入的类型,类必须是非final修饰的,final修饰的类,比如String,Integer等会跑出异常om.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);jackson2JsonRedisSerializer.setObjectMapper(om);return jackson2JsonRedisSerializer;}}
 使用实体类接收,使用Map接收。key需要根据自己的业务区分即可
package org.example.redis.config;import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

import java.util.Map;

/**
 * Binds the yaml section "redis.redis-configs": one {@link Config} per logical Redis
 * instance, keyed by name (e.g. "redis-order"). Values are kept as Strings so that
 * blank/absent entries can be detected before parsing.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Component
@ConfigurationProperties(prefix = "redis")
public class RedisPoolConfig {

    // FIX: was public — encapsulated; lombok @Data still generates
    // getRedisConfigs()/setRedisConfigs(), which Spring uses for binding and
    // RedisConnectionFactoryConfig uses for reading.
    private Map<String, Config> redisConfigs;

    /** Connection settings for a single Redis instance. */
    @NoArgsConstructor
    @AllArgsConstructor
    @Data
    public static class Config {
        private String name;
        private String type;                // standalone | cluster | sentinel
        private String hostAndPort;         // "host:port" or a comma-separated list
        private String username;
        private String password;
        private String database;
//        private String sentinelMasterHostAndPort; // for Sentinel
        private String masterName;          // for Sentinel
        // NOTE(review): relaxed binding maps this field to "cluster-max-redirects";
        // the sample yaml uses "max-redirects", which will NOT bind — confirm the key.
        private String clusterMaxRedirects; // for Cluster
        private String timeout;             // milliseconds

        private PoolConfig pool;            // optional connection-pool settings

        /** commons-pool2 settings, all optional (blank = library default). */
        @Data
        @NoArgsConstructor
        @AllArgsConstructor
        public static class PoolConfig {
            private String maxIdle;
            private String minIdle;
            private String maxActive;       // pool max total connections
            private String maxWait;         // max borrow wait, milliseconds
        }
    }
}
缓存redis 以及caffeine
package org.example.redis.config;import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

import java.util.Map;

/**
 * Binds the yaml section "cache": cache.caffeine.* holds CaffeineSpec strings and
 * cache.redis.* holds TTLs in seconds, both keyed by cache name.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Component
@ConfigurationProperties(prefix = "cache")
public class CustomerCacheProperties {

    // FIX: fields were public — encapsulated; lombok @Data supplies the
    // getters/setters used for property binding and by CacheManagerConfiguration.
    private Map<String, String> caffeine;
    private Map<String, String> redis;
}
<!--        lettuce这个客户端-->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<!-- commons-pool2 is required for Lettuce connection pooling -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-pool2</artifactId>
</dependency>
<dependency>
    <groupId>com.github.ben-manes.caffeine</groupId>
    <artifactId>caffeine</artifactId>
    <version>2.9.3</version>
</dependency>

相关文章:

  • 北京网站建设多少钱?
  • 辽宁网页制作哪家好_网站建设
  • 高端品牌网站建设_汉中网站制作
  • 【Oracle点滴积累】解决ORA-20001: Latest xml inventory is not loaded into table故障的方法
  • 麻雀搜索算法(SSA)与支持向量机(SVM)结合的预测模型(SSA-SVM)及其Python和MATLAB实现
  • 指针(下)
  • 依赖倒置原则:构建灵活软件架构的基石 - 通过代码实例深入解析
  • 什么是 Java?
  • 使用Cisco软件进行模拟万维网配置访问服务器过程
  • 运维高级内容--lvs按权重值轮询调度
  • python从入门到精通:判断语句
  • Spring Boot集成sentinel快速入门Demo
  • SQL之使用存储过程循环插入数据
  • OSPF笔记
  • 搭建高可用OpenStack(Queen版)集群(十一)之OpenStack集成ceph服务
  • opencv 多线程分块处理
  • FFmpeg源码:packet_alloc、av_new_packet、av_shrink_packet、av_grow_packet函数分析
  • 掌握NPM版本候选锁定:策略、实践与示例
  • 0x05 Python数据分析,Anaconda八斩刀
  • Angularjs之国际化
  • Fabric架构演变之路
  • java正则表式的使用
  • js学习笔记
  • laravel5.5 视图共享数据
  • maya建模与骨骼动画快速实现人工鱼
  • Travix是如何部署应用程序到Kubernetes上的
  • windows-nginx-https-本地配置
  • 对话 CTO〡听神策数据 CTO 曹犟描绘数据分析行业的无限可能
  • 每天一个设计模式之命令模式
  • 名企6年Java程序员的工作总结,写给在迷茫中的你!
  • 目录与文件属性:编写ls
  • 如何使用 OAuth 2.0 将 LinkedIn 集成入 iOS 应用
  • 如何邀请好友注册您的网站(模拟百度网盘)
  • 如何用vue打造一个移动端音乐播放器
  • 原生 js 实现移动端 Touch 滑动反弹
  • 3月27日云栖精选夜读 | 从 “城市大脑”实践,瞭望未来城市源起 ...
  • 扩展资源服务器解决oauth2 性能瓶颈
  • 支付宝花15年解决的这个问题,顶得上做出十个支付宝 ...
  • ​学习笔记——动态路由——IS-IS中间系统到中间系统(报文/TLV)​
  • # Kafka_深入探秘者(2):kafka 生产者
  • #HarmonyOS:基础语法
  • #在线报价接单​再坚持一下 明天是真的周六.出现货 实单来谈
  • (0)Nginx 功能特性
  • (31)对象的克隆
  • (Matlab)基于蝙蝠算法实现电力系统经济调度
  • (PHP)设置修改 Apache 文件根目录 (Document Root)(转帖)
  • (八)光盘的挂载与解挂、挂载CentOS镜像、rpm安装软件详细学习笔记
  • (附源码)小程序 交通违法举报系统 毕业设计 242045
  • (四)activit5.23.0修复跟踪高亮显示BUG
  • (微服务实战)预付卡平台支付交易系统卡充值业务流程设计
  • (原創) 如何使用ISO C++讀寫BMP圖檔? (C/C++) (Image Processing)
  • (转)GCC在C语言中内嵌汇编 asm __volatile__
  • (自用)网络编程
  • .cn根服务器被攻击之后
  • .net 7和core版 SignalR
  • .Net CF下精确的计时器
  • .NET Core 实现 Redis 批量查询指定格式的Key
  • .NET 应用启用与禁用自动生成绑定重定向 (bindingRedirect),解决不同版本 dll 的依赖问题