我们正在尝试在 Openshift 内部的 Wildfly 11 上运行的两个 infinispan 节点上安装复制缓存。当我们在一个节点上写入一个对象时,它不会显示在另一个节点上以供读取。
启动时,节点连接到集群中,并且可以在日志中看到彼此:
22:59:52,513 INFO [org.infinispan.remoting.transport.jgroups.JGroupsTransport] (thread-2) ISPN000094: Received new cluster view for channel mychannel: [port-12-z73rp|13] (2) [port-12-z73rp, port-12-q10wf]
缓存配置如下。我们尝试了包括 NONE、FULL_XA 和 BATCH 在内的多种事务模式:
<!-- Infinispan cache-container (snippet of the WildFly "infinispan" subsystem).
     NOTE(review): default-cache points at "MyDefault_Cache", which is not defined
     anywhere in this fragment - confirm it exists in the full configuration. -->
<cache-container name="mycache" default-cache="MyDefault_Cache" jndi-name="java:jboss/infinispan/mycache" module="org.wildfly.clustering.server">
<!-- Cluster transport for this container; cross-node lock acquisition
     times out after 60s. -->
<transport lock-timeout="60000"/>
<!-- SYNC replication: a write is supposed to block until the other
     member has acknowledged it. -->
<replicated-cache name="MyCache_CategoryManager" mode="SYNC" jndi-name="java:jboss/infinispan/mycache/MyCache_CategoryManager">
<locking
isolation="READ_COMMITTED"/>
<!-- NOTE(review): mode="NONE" disables transactional behavior for this
     cache; the surrounding text says NONE, FULL_XA and BATCH were all
     tried with the same result. -->
<transaction
locking="OPTIMISTIC"
mode="NONE"
stop-timeout="30000"/>
</replicated-cache>
</cache-container>
<!-- NOTE(review): this closing tag has no matching <subsystem> opening tag in
     the quoted snippet - presumably trimmed when pasting. -->
</subsystem>
在代码中,缓存通过 spring 连接为:
/**
 * Returns the container-injected cache, logging its concrete runtime class
 * first (useful for verifying which Map implementation JNDI handed us).
 *
 * @return the map-backed cache instance wired into this bean
 */
public Map<String, Object> getCustomCache() {
    final Map<String, Object> cache = this.customCache;
    System.out.println("CACHE -> " + cache.getClass());
    return cache;
}
/**
 * Spring-style setter used to wire (or override) the cache instance.
 *
 * @param customCache the map-backed cache to store on this bean
 */
public void setCustomCache(Map<String, Object> customCache) {
this.customCache = customCache;
}
// Cache injected by the container from the WildFly-managed Infinispan
// subsystem via JNDI.
// NOTE(review): confirm that on BOTH nodes this lookup resolves to the
// clustered replicated cache and not to a locally-created default cache -
// that would explain writes landing in the DB but never replicating.
@Resource(lookup = "java:jboss/infinispan/mycache/MyCache_CategoryManager")
private Map<String, Object> customCache;
和 Jgroups 配置:
<!-- WildFly JGroups subsystem. Protocol order inside a stack is significant:
     each protocol sits on top of the one listed before it. -->
<subsystem xmlns="urn:jboss:domain:jgroups:5.0">
<channels default="ee">
<!-- NOTE(review): the default channel is bound to the "udp" stack, so the
     pgping/tcpping stacks below are inert unless something switches this
     attribute. UDP multicast discovery (PING over UDP) is typically not
     routable inside OpenShift - confirm which stack is actually active,
     since the cluster-view log line shows members DID discover each other. -->
<channel name="ee" stack="udp" cluster="mycluster"/>
</channels>
<stacks>
<stack name="udp">
<transport type="UDP" socket-binding="jgroups-udp"/>
<protocol type="PING"/>
<protocol type="MERGE3"/>
<protocol type="FD_SOCK"/>
<protocol type="FD_ALL"/>
<protocol type="VERIFY_SUSPECT"/>
<protocol type="pbcast.NAKACK2"/>
<protocol type="UNICAST3"/>
<protocol type="pbcast.STABLE"/>
<protocol type="pbcast.GMS"/>
<protocol type="UFC"/>
<protocol type="MFC"/>
<protocol type="FRAG2"/>
</stack>
<stack name="tcp">
<transport type="TCP" socket-binding="jgroups-tcp"/>
<protocol type="PING"/>
<protocol type="MERGE3"/>
<protocol type="FD_SOCK"/>
<protocol type="FD_ALL"/>
<protocol type="VERIFY_SUSPECT"/>
<protocol type="pbcast.NAKACK2"/>
<protocol type="UNICAST3"/>
<protocol type="pbcast.STABLE"/>
<protocol type="pbcast.GMS"/>
<protocol type="MFC"/>
<protocol type="FRAG2"/>
</stack>
<!-- Database-backed discovery: members register themselves in a shared
     PostgreSQL table instead of multicasting. -->
<stack name="pgping">
<transport type="TCP" socket-binding="jgroups-tcp"/>
<protocol type="JDBC_PING">
<property name="datasource_jndi_name">
${env.PG_MYCLUSTER_SERV_DB_JNDI_NAME}
</property>
<property name="initialize_sql">
CREATE TABLE IF NOT EXISTS jgroupsping (own_addr VARCHAR(200) NOT NULL, cluster_name VARCHAR(200) NOT NULL, ping_data BYTEA DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name))
</property>
</protocol>
<!-- NOTE(review): a second discovery protocol (PING) stacked on top of
     JDBC_PING is unusual; normally a stack carries exactly one discovery
     protocol directly above the transport - confirm this is intentional. -->
<protocol type="PING"/>
<protocol type="MERGE3"/>
<protocol type="FD_SOCK" socket-binding="jgroups-tcp-fd"/>
<protocol type="FD"/>
<protocol type="VERIFY_SUSPECT"/>
<protocol type="pbcast.NAKACK2"/>
<protocol type="UNICAST3">
<property name="conn_close_timeout">5000</property>
</protocol>
<protocol type="pbcast.STABLE"/>
<protocol type="pbcast.GMS">
<property name="join_timeout">3000</property>
</protocol>
<protocol type="MFC"/>
<protocol type="FRAG2"/>
</stack>
<stack name="tcpping">
<transport type="TCP" socket-binding="jgroups-tcp"/>
<!-- NOTE(review): initial_hosts lists only 127.0.0.1, so this stack can
     only ever discover a member on the same host; in a multi-pod
     OpenShift deployment the peer pod would never be found. -->
<protocol type="TCPPING">
<property name="initial_hosts">127.0.0.1[7600]</property>
<property name="port_range">0</property>
<property name="num_initial_members">1</property>
</protocol>
<protocol type="MERGE3"/>
<protocol type="FD_SOCK"/>
<protocol type="FD_ALL"/>
<protocol type="VERIFY_SUSPECT"/>
<protocol type="BARRIER"/>
<protocol type="pbcast.NAKACK2"/>
<protocol type="UNICAST3"/>
<protocol type="pbcast.STABLE"/>
<protocol type="pbcast.GMS"/>
<protocol type="UFC"/>
<protocol type="MFC"/>
<protocol type="FRAG2"/>
<protocol type="RSVP"/>
<!-- NOTE(review): auth_value is ${jboss.node.name}, which differs per
     node. MD5Token compares the joiner's hashed token against the local
     one, so with distinct node names members would reject each other's
     join attempts - confirm all nodes are meant to share one token. -->
<protocol type="AUTH">
<property name="auth_class">org.jgroups.auth.MD5Token</property>
<property name="token_hash">SHA</property>
<property name="auth_value">${jboss.node.name}</property>
</protocol>
</stack>
</stacks>
</subsystem>
数据进入数据库,如果我们在第二个节点上重建缓存,它将包含更新的信息。
鉴于节点已经互相连接,我们该如何排查并解决写入没有复制到另一个节点、导致读取不到数据的问题?我们已经尝试过复制(replicated)和分布式(distributed)两种缓存配置。