Today I tried to use CUDA in my ND4J and Deeplearning4j project. After that the neural net (imported from Keras) started to run much faster, but the code below started to run very slowly.
I have already tried switching the ND4J backend back to native (CPU), and with it I get fast results again.
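For reference, this is the small runtime check I use to see which backend is actually active (I assume Nd4j.getBackend() and Nd4j.getExecutioner() are the right calls for this, but I may be wrong):

System.out.println("ND4J backend: " + Nd4j.getBackend().getClass().getName());
System.out.println("Executioner: " + Nd4j.getExecutioner().getClass().getSimpleName());

With nd4j-cuda on the classpath it reports the CUDA classes, with nd4j-native the CPU ones.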
The problem part is highlighted with comments (on two lines):
import com.rabbitmq.client.Channel;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;
import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
public class GraphUpdater implements Runnable {

    private Pair pubPair;
    private ConcurrentHashMap<Integer, INDArray> pubsList;
    private Connection connectionMain;
    private Connection connectionSite;
    private Channel channel;

    GraphUpdater(Pair pubPair, ConcurrentHashMap<Integer, INDArray> pubsList, Channel channel) throws SQLException {
        this.pubPair = pubPair;
        this.channel = channel;
        this.pubsList = pubsList;
        connectionMain = DataBaseConnectionsPool.getConnection();
        connectionSite = DataBaseConnectionsPool.getConnectionSite();
    }

    @Override
    public void run() {
        try {
            channel.basicAck(pubPair.deliveryTag, false);
        } catch (IOException e) {
            System.out.println("Error, pub=" + pubPair.pub);
            e.printStackTrace();
        }
        PreparedStatement st;
        PreparedStatement stNew;
        try {
            st = connectionMain.prepareStatement("update vec_graph set closed_pubs=closed_pubs || ? where pub=?");
            stNew = connectionMain.prepareStatement("insert into vec_graph values (?, ?)");
            Statement psNew = connectionMain.createStatement();
            ResultSet rs = psNew.executeQuery("select * from new_public_vectors where pub=" + pubPair.pub);
            float[] _floatArr = new float[64];
            while (rs.next()) {
                // load the new 64-dimensional vector from the db and cache it as an INDArray
                Array arr = rs.getArray("vector");
                Object[] obj = (Object[]) arr.getArray();
                for (int vIndex = 0; vIndex < 64; vIndex++) {
                    _floatArr[vIndex] = (float) (double) obj[vIndex];
                }
                pubsList.put(rs.getInt(1), Nd4j.create(_floatArr));
            }
            // compare the pub from the task against all pubs from the db
            int pub = pubPair.pub;
            List<Integer> closed = new ArrayList<>();
            double mean = 0.96D;
            INDArray currentVector = pubsList.get(pub);
            // !%!%!%!% slow part of the code
            for (int pubId : pubsList.keySet()) {
                INDArray publicVector = pubsList.get(pubId);
                if (currentVector == null || pub == pubId || publicVector == null) {
                    continue;
                }
                // !%!%!%!% mega slow part of the code, ~99% of CPU time in VisualVM
                double dist = -Transforms.cosineDistance(currentVector, publicVector) + 1; // 1 - cosineDistance, i.e. back to cosine similarity
                if ((dist - mean) < 0.01 && (dist - mean) > 0) {
                    mean = (mean + dist) / 2;
                } else if (dist > mean) {
                    mean = dist;
                    closed.clear();
                    st.clearBatch();
                } else {
                    continue;
                }
                Array a = connectionMain.createArrayOf("int", new Object[]{pub});
                st.setArray(1, a);
                st.setInt(2, pubId);
                st.addBatch();
                closed.add(pubId);
            }
            Object[] obj_vector = new Object[closed.size()];
            for (int i = 0; i < closed.size(); i++) {
                obj_vector[i] = closed.get(i);
            }
            Array closedArray = connectionMain.createArrayOf("int", obj_vector);
            stNew.setInt(1, pub);
            stNew.setArray(2, closedArray);
            stNew.addBatch();
            if (pubPair.byUser != 0) {
                showToUser(closed, pub, pubPair.byUser);
            }
            try {
                st.executeBatch();
                stNew.executeBatch();
            } catch (BatchUpdateException e) {
                e.printStackTrace();
                e.getNextException().printStackTrace();
            }
        } catch (BatchUpdateException e) {
            e.printStackTrace();
            e.getNextException().printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            try {
                connectionMain.close();
                connectionSite.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
I would like to end up with one of the following:
get faster results while still using the GPU, or
turn the GPU off for this part of the code and keep it only for the NN.
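The only idea I have so far for the first option (I am not sure it is even correct; allVectors and current below are just illustrative names, not variables from my class) is to stop calling Transforms.cosineDistance once per pair and instead do the whole comparison as one matrix operation, so the GPU gets a single large op instead of thousands of tiny ones:

// rough sketch: cosine similarity of `current` (64 elements) against every row of `allVectors` (shape [N, 64])
INDArray dots = allVectors.mmul(current.reshape(64, 1));                // [N, 1] dot products
INDArray rowNorms = allVectors.norm2(1).reshape(allVectors.rows(), 1);  // [N, 1] per-row L2 norms
double currentNorm = current.norm2Number().doubleValue();
INDArray sims = dots.div(rowNorms.mul(currentNorm));                    // [N, 1] cosine similarities

Would something like this actually avoid the per-call overhead on the CUDA backend, or is there a better way?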