3

创建一个多线程应用程序,该应用程序创建多个线程并查询数据库的同一个表

给定具有以下格式的输入 xml 文件:

<transliteration>
<element>
    <source>about us</source>
</element>
</transliteration>

应用程序读取多个文件并为每个 xml 文件创建多个线程,输出将是另一个具有格式的 xml 文件

<transliteration>
<element>
    <source>about us</source>
        <target/>
</element>
</transliteration>

下面是线程的run方法

public void run() {

        MultipleDatabaseThread th = new MultipleDatabaseThread();
        Map<String,String> map = new HashMap<String,String>();

        try
        {
            Document doc = loadXmlContentToMemory(this.inputString);

            XPathExpression expr = null;
            XPathFactory xFactory = XPathFactory.newInstance();
            XPath xPath = xFactory.newXPath();
            expr = xPath.compile("/transliteration/element//source");
            Object result = expr.evaluate(doc, XPathConstants.NODESET);
            NodeList nodes = (NodeList) result;
            String sourceString = "";
            if(nodes.getLength() > 0)
            {
                for (int i=0; i<nodes.getLength();i++)
                {
                    //System.out.println("Name: "+nodes.item(i).getNodeName() +" Local Name: "+nodes.item(i).getLocalName() +" Value: "+nodes.item(i).getTextContent());
                    sourceString = nodes.item(i).getTextContent();
                    map = th.getCompleteStringTransliterate(sourceString, this.language);

                    if(map.get(sourceString) == null || map.get(sourceString).equals("") || map.get(sourceString).equals(sourceString))
                    {
                        map.clear();
                        map = th.getRecordsFromDatabase(sourceString, language);

                        Element abc = doc.createElement("target");

                        String targetString = "";

                        String[] tokens = sourceString.trim().split("\\s+");

                        for(int itr=0; itr < tokens.length; itr++)
                        {
                            targetString = targetString+" "+map.get(tokens[itr]);
                        }

                        abc.setTextContent(targetString.trim());
                        nodes.item(i).getParentNode().appendChild(abc);
                    }
                    else
                    {
                       Element abc = doc.createElement("target");
                       abc.setTextContent(map.get(sourceString));
                       nodes.item(i).getParentNode().appendChild(abc);
                    }
                }
            }

            try
            {

                expr = xPath.compile("/transliteration/element//target");
                result = expr.evaluate(doc, XPathConstants.NODESET);
            }catch(XPathExpressionException ex)
            {   }

            NodeList nodesList = (NodeList) result;

            for(int i =0;i<nodesList.getLength();i++)
            {
                System.out.println("Node Name: "+nodesList.item(i).getNodeName()+" Node Value: "+nodesList.item(i).getTextContent());
            }

            try
            {
                Transformer transformer = TransformerFactory.newInstance().newTransformer();
                StreamResult strResult = new StreamResult(new File(this.inputString+"_out.xml"));
                if(doc != null && strResult != null)
                {
                    DOMSource source = new  DOMSource(doc);
                    transformer.transform(source, strResult);
                }
            }
            catch(TransformerException ex)
            {
               System.out.println(""+ex);
            }
            catch(TransformerFactoryConfigurationError ex)
            {
               System.out.println(""+ex);
            }

        }catch(IOException ex)
        {
            ex.printStackTrace(System.out);
        }
        catch(DOMException ex)
        {
            ex.printStackTrace(System.out);
        } 
        catch(ParserConfigurationException ex)
        {
            ex.printStackTrace(System.out);
        }
        catch(SAXException ex)
        {
            ex.printStackTrace(System.out);
        }
        catch(XPathExpressionException ex)
        {
            ex.printStackTrace(System.out);
        }
        catch(InterruptedException ex)
        {
            ex.printStackTrace(System.out);
        }

    }
  • loadXmlContentToMemory** 函数将文件名作为输入并在 Document 中加载 xml 内容。

  • getCompleteStringTransliterate** 是 MulltipleDatabaseThread 类的一个函数,它返回一个映射变量,其中包含源及其 traget 字符串。

  • getRecordsFromDatabase** 是同一类中的另一个函数,它拆分源字符串并再次获取其目标字符串返回映射变量

    公共类MultipleDatabaseThread {

    public Map<String,String> getCompleteStringTranslate(String inputString, String language) throws InterruptedException
    {
        Map<String,String> map = new HashMap<String,String>();
    
        synchronized(OutputRecords.getMap())
        {
           //long startTime = System.currentTimeMillis();
    
           OutputRecords.clearOutputStream(); 
           Thread thCompleteString = new DatabaseThread(inputString, language);
           thCompleteString.start();
           thCompleteString.join();
    
           map = OutputRecords.getRecords();
           //System.out.println("Complete String Time Taken:: "+(System.currentTimeMillis()-startTime));
           return map;
        }
    }
    
    
    
    public Map<String,String> getRecordsFromDatabase(String inputString, String language) throws  InterruptedException
    {
        String[] tokens = inputString.split("\\s+");
    
        Map<String,String> map = new HashMap<String,String>();
    
        Thread[] databaseThreads = new Thread[tokens.length];
    
        synchronized(OutputRecords.getMap())
        {
            //long startTime = System.currentTimeMillis();
    
            OutputRecords.clearOutputStream();
            for(int index=0; index < tokens.length; index++)
            {
                databaseThreads[index] = new DatabaseThread(tokens[index],language);
                databaseThreads[index].start();
            }
            for(int index = 0 ; index < tokens.length; index++)
            {
                    databaseThreads[index].join();
            }
    
            map = OutputRecords.getRecords();
            //System.out.println("Tokens Time Taken:: "+(System.currentTimeMillis()-startTime));
    
            return map;
    
        }
    }
    

    }

这两个函数都使用 OutputRecord 类中的静态/共享映射变量并生成多个线程,这些线程实际上调用数据库并填充共享映射变量并返回该变量

但是在执行这个程序时,它的给予

Exception in thread "Thread-0" java.lang.NullPointerException
    at transliterationthreading.ExecuteOuterThread.run(ExecuteOuterThread.java:66)

在线的

if(map.get(sourceString) == null || map.get(sourceString).equals("") || map.get(sourceString).equals(sourceString))

所以一个线程被终止,然后另一个线程被完全执行并生成输出文件。我没有得到问题任何人都可以提出解决这个问题的一些建议。

谢谢

4

2 回答 2

0

我怀疑在线路评估期间地图内容会发生变化

if(map.get(sourceString) == null || map.get(sourceString).equals("") || map.get(sourceString).equals(sourceString))

这样,您的空检查成功,但您从地图获得的新值可能为空。地图不同步!

将此行更改为

String sourceStringValue = map.get(sourceString);
if(sourceStringValue == null || sourceStringValue.equals("") || map.get(sourceString).equals(sourceString))
于 2013-06-19T09:18:59.380 回答
0

感谢大家的努力

我尝试通过不使用静态共享地图并使用 ExecutorService 和 Callable Interface 方法来使用不同的方法解决此问题

这是我的代码

try
        {
            doc = loadXmlContentToMemory(this.inputString);
            expr = xPath.compile("/transliteration/element//source");
            result = expr.evaluate(doc, XPathConstants.NODESET);

        }catch(ParserConfigurationException ex)
        {
            System.out.println("loadXmlError: "+ex.toString());
        }
        catch(IOException ex)
        {
            System.out.println("loadXmlError: "+ex.toString());
        }
        catch(SAXException ex)
        {
            System.out.println("loadXmlError: "+ex.toString());
        }
        catch(XPathExpressionException ex)
        {
            System.out.println("loadXmlError: "+ex.toString());
        }

        NodeList nodes = (NodeList) result;
        String sourceString = "";

        if(nodes.getLength() >0)
        {
            Map<String,String> fileMap = new HashMap<String,String>(); 
            ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);

            for(int index =0; index <nodes.getLength(); index++)
            {
                sourceString = nodes.item(index).getTextContent();
                Callable<Map<String,String>> worker = new MultipleDatabaseCallable(sourceString,language);
                Future<Map<String,String>> map = executor.submit(worker);

                try
                {
                    //System.out.println(""+Thread.currentThread().getName()+"SourceString:: "+sourceString+"Map: "+map.get().get(sourceString));
                      fileMap.putAll(map.get());
                }catch(InterruptedException ex)
                {
                    System.out.println("future read: "+ex.toString());
                }
                catch(ExecutionException ex)
                {
                    System.out.println("future read: "+ex.toString());
                }
            }

            executor.shutdown();
            // Wait until all threads are finish
            while (!executor.isTerminated()) {

            }
            ExecutorService tokenExecutor = Executors.newFixedThreadPool(NTHREADS);
            for(int i =0 ;i<nodes.getLength();i++)
            {
                sourceString = nodes.item(i).getTextContent();
                if(fileMap.get(sourceString) == null || fileMap.get(sourceString).equals("") || fileMap.get(sourceString).equals(sourceString))
                {
                    fileMap.remove(sourceString);
                    Callable<Map<String,String>> worker = new MultipleTokenCallable(sourceString,language);
                    Future<Map<String,String>> map = tokenExecutor.submit(worker);

                    try
                    {
                        fileMap.putAll(map.get());
                    }
                    catch(InterruptedException ex)
                    {
                        System.out.println("Tokenized put Interupted exception: "+ex.toString());
                    }
                    catch(ExecutionException ex)
                    {
                        System.out.println("Tokenized put Execution exception: "+ex.toString());
                        ex.printStackTrace(System.out);
                    }

                    Element targetElement = doc.createElement("target");
                    String targetString = "";

                    String[] tokens = sourceString.trim().split("\\s+");

                    for(int itr=0; itr < tokens.length; itr++)
                    {
                        targetString = targetString+" "+fileMap.get(tokens[itr]);
                    }
                    targetElement.setTextContent(targetString.trim());
                    nodes.item(i).getParentNode().appendChild(targetElement);
                    //System.out.println(""+Thread.currentThread().getName()+" Target:  "+targetString+" Source:  "+sourceString);
                }
                else
                {
                    Element abc = doc.createElement("target");
                    abc.setTextContent(fileMap.get(sourceString));
                    nodes.item(i).getParentNode().appendChild(abc);
                }
            }

            tokenExecutor.shutdown();
            // Wait until all threads are finish
            while (!tokenExecutor.isTerminated()) {

            }
            try
            {
                Transformer transformer = TransformerFactory.newInstance().newTransformer();
                StreamResult strResult = new StreamResult(new File(this.inputString+"_out.xml"));
                if(doc != null && strResult != null)
                {
                    DOMSource source = new  DOMSource(doc);
                    transformer.transform(source, strResult);
                }
            }
            catch(TransformerException ex)
            {
               System.out.println(""+ex);
            }
            catch(TransformerFactoryConfigurationError ex)
            {
               System.out.println(""+ex);
            }

        }

由于使用此生成多个线程并且所有线程都尝试同时连接到数据库,因此如果并发线程的数量增加,这可能会导致连接过多错误。所以你需要维护一个连接池来克服这个问题

于 2013-06-19T12:21:37.353 回答