2

如何编译以下程序?不知何故,我无法摆脱错误 "No instance for (PersistBackend IO)"。

我的目标是了解如何使用 io-streams 有效地填充数据库表。`makeOutputStream` 的类型是 `(Maybe a -> IO ()) -> IO (OutputStream a)`,而 `insertWord` 返回 `m ()`,因此它不接受 `IO ()` 作为返回类型。

(后期添加:找到了解决方法,但这不是问题的答案。见下文。)

错误消息是:

Words_read2.hs:30:36:
    No instance for (PersistBackend IO)
      arising from a use of `insertWord'
    Possible fix: add an instance declaration for (PersistBackend IO)
    In the first argument of `Streams.makeOutputStream', namely
      `insertWord'
    In a stmt of a 'do' block:
      os <- Streams.makeOutputStream insertWord
    In the expression:
      do { is <- Streams.handleToInputStream h >>= Streams.words;
           os <- Streams.makeOutputStream insertWord;
           Streams.connect is os }

产生此错误的代码是:

{-# LANGUAGE GADTs, TypeFamilies, TemplateHaskell, QuasiQuotes, FlexibleInstances, FlexibleContexts, StandaloneDeriving #-}

import qualified Data.ByteString as B
import           Data.Maybe
import           Control.Monad.IO.Class (MonadIO, liftIO)
import           Database.Groundhog.Core 
import           Database.Groundhog.TH
import           Database.Groundhog.Sqlite
import           System.IO
import           System.IO.Streams.File
import qualified System.IO.Streams as Streams

-- | Entity with a single String column; mkPersist below maps it to a table.
data Words = Words {word :: String} deriving (Eq, Show)

-- Template Haskell splice: generates the PersistEntity instance (and related
-- boilerplate) for Words from the quasiquoted groundhog schema definition.
mkPersist defaultCodegenConfig [groundhog|
definitions:
  - entity: Words
|]

-- | Persist one whitespace-delimited word coming off the stream.
-- 'Nothing' marks end-of-stream and is a no-op; note the PersistBackend
-- constraint, which is exactly what plain IO cannot satisfy.
insertWord :: (MonadIO m, PersistBackend m) => Maybe B.ByteString -> m ()
insertWord = maybe (return ()) (insert_ . Words . show . B.unpack)

-- Entry point: migrate the schema, then stream the word file into the table.
-- NOTE(review): this is the version that does NOT compile --
-- Streams.makeOutputStream wants a (Maybe a -> IO ()) callback, but insertWord
-- needs a PersistBackend monad, and IO has no such instance (the error quoted
-- above points at line 30:36, the makeOutputStream call below).
main = do
  withSqliteConn "words2.sqlite" $ runDbConn $ do
     runMigration defaultMigrationLogger $ migrate (undefined :: Words)
     liftIO $ withFile "web2" ReadMode $ \h -> do  -- a link to /usr/share/dict/web2 - a  list of words one per line 
        is <- Streams.handleToInputStream h >>= Streams.words 
        os <- Streams.makeOutputStream insertWord
        Streams.connect is os

作为一种变通方法,我们可以用其他方式做事:我们不在 runDbConn 内部完成全部工作,而是返回一个(池)连接的句柄并把它传递下去。这个想法来自 SO answer to question: Making Custom Instances of PersistBackend

{-# LANGUAGE GADTs, TypeFamilies, TemplateHaskell, QuasiQuotes, FlexibleInstances, FlexibleContexts, StandaloneDeriving #-}

import qualified Data.ByteString as B
import           Data.Maybe
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import           Control.Monad.IO.Class -- (MonadIO, liftIO)
import           Control.Monad.Trans.Control
import           Database.Groundhog.Core 
import           Database.Groundhog.TH
import           Database.Groundhog.Sqlite
import           System.IO
import           System.IO.Streams.File
import qualified System.IO.Streams as Streams

-- | Entity with a single Text column (Text instead of String this time).
data Words = Words {word :: T.Text} deriving (Eq, Show)

-- Template Haskell splice: generates the PersistEntity instance for Words.
mkPersist defaultCodegenConfig [groundhog|
definitions:
  - entity: Words
|]

-- Workaround entry point: grab a pool handle first, then hand it to the
-- stream callback so the callback itself can live in plain IO.
main = do
  -- NOTE(review): withSqlitePool passes the pool to its callback and may tear
  -- it down when the callback returns; smuggling pconn out with `return` looks
  -- fragile -- TODO confirm. (The answer below uses createSqlitePool instead.)
  gh <- do withSqlitePool "words5.sqlite" 5 $ \pconn -> return pconn 
  runDbConn (runMigration defaultMigrationLogger $ migrate (undefined :: Words)) gh
  withFile "web3" ReadMode $ \h -> do  -- 500 words from /usr/share/dict/web2 - a list of words one per line 
    is <- Streams.handleToInputStream h >>= Streams.words 
    os <- Streams.makeOutputStream (iw2db gh)
    Streams.connect is os

-- | Output-stream sink: decode one UTF-8 word and insert it through the pool
-- handle. End-of-stream ('Nothing') does nothing. Each call runs runDbConn,
-- i.e. its own transaction (the answer below optimizes this away).
iw2db :: (MonadIO m, MonadBaseControl IO m, ConnectionManager cm Sqlite) => cm -> Maybe B.ByteString -> m()
iw2db conn mw = case mw of
  Just w  -> runDbConn (insert_ $ Words (T.decodeUtf8 w)) conn
  Nothing -> return ()
4

1 回答 1

1

Groundhog 动作只能在作为 PersistBackend 实例的 monad 中运行。IO 不能作为其实例,因为与 DbPersist 不同,它不携带连接信息。

我喜欢解决方法中的代码,但可以做得更快。现在每个动作都在 runDbConn 打开的自己的事务中运行。为了避免这种情况,我们可以从池中打开一个连接并开始一个事务。然后每个动作都重用这个连接,避免了事务开销。在这种情况下, createSqlitePool 也比 withSqlitePool 更好。

{-# LANGUAGE GADTs, TypeFamilies, TemplateHaskell, QuasiQuotes, FlexibleInstances, FlexibleContexts, StandaloneDeriving #-}

import qualified Data.ByteString as B
import           Data.Maybe    
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import           Control.Monad.IO.Class -- (MonadIO, liftIO)
import           Control.Monad.Trans.Control
import           Database.Groundhog.Core 
import           Database.Groundhog.TH
import           Database.Groundhog.Sqlite
import           System.IO
import           System.IO.Streams.File
import qualified System.IO.Streams as Streams
import Control.Monad.Logger (MonadLogger, NoLoggingT(..))
-- | Entity with a single Text column, as in the workaround version.
data Words = Words {word :: T.Text} deriving (Eq, Show)

-- Template Haskell splice: generates the PersistEntity instance for Words.
mkPersist defaultCodegenConfig [groundhog|
definitions:
  - entity: Words
|]

-- | Fast version from the answer: createSqlitePool keeps the pool alive, and
-- withConn opens ONE connection with ONE transaction that every insert made by
-- iw2db reuses (via runDbConnNoTransaction), avoiding per-insert transactions.
main = do
  gh <- createSqlitePool "words5.sqlite" 5
  runDbConn (runMigration defaultMigrationLogger $ migrate (undefined :: Words)) gh
  withFile "/usr/share/dict/words" ReadMode $ \h -> do  -- 500 words from /usr/share/dict/web2 - a list of words one per line 
    is <- Streams.handleToInputStream h >>= Streams.words
    withConn (\conn -> liftIO $ do -- (conn :: Sqlite) with opened transaction
       os <- Streams.makeOutputStream (iw2db conn)
-- It is important to put Streams.connect inside withConn so that it uses the same transaction
-- If we put it outside, the transaction will be already closed and Sqlite will automatically do a new transaction for each insert
       Streams.connect is os) gh

-- | Stream sink: decode the incoming UTF-8 word and insert it without opening
-- a new transaction, so it rides on the transaction already opened by the
-- surrounding withConn. 'Nothing' (end of stream) is a no-op.
iw2db :: (MonadIO m, MonadBaseControl IO m, ConnectionManager cm Sqlite)
      => cm -> Maybe B.ByteString -> m ()
iw2db conn = maybe (return ()) (\w -> runDbConnNoTransaction (insert_ (Words (T.decodeUtf8 w))) conn)

-- Probably this function should go to the Generic module
-- | Like 'runDbConn' but skips opening a transaction of its own, so the action
-- can participate in a transaction already opened by 'withConn'.
runDbConnNoTransaction :: (MonadBaseControl IO m, MonadIO m, ConnectionManager cm conn) => DbPersist conn (NoLoggingT m) a -> cm -> m a
runDbConnNoTransaction action conn =
  runNoLoggingT $ withConnNoTransaction (runDbPersist action) conn
于 2013-11-13T21:31:42.063 回答