From ac1641918ef1341d1f682d6314bd38a567394ab8 Mon Sep 17 00:00:00 2001 From: Hans Raaf Date: Mon, 15 Apr 2019 14:55:44 +0200 Subject: [PATCH 1/5] More cleanup --- Data/TCache.hs | 7 +- Data/TCache.transact.hs | 953 ---------------------------------------- Data/TCache/Defs.hs | 14 +- TCache.cabal | 2 +- demos/basicSample.hs | 8 +- 5 files changed, 19 insertions(+), 965 deletions(-) delete mode 100644 Data/TCache.transact.hs diff --git a/Data/TCache.hs b/Data/TCache.hs index 811a4c9..f961577 100644 --- a/Data/TCache.hs +++ b/Data/TCache.hs @@ -330,8 +330,9 @@ data CheckTPVarFlags= AddToHash | NoAddToHash setCache :: Cache -> IO() setCache ref = readIORef ref >>= \ch -> writeIORef refcache ch --- | The cache holder. stablished by default +-- | The cache holder. established by default refcache :: Cache +{-# NOINLINE refcache #-} refcache =unsafePerformIO $ newCache >>= newIORef -- | Creates a new cache. Experimental @@ -712,7 +713,7 @@ getResource r= do{mr<- getResources [r];return $! head mr} {-# INLINE getResources #-} getResources:: (IResource a, Typeable a)=>[a]-> IO [Maybe a] getResources rs= atomically $ withSTMResources rs f1 where - f1 mrs= Resources [] [] mrs + f1 = Resources [] [] -- | Delete the resource from cache and from persistent storage. @@ -746,7 +747,7 @@ takeDBRef cache flags x =do case mr of Just dbref -> return . Just $! castErr dbref Nothing -> unsafeIOToSTM (finalize w) >> takeDBRef cache flags x - Nothing -> do + Nothing -> safeIOToSTM $ readToCache flags cache keyr -- unsafeIOToSTM $ readResourceByKey keyr diff --git a/Data/TCache.transact.hs b/Data/TCache.transact.hs deleted file mode 100644 index 467107f..0000000 --- a/Data/TCache.transact.hs +++ /dev/null @@ -1,953 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables, ExistentialQuantification, DeriveDataTypeable - , FlexibleInstances, UndecidableInstances #-} - -{- | TCache is a transactional cache with configurable persistence that permits -STM transactions with objects thar synchronize synchronously or asynchronously with -their user defined storages. Default persistence in files is provided for testing purposes - - TCache implements 'DBRef's . They are persistent STM references with a traditional 'readDBRef', 'writeDBRef' Haskell interface -similar to TVars ('newDBRef', 'readDBRef', 'writeDBRef' etc) but with added persistence. -DBRefs are serializable, so they can be stored and retrieved. See some examples below. -Because they are references, they point to other serializable registers. -This permits persistent mutable inter-object relations. - -For simple transactions of lists of objects of the same type TCache implements -inversion of control primitives 'withSTMResources' and variants, that call pure user-defined code for registers update. -See examples below. - -Triggers in "Data.TCache.Triggers" are user defined hooks that are called back on register updates. That can be used for: - -"Data.TCache.IndexQuery" implements a straightforward pure Haskell, type-safe query language based - on register field relations. This module must be imported separately. - -"Data.TCache.IndexText" add full text search and content search to the qhery language - -"Data.TCache.DefaultPersistence" define instances for key definition, serialization -and persistence, and default file persistence. The file persistence is now more reliable, and the embedded IO reads inside STM transactions are safe. 
- - --} - - - - -module Data.TCache ( --- * Inherited from 'Control.Concurrent.STM' - - atomically - ,STM --- * synced atomical transaction - ,atomicallySync --- * Operations with cached database references -{-| 'DBRef's are persistent cached database references in the STM monad -with read/write primitives, so the traditional syntax of Haskell STM references -can be used for interfacing with databases. As expected, the DBRefs are transactional, - because they operate in the STM monad. - -A @DBRef@ is associated with its referred object trough its key. -Since DBRefs are serializable, they can be elements of mutable cached objects themselves. -They could point to other mutable objects -and so on, so DBRefs can act as \"hardwired\" relations from mutable objects -to other mutable objects in the database/cache. their referred objects are loaded, saved and flushed -to and from the cache automatically depending on the cache handling policies and the access needs. - -@DBRefs@ are univocally identified by its object keys, so they can be compared, ordered, checked for equality, and so on. -The creation of a DBRef, though 'getDBRef' is pure. This permits an efficient lazy access to the - registers through their DBRefs by lazy marshalling of the register content on demand. - -Example: Car registers have references to Person registers. - -@ -data Person= Person {pname :: String} deriving (Show, Read, Eq, Typeable) -data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, Typeable) -@ - -Here the Car register point to the Person register through the owner field. - -To permit persistence and being referred with DBRefs, define the 'Indexable' instance -for these two register types: - -@ -instance Indexable Person where key Person{pname= n} = "Person " ++ n -instance Indexable Car where key Car{cname= n} = "Car " ++ n -@ - -Now we create a DBRef to a Person whose name is \"Bruce\" - ->>> let bruce = getDBRef . key $ Person "Bruce" :: DBRef Person - ->>> show bruce ->"DBRef \"Person bruce\"" - ->>> atomically (readDBRef bruce) ->Nothing - -'getDBRef' is pure and creates the reference, but not the referred object; -To create both the reference and the DBRef, use 'newDBRef'. -Lets create two Cars and its two Car DBRefs with bruce as owner: - ->>> cars <- atomically $ mapM newDBRef [Car bruce "Bat Mobile", Car bruce "Porsche"] - ->>> print cars ->[DBRef "Car Bat Mobile",DBRef "Car Porsche"] - ->>> carRegs<- atomically $ mapM readDBRef cars -> [Just (Car {owner = DBRef "Person bruce", cname = "Bat Mobile"}) -> ,Just (Car {owner = DBRef "Person bruce", cname = "Porsche"})] - -try to write with 'writeDBRef': - ->>> atomically . writeDBRef bruce $ Person "Other" ->*** Exception: writeDBRef: law of key conservation broken: old , new= Person bruce , Person Other - -DBRef's can not be written with objects of different keys: - ->>> atomically . writeDBRef bruce $ Person "Bruce" - ->>> let Just carReg1= head carRegs - -now from the Car register it is possible to recover the owner's register: - ->>> atomically $ readDBRef ( owner carReg1) ->Just (Person {pname = "bruce"}) - -DBRefs, once the referenced, cached object is looked up in the cache and found at creation, do -not perform any further cache lookup afterwards, so reads and writes from/to DBRefs are faster -than *Resource(s) calls, which perform cache lookups every time the object is accessed. - -DBRefs and @*Resource(s)@ primitives are completely interoperable. 
The latter operate implicitly with DBRefs - --} - - -,DBRef -,getDBRef -,keyObjDBRef -,newDBRef ---,newDBRefIO -,readDBRef -,writeDBRef -,delDBRef - --- * IResource class -{- | Cached objects must be instances of `IResource`. -Such instances can be implicitly derived trough auxiliary classes for file persistence. --} -,IResource(..) - --- * Operations with cached objects -{- | Implement inversion of control primitives where the user defines the objects to retrieve. The primitives -then call the defined function that determines how to transform the retrieved objects, which are sent -back to the storage and a result is returned. - -In this example \"buy\" is a transaction where the user buys an item. -The spent amount is increased and the stock of the product is decreased: - -@ -data Data= User{uname:: String, uid:: String, spent:: Int} | - Item{iname:: String, iid:: String, price:: Int, stock:: Int} - deriving (Read, Show) - -instance `Indexable` Data where - `key` User{uid=id}= id - `key` Item{iid=id}= id - -user `buy` item= 'withResources'[user,item] buyIt - where - buyIt[Just us,Just it] - | stock it > 0= [us',it'] - | otherwise = error \"stock is empty for this product\" - where - us'= us{spent=spent us + price it} - it'= it{stock= stock it-1} - buyIt _ = error \"either the user or the item (or both) does not exist\" -@ --} -,resources -- empty resources -,withSTMResources -,Resources(..) -- data definition used to communicate object Inserts and Deletes to the cache -,withResources -,withResource -,getResources -,getResource -,deleteResources -,deleteResource - --- * Trigger operations -{- | Trriggers are called just before an object of the given type is created, modified or deleted. -The DBRef to the object and the new value is passed to the trigger. -The called trigger function has two parameters: the DBRef being accesed -(which still contains the old value), and the new value. -If the content of the DBRef is being deleted, the second parameter is 'Nothing'. -if the DBRef contains Nothing, then the object is being created - -Example: - -every time a car is added, or deleted, the owner's list is updated -this is done by the user defined trigger addCar - -@ - addCar pcar (Just(Car powner _ )) = addToOwner powner pcar - addCar pcar Nothing = readDBRef pcar >>= \(Just car)-> deleteOwner (owner car) pcar - - addToOwner powner pcar=do - Just owner <- readDBRef powner - writeDBRef powner owner{cars= nub $ pcar : cars owner} - - deleteOwner powner pcar= do - Just owner <- readDBRef powner - writeDBRef powner owner{cars= delete pcar $ cars owner} - - main= do - 'addTrigger' addCar - putStrLn \"create bruce's register with no cars\" - bruce \<- 'atomically' 'newDBRef' $ Person \"Bruce\" [] - putStrLn "add two car register with \"bruce\" as owner using the reference to the bruces register" - let newcars= [Car bruce \"Bat Mobile\" , Car bruce \"Porsche\"] - insert newcars - Just bruceData \<- atomically $ 'readDBRef' bruce - putStrLn "the trigger automatically updated the car references of the Bruce register" - print . length $ cars bruceData - print bruceData -@ - -gives: - -> main -> 2 -> Person {pname = "Bruce", cars = [DBRef "Car Porsche",DBRef "Car Bat Mobile"]} - --} - -,addTrigger - --- * Cache control -,flushDBRef -,flushAll -,Cache -,setCache -,newCache -,refcache -,syncCache -,setConditions -,clearSyncCache -,numElems -,syncWrite -,SyncMode(..) 
-,defaultCheck --- * other -,safeIOToSTM -) -where - - -import GHC.Conc -import Control.Monad(when) -import Control.Monad.Trans -import Data.HashTable as H -import Data.IORef -import System.IO.Unsafe -import System.IO(hPutStr, stderr) -import Data.Maybe -import Data.List -import Data.TCache.Defs -import Data.TCache.IResource -import Data.TCache.Triggers -import Control.Exception(handle,assert, bracket, SomeException) -import Data.Typeable -import System.Time -import System.Mem -import System.Mem.Weak - -import Control.Concurrent.MVar -import Control.Exception(catch, throw,evaluate) - -import Debug.Trace -(!>) = flip trace - --- there are two references to the DBRef here --- The Maybe one keeps it alive until the cache releases it for *Resources --- calls which does not reference dbrefs explicitly --- The weak reference keeps the dbref alive until is it not referenced elsewhere -data CacheElem= forall a.(IResource a,Typeable a) => CacheElem (Maybe (DBRef a)) (Weak(DBRef a)) - -type Ht = HashTable String CacheElem - -data ToWrite= forall a.(Typeable a,IResource a ) => ToWrite (DBRef a) - -instance Eq ToWrite where - (ToWrite x)==(ToWrite y)= keyObjDBRef x == keyObjDBRef y - --- contains the hashtable, last sync time and te list of references to be written in permanent storage -type Cache = IORef (Ht , Integer,[ToWrite]) -data CheckTPVarFlags= AddToHash | NoAddToHash - - -data SyncMode= Synchronous -- ^ write state after every step - | Asynchronous - {frequency :: Int -- ^ number of seconds between saves when asynchronous - ,check :: (Integer-> Integer-> Integer-> Bool) -- ^ The user-defined check-for-cleanup-from-cache for each object. 'defaultCheck' is an example - ,cacheSize :: Int -- ^ size of the cache when async - } - | SyncManual -- ^ use Data.TCache.syncCache to write the state - - - - -tvSyncWrite= unsafePerformIO $ newIORef (Synchronous, Nothing) - --- | --- The execution log is cached in memory using the package `TCache`. This procedure defines the polcy for writing the cache into permanent storage. --- --- For fast workflows, or when TCache` is used also for other purposes , `Asynchronous` is the best option --- --- `Asynchronous` mode invokes `clearSyncCache`. For more complex use of the synchronization --- please use this `clearSyncCache`. --- --- When interruptions are controlled, use `SyncManual` mode and include a call to `syncCache` in the finalization code - -syncWrite:: (Monad m, MonadIO m) => SyncMode -> m () -syncWrite mode= do - (_,thread) <- liftIO $ readIORef tvSyncWrite - when (isJust thread ) $ liftIO . killThread . fromJust $ thread - case mode of - Synchronous -> modeWrite - SyncManual -> modeWrite - Asynchronous time check maxsize -> do - th <- liftIO $ clearSyncCacheProc time check maxsize >> return() - liftIO $ writeIORef tvSyncWrite (mode,Just th) - where - modeWrite= - liftIO $ writeIORef tvSyncWrite (mode, Nothing) - - -class Monad m => HasTransactions m where - tatomically :: m a -> IO a - tretry :: m a - tadd :: (Typeable a,IResource a) => DBRef a -> m () - tget :: m [ToWrite] - transact :: m Bool - tlift :: STM a -> m a - -instance HasTransactions STM where - tatomically = atomically - tretry= retry - tlift= id - tadd x= unsafeIOToSTM . 
atomicModifyIORef refcache - $ \(cache, t, xs) -> ((cache,t, ToWrite x : xs),()) - tget = undefined safeIOToSTM $ atomicModifyIORef refcache - $ \(cache, t, xs) -> - let t2= unsafePerformIO timeInteger - in t2 `seq` ((cache,t2,[]),xs) - transact= do - (savetype,_) <- unsafeIOToSTM $ readIORef tvSyncWrite - case savetype of - Synchronous -> do - sync - return True - _ -> return True - where - sync= do - l <- tget - tosave <- mapM (\(ToWrite x) ->readDBRef x >>= \my -> return $ fmap Filtered my) $ nub l - unsafeIOToSTM . criticalSection saving . save $ catMaybes tosave - - --- | perform an atomic transaction and write to persistent storage if --- the synchronization policies defined in 'syncWrite' is 'Synchronous'. --- Therefore, unlike 'atomic' it can perform synchronous transactions. --- the synchronization process is definedd in the 'transact' method in the 'HasTransaction' instance. --- The signature uses a generic monad instead of STM in order to allow future use of --- more sophisticated mechanisms of synchronization such are distributed transactions --- --- the STM monad uses 'syncCache' - -atomicallySync :: HasTransactions m => m a -> IO a -atomicallySync proc= tatomically $ do - r <- proc - t <- transact - if t then return r else tretry - - - --- | set the cache. this is useful for hot loaded modules that will update an existing cache. Experimental -setCache :: Cache -> IO() -setCache ref = readIORef ref >>= \ch -> writeIORef refcache ch - --- the cache holder. stablished by default -refcache :: Cache -refcache =unsafePerformIO $ newCache >>= newIORef - --- | newCache creates a new cache. Experimental -newCache :: IO (Ht , Integer,[ToWrite]) -newCache =do - c <- H.new (==) hashString - return (c,0,[]) - --- | Return the total number of DBRefs in the cache. For debug purposes. --- This does not count the number of objects in the cache since many of the 'DBRef's --- may not have the referenced object loaded. It's O(n). -numElems :: IO Int -numElems= do - (cache, _,_) <- readIORef refcache - elems <- toList cache - return $ length elems - - -deRefWeakSTM = unsafeIOToSTM . deRefWeak - -deleteFromCache :: (IResource a, Typeable a) => DBRef a -> IO () -deleteFromCache (DBRef k tv)= do - (cache, _,_) <- readIORef refcache - H.delete cache k -- !> ("delete " ++ k) - - --- | return the reference value. If it is not in the cache, it is fetched --- from the database. -readDBRef :: (IResource a, Typeable a) => DBRef a -> STM (Maybe a) -readDBRef dbref@(DBRef key tv)= do - r <- readTVar tv - case r of - Exist (Elem x _ mt) -> do - t <- unsafeIOToSTM timeInteger - writeTVar tv . Exist $ Elem x t mt - return $ Just x - DoNotExist -> return $ Nothing - NotRead -> do - r <- safeIOToSTM $ readResourceByKey key - case r of - Nothing -> writeTVar tv DoNotExist >> return Nothing - Just x -> do - t <- unsafeIOToSTM timeInteger - writeTVar tv $ Exist $ Elem x t t - return $ Just x - --- | write in the reference a value --- The new key must be the same than the old key of the previous object stored --- otherwise, an error "law of key conservation broken" will be raised --- --- WARNING: the value to be written in the DBRef must be fully evaluated. Delayed evaluations at --- serialization time can cause inconsistencies in the database. --- In future releases this will be enforced. 
-writeDBRef :: (IResource a, Typeable a) => DBRef a -> a -> STM () -writeDBRef dbref@(DBRef key tv) x= x `seq` do - let newkey= keyResource x - if newkey /= key - then error $ "writeDBRef: law of key conservation broken: old , new= " ++ key ++ " , "++newkey - else do - tlift $ do - applyTriggers [dbref] [Just x] - t <- unsafeIOToSTM timeInteger - writeTVar tv $ Exist $ Elem x t t -- !> ("writeDBRef "++ key) - tadd dbref - return() - - - - -instance Show (DBRef a) where - show (DBRef key _)= "DBRef \""++ key ++ "\"" - -instance (IResource a, Typeable a) => Read (DBRef a) where - readsPrec n ('D':'B':'R':'e':'f':' ':'\"':str)= - let (key,nstr) = break (== '\"') str - in [( getDBRef key :: DBRef a, tail nstr)] - readsPrec _ _ = [] - -instance Eq (DBRef a) where - DBRef k _ == DBRef k' _ = k==k' - -instance Ord (DBRef a) where - compare (DBRef k _) (DBRef k' _) = compare k k' - --- | Return the key of the object referenced by the DBRef -keyObjDBRef :: DBRef a -> String -keyObjDBRef (DBRef k _)= k - --- | Get the reference to the object in the cache. If it does not exist, the reference is created empty. --- Every execution of 'getDBRef' returns the same unique reference to this key, --- so it can be safely considered pure. This property is useful because deserialization --- of objects with unused embedded 'DBRef's do not need to marshall them eagerly. --- This also avoids unnecessary cache lookups of the referenced objects. -{-# NOINLINE getDBRef #-} -getDBRef :: (Typeable a, IResource a) => String -> DBRef a -getDBRef key= unsafePerformIO $! getDBRef1 $! key where - getDBRef1 :: (Typeable a, IResource a) => String -> IO (DBRef a) - getDBRef1 key= do - (cache,_,_) <- readIORef refcache -- !> ("getDBRef "++ key) - r <- H.lookup cache key - case r of - Just (CacheElem _ w) -> do - mr <- deRefWeak w - case mr of - Just dbref@(DBRef _ tv) -> return $! castErr dbref - Nothing -> finalize w >> getDBRef1 key -- the weak pointer has not executed his finalizer - - Nothing -> do - tv<- newTVarIO NotRead - dbref <- evaluate $ DBRef key tv - w <- mkWeakPtr dbref . Just $ deleteFromCache dbref - H.update cache key (CacheElem Nothing w) - return dbref - -{- - | Create the object passed as parameter (if it does not exist) and --- return its reference in the IO monad. --- If an object with the same key already exists, it is returned as is --- If not, the reference is created with the new value. --- If you like to update in any case, use 'getDBRef' and 'writeDBRef' combined -newDBRefIO :: (IResource a,Typeable a) => a -> IO (DBRef a) -newDBRefIO x= do - let key = keyResource x - mdbref <- mDBRefIO key - case mdbref of - Right dbref -> return dbref - - Left cache -> do - tv<- newTVarIO DoNotExist - let dbref= DBRef key tv - w <- mkWeakPtr dbref . Just $ deleteFromCache dbref - H.update cache key (CacheElem Nothing w) - t <- timeInteger - atomically $ do - applyTriggers [dbref] [Just x] --`debug` ("before "++key) - writeTVar tv . Exist $ Elem x t t - return dbref - --} - - ----- get a single DBRef if exist ---mDBRefIO --- :: (IResource a, Typeable a) --- => String -- ^ the list of partial object definitions for which keyResource can be extracted --- -> IO (Either Ht (DBRef a)) -- ^ ThTCache.hse TVars that contain such objects ---mDBRefIO k= do --- (cache,_) <- readIORef refcache --- r <- H.lookup cache k --- case r of --- Just (CacheElem _ w) -> do --- mr <- deRefWeak w --- case mr of --- Just dbref -> return . Right $! 
castErr dbref --- Nothing -> finalize w >> mDBRefIO k --- Nothing -> return $ Left cache - - - --- | Create the object passed as parameter (if it does not exist) and --- return its reference in the STM monad. --- If an object with the same key already exists, it is returned as is --- If not, the reference is created with the new value. --- If you like to update in any case, use 'getDBRef' and 'writeDBRef' combined --- if you need to create the reference and the reference content, use 'newDBRef' - -newDBRef :: (IResource a, Typeable a) => a -> STM (DBRef a) -newDBRef x = do - let ref= getDBRef $! keyResource x - - mr <- readDBRef ref - case mr of - Nothing -> writeDBRef ref x >> return ref -- !> " write" - Just r -> return ref -- !> " non write" - ---newDBRef :: (IResource a, Typeable a) => a -> STM (DBRef a) ---newDBRef x = do --- let key= keyResource x --- mdbref <- unsafeIOToSTM $ mDBRefIO key --- case mdbref of --- Right dbref -> return dbref --- Left cache -> do --- t <- unsafeIOToSTM timeInteger --- tv <- newTVar DoNotExist --- let dbref= DBRef key tv --- (cache,_) <- unsafeIOToSTM $ readIORef refcache --- applyTriggers [dbref] [Just x] --- writeTVar tv . Exist $ Elem x t t --- unsafeIOToSTM $ do --- w <- mkWeakPtr dbref . Just $ deleteFromCache dbref --- H.update cache key ( CacheElem Nothing w) --- return dbref - --- | delete the content of the DBRef form the cache and from permanent storage -delDBRef :: (IResource a, Typeable a) => DBRef a -> STM() -delDBRef dbref@(DBRef k tv)= do - mr <- readDBRef dbref - case mr of - Just x -> do - applyTriggers [dbref] [Nothing] - writeTVar tv DoNotExist - - safeIOToSTM $ criticalSection saving $ delResource x - - Nothing -> return () - --- | Deletes the referenced object from the cache, not the database (see 'delDBRef') --- useful for cache invalidation when the database is modified by other processes. -flushDBRef :: (IResource a, Typeable a) =>DBRef a -> STM() -flushDBRef (DBRef _ tv)= writeTVar tv NotRead - - --- | drops the entire cache. -flushAll :: STM () -flushAll = do - (cache, time, _) <- unsafeIOToSTM $ readIORef refcache - elms <- unsafeIOToSTM $ toList cache - mapM_ (del cache) elms - where - del cache ( _ , CacheElem _ w)= do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Just (DBRef _ tv) -> writeTVar tv NotRead - Nothing -> unsafeIOToSTM (finalize w) - - - --- | This is the main function for the *Resource(s) calls. All the rest derive from it. The results are kept in the STM monad --- so it can be part of a larger STM transaction involving other DBRefs --- The 'Resources' register returned by the user-defined function is interpreted as such: --- --- * 'toAdd': the content of this field will be added/updated to the cache --- --- * 'toDelete': the content of this field will be removed from the cache and from permanent storage --- --- * 'toReturn': the content of this field will be returned by 'withSTMResources' --- --- WARNING: To catch evaluations errors at the right place, the values to be written must be fully evaluated. --- .Errors in delayed evaluations at serialization time can cause inconsistencies in the database. - -withSTMResources :: (IResource a, Typeable a)=> [a] -- ^ the list of resources to be retrieved - -> ([Maybe a]-> Resources a x) -- ^ The function that process the resources found and return a Resources structure - -> STM x -- ^ The return value in the STM monad. 
- -withSTMResources rs f= do - (cache,_,_) <- unsafeIOToSTM $ readIORef refcache - mtrs <- takeDBRefs rs cache AddToHash - - mrs <- mapM mreadDBRef mtrs - case f mrs of - Retry -> retry - Resources as ds r -> do - - applyTriggers (map (getDBRef . keyResource) ds) (repeat (Nothing `asTypeOf` (Just(head ds)))) - delListFromHash cache ds - releaseTPVars as cache - - safeIOToSTM $ bracket - (takeMVar saving) - (putMVar saving) - $ const $ mapM_ delResource ds - return r - - where - mreadDBRef :: (IResource a, Typeable a) => Maybe (DBRef a) -> STM (Maybe a) - mreadDBRef (Just dbref)= readDBRef dbref - mreadDBRef Nothing = return Nothing - - --- | update of a single object in the cache --- --- @withResource r f= 'withResources' [r] (\[mr]-> [f mr])@ -withResource:: (IResource a, Typeable a) - => a -- ^ prototypes of the object to be retrieved for which keyResource can be derived - -> (Maybe a-> a) -- ^ update function that return another full object - -> IO () -withResource r f= withResources [r] (\[mr]-> [f mr]) - - --- | to atomically add/modify many objects in the cache - --- @ withResources rs f= atomically $ 'withSTMResources' rs f1 >> return() where f1 mrs= let as= f mrs in Resources as [] ()@ -withResources:: (IResource a,Typeable a)=> [a]-> ([Maybe a]-> [a])-> IO () -withResources rs f= atomically $ withSTMResources rs f1 >> return() where - f1 mrs= let as= f mrs in Resources as [] () - --- | to read a resource from the cache. - --- @getResource r= do{mr<- 'getResources' [r];return $! head mr}@ -getResource:: (IResource a, Typeable a)=>a-> IO (Maybe a) -getResource r= do{mr<- getResources [r];return $! head mr} - ---- | to read a list of resources from the cache if they exist - --- | @getResources rs= atomically $ 'withSTMResources' rs f1 where f1 mrs= Resources [] [] mrs@ -getResources:: (IResource a, Typeable a)=>[a]-> IO [Maybe a] -getResources rs= atomically $ withSTMResources rs f1 where - f1 mrs= Resources [] [] mrs - - --- | delete the resource from cache and from persistent storage. --- @ deleteResource r= 'deleteResources' [r] @ -deleteResource :: (IResource a, Typeable a) => a -> IO () -deleteResource r= deleteResources [r] - --- | delete the list of resources from cache and from persistent storage. - --- @ deleteResources rs= atomically $ 'withSTMResources' rs f1 where f1 mrs = Resources [] (catMaybes mrs) ()@ -deleteResources :: (IResource a, Typeable a) => [a] -> IO () -deleteResources rs= atomically $ withSTMResources rs f1 where - f1 mrs = resources {toDelete=catMaybes mrs} - - -takeDBRefs :: (IResource a, Typeable a) => [a] -> Ht -> CheckTPVarFlags -> STM [Maybe (DBRef a)] -takeDBRefs rs cache addToHash= mapM (takeDBRef cache addToHash) rs - - -{-# NOINLINE takeDBRef #-} -takeDBRef :: (IResource a, Typeable a) => Ht -> CheckTPVarFlags -> a -> STM(Maybe (DBRef a)) -takeDBRef cache flags x =do - - let keyr= keyResource x - c <- unsafeIOToSTM $ H.lookup cache keyr - case c of - Just (CacheElem _ w) -> do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Just dbref -> return . Just $! castErr dbref - Nothing -> unsafeIOToSTM (finalize w) >> takeDBRef cache flags x - Nothing -> do - safeIOToSTM $ readToCache flags cache keyr x -- unsafeIOToSTM $ readResourceByKey keyr - - where - readToCache flags cache key x= do - mr <- readResource x - case mr of - Nothing -> return Nothing - Just r2 -> do - ti <- timeInteger - tvr <- newTVarIO . Exist $ Elem r2 ti ti - case flags of - NoAddToHash -> return . 
Just $ DBRef key tvr - AddToHash -> do - dbref <- evaluate $ DBRef key tvr - w <- mkWeakPtr dbref . Just $ deleteFromCache dbref - H.update cache key (CacheElem (Just dbref) w) - return $ Just dbref - -- !> ("readToCache "++ key) - - - -timeInteger= do TOD t _ <- getClockTime - return t - - - - - -releaseTPVars :: (IResource a,Typeable a)=> [a] -> Ht -> STM () -releaseTPVars rs cache = mapM_ (releaseTPVar cache) rs - -releaseTPVar :: (IResource a,Typeable a)=> Ht -> a -> STM () -releaseTPVar cache r =do - c <- unsafeIOToSTM $ H.lookup cache keyr - case c of - Just (CacheElem _ w) -> do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Nothing -> unsafeIOToSTM (finalize w) >> releaseTPVar cache r - Just dbref@(DBRef key tv) -> do - applyTriggers [dbref] [Just (castErr r)] - tadd dbref - t <- unsafeIOToSTM timeInteger - writeTVar tv . Exist $ Elem (castErr r) t t - - - Nothing -> do - ti <- unsafeIOToSTM timeInteger - tvr <- newTVar NotRead - dbref <- unsafeIOToSTM . evaluate $ DBRef keyr tvr - applyTriggers [dbref] [Just r] - tadd dbref - writeTVar tvr . Exist $ Elem r ti ti - w <- unsafeIOToSTM . mkWeakPtr dbref $ Just $ deleteFromCache dbref - unsafeIOToSTM $ H.update cache keyr (CacheElem (Just dbref) w)-- accesed and modified XXX - return () - - - where keyr= keyResource r - - - - -delListFromHash :: IResource a => Ht -> [a] -> STM () -delListFromHash cache xs= mapM_ del xs - where - del :: IResource a => a -> STM () - del x= do - let key= keyResource x - mr <- unsafeIOToSTM $ H.lookup cache key - case mr of - Nothing -> return () - Just (CacheElem _ w) -> do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Just dbref@(DBRef _ tv) -> do - writeTVar tv DoNotExist - Nothing -> do - unsafeIOToSTM (finalize w) >> del x - - - -updateListToHash hash kv= mapM (update1 hash) kv where - update1 h (k,v)= update h k v - - - --- | Start the thread that periodically call 'clearSyncCache' to clean and writes on the persistent storage. --- Otherwise, 'syncCache' must be invoked explicitly or no persistence will exist. --- Cache writes allways save a coherent state -clearSyncCacheProc :: - Int -- ^ number of seconds betwen checks. objects not written to disk are written - -> (Integer -> Integer-> Integer-> Bool) -- ^ The user-defined check-for-cleanup-from-cache for each object. 'defaultCheck' is an example - -> Int -- ^ The max number of objects in the cache, if more, the cleanup starts - -> IO ThreadId -- ^ Identifier of the thread created -clearSyncCacheProc time check sizeObjects= forkIO clear - where - clear =handle ( \ (e :: SomeException)-> hPutStr stderr (show e) >> clear ) $ do - threadDelay (fromIntegral$ time * 1000000) - clearSyncCache check sizeObjects --`debug` "CLEAR" - clear - -data Filtered= forall a. IResource a => Filtered a - --- | Force the atomic write of all cached objects modified since the last save into permanent storage --- Cache writes allways save a coherent state - -criticalSection mv f= bracket - (takeMVar mv) - (putMVar mv) - $ const $ f - -criticalSectionSTM mt f= do - r <- readTVar mt - if r == False then retry - else do - writeTVar mt True - f - writeTVar mt False - -syncCache :: IO () ---syncCache = bracket --- (takeMVar saving) --- (putMVar saving) --- $ const $ do -syncCache= criticalSection saving $ do - tosave <- atomically $ do - l <- tget !> "syncCache" - mapM (\(ToWrite x) ->readDBRef x >>= \my -> return $ fmap Filtered my) $ nub l - save $ catMaybes tosave - - - - - --- |Saves the unsaved elems of the cache. 
--- Allways save a coherent state --- It deletes some elems of the cache when the number of elems > 'maxObjects'. --- The deletion depends on the checking policy. 'defaultCheck' is the one implemented -clearSyncCache :: (Integer -> Integer-> Integer-> Bool)-> Int -> IO () -clearSyncCache check maxObjects= criticalSection saving $ do - t <- timeInteger - - tosave <- atomically $ do - l <- tget - mapM (\(ToWrite x) ->readDBRef x >>= \my -> return $ fmap Filtered my) $ nub l - save $ catMaybes tosave - - (cache,lastSync,_) <- readIORef refcache - elems <- toList cache - (elems, size) <- atomically $ extract elems lastSync - - when (size > maxObjects) $ forkIO (filtercache t cache lastSync elems) >> return () - - where - - -- delete elems from the cache according with the checking policy - filtercache t cache lastSync elems= mapM_ filter elems - where - filter (CacheElem Nothing w)= return() --alive because the dbref is being referenced elsewere - filter (CacheElem (Just (DBRef key _)) w) = do - mr <- deRefWeak w - case mr of - Nothing -> finalize w - Just (DBRef _ tv) -> atomically $ do - r <- readTVar tv - case r of - Exist (Elem x lastAccess _ ) -> - if check t lastAccess lastSync - then do - unsafeIOToSTM . H.update cache key $ CacheElem Nothing w - writeTVar tv NotRead - else return () - _ -> return() - - - --- | ths is a default cache clearance check. It forces to drop from the cache all the --- elems not accesed since half the time between now and the last sync --- if it returns True, the object will be discarded from the cache --- it is invoked when the cache size exceeds the number of objects configured --- in 'clearSyncCacheProc' or 'clearSyncCache' -defaultCheck - :: Integer -- ^ current time in seconds - -> Integer -- ^ last access time for a given object - -> Integer -- ^ last cache syncronization (with the persisten storage) - -> Bool -- ^ return true for all the elems not accesed since half the time between now and the last sync -defaultCheck now lastAccess lastSync - | lastAccess > halftime = False - | otherwise = True - - where - halftime= now- (now-lastSync) `div` 2 - -refConditions= unsafePerformIO $ newIORef (return(), return()) - -setConditions :: IO() -> IO() -> IO() --- ^ stablishes the procedures to call before and after saving with 'syncCache', 'clearSyncCache' or 'clearSyncCacheProc'. The postcondition of --- database persistence should be a commit. -setConditions pre post= writeIORef refConditions (pre, post) - -saving= unsafePerformIO $ newMVar False - - -save tosave = do - (pre, post) <- readIORef refConditions - pre -- !> "save" - mapM (\(Filtered x)-> writeResource x ) tosave - post - - - - - --- extract the elements that can be potentially deleted safely --- these that are not pointed directly by a DBRef which is part of an alive data structure -extract elems lastSave= filter1 [] (0:: Int) elems - where - filter1 val n []= return (val, n) - filter1 val n ((_, ch@(CacheElem mybe w)):rest)= do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Nothing -> unsafeIOToSTM (finalize w) >> filter1 val n rest - Just (DBRef key tvr) -> - let tofilter = case mybe of - Just _ -> ch:val - Nothing -> val - in filter1 tofilter (n+1) rest - - --- | executes the IO action in a separate thread --- -safeIOToSTM :: IO a -> STM a -safeIOToSTM req= unsafeIOToSTM $ do - tv <- newEmptyMVar - forkIO $ (req >>= putMVar tv . 
Right) - `Control.Exception.catch` - (\(e :: SomeException) -> putMVar tv (Left e)) - r <- takeMVar tv - case r of - Right x -> return x - Left e -> throw e - - - - diff --git a/Data/TCache/Defs.hs b/Data/TCache/Defs.hs index 9e00dd3..e1c5951 100644 --- a/Data/TCache/Defs.hs +++ b/Data/TCache/Defs.hs @@ -103,7 +103,7 @@ class Serializable a where deserialize :: B.ByteString -> a deserialize= error "No deserialization defined for your data" deserialKey :: String -> B.ByteString -> a - deserialKey _ v= deserialize v + deserialKey _ = deserialize setPersist :: a -> Maybe Persist -- ^ `defaultPersist` if Nothing setPersist = const Nothing @@ -121,9 +121,9 @@ type Key= String -- | a persist mechanism has to implement these three primitives -- 'filePersist' is the default file persistence data Persist = Persist{ - readByKey :: (Key -> IO(Maybe B.ByteString)) -- ^ read by key. It must be strict - , write :: (Key -> B.ByteString -> IO()) -- ^ write. It must be strict - , delete :: (Key -> IO())} -- ^ delete + readByKey :: Key -> IO(Maybe B.ByteString) -- ^ read by key. It must be strict + , write :: Key -> B.ByteString -> IO() -- ^ write. It must be strict + , delete :: Key -> IO()} -- ^ delete -- | Implements default default-persistence of objects in files with their keys as filenames filePersist = Persist @@ -131,6 +131,7 @@ filePersist = Persist ,write = defaultWrite ,delete = defaultDelete} +{-# NOINLINE defaultPersistIORef #-} defaultPersistIORef = unsafePerformIO $ newIORef filePersist -- | Set the default persistence mechanism of all 'serializable' objects that have @@ -139,6 +140,7 @@ defaultPersistIORef = unsafePerformIO $ newIORef filePersist -- this statement must be the first one before any other TCache call setDefaultPersist p= writeIORef defaultPersistIORef p +{-# NOINLINE getDefaultPersist #-} getDefaultPersist = unsafePerformIO $ readIORef defaultPersistIORef getPersist x= unsafePerformIO $ case setPersist x of @@ -178,7 +180,7 @@ safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("wr safeWrite filename str - | otherwise= if ("invalid" `isInfixOf` ioeGetErrorString e) + | otherwise= if "invalid" `isInfixOf` ioeGetErrorString e then error $ "defaultWriteResource: " ++ show e ++ " defPath and/or keyResource are not suitable for a file path: "++ filename else do @@ -186,7 +188,7 @@ safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("wr safeWrite filename str defaultDelete :: String -> IO() -defaultDelete filename =do +defaultDelete filename = handle (handler filename) $ removeFile filename where diff --git a/TCache.cabal b/TCache.cabal index 629e71d..505cba2 100644 --- a/TCache.cabal +++ b/TCache.cabal @@ -63,7 +63,7 @@ source-repository head library build-depends: base >=4 && <5, bytestring -any, containers >=0.1.0.1, directory >=1.0, old-time >=1.0, - stm -any, text -any, mtl -any, hashtables, + stm -any, text -any, mtl -any, hashtables, hashable, RefSerialize -any diff --git a/demos/basicSample.hs b/demos/basicSample.hs index 48b47d2..c40c122 100644 --- a/demos/basicSample.hs +++ b/demos/basicSample.hs @@ -19,6 +19,10 @@ data Data= User{uname::String, uid::String, spent:: Int} | deriving (Read, Show, Typeable) +-- defining prototypes to make missing-fields warning useful again +user_ = User{uname = undefined, uid = undefined, spent = undefined } +item_ = Item{iname = undefined, iid = undefined, price = undefined, stock = undefined } + -- The mappings between the cache and the phisical storage are defined by the interface IResource -- to 
extract the unique key, -- to serializa to string @@ -68,12 +72,12 @@ main= do --11 PCs are charged to the John´s account in paralel, to show transactionality --because there are only 10 PCs in stock, the last thread must return an error - for 11 $ forkIO $ User{uid="U12345"} `buy` Item{iid="I54321"} + for 11 $ forkIO $ user_{uid="U12345"} `buy` item_{iid="I54321"} --wait 1 seconds threadDelay 1000000 - [us,it] <- getResources [User{uid="U12345"}, Item{iid="I54321"}] + [us,it] <- getResources [user_{uid="U12345"}, item_{iid="I54321"}] putStrLn $ "user data=" ++ show us putStrLn $ "item data=" ++ show it From d1504e94b4927249b05e852e996e49725900f95e Mon Sep 17 00:00:00 2001 From: Hans Raaf Date: Wed, 17 Apr 2019 13:15:01 +0200 Subject: [PATCH 2/5] Cleaning up, adjusting some demos and documentation - solving most hlint hints (learning something by the way) - rewrote some code mostly based on hlint suggestions - removed (or commented out) some dead code - fixed demos which where not working anymore - corrected some docs (hopefully accurate enough) --- Data/TCache.hs | 178 +++++++++++++++------------ Data/TCache/DefaultDefs.hs | 195 ------------------------------ Data/TCache/DefaultPersistence.hs | 9 +- Data/TCache/Defs.hs | 46 +++---- Data/TCache/IResource.hs | 138 +-------------------- Data/TCache/IndexQuery.hs | 58 ++++----- Data/TCache/IndexText.hs | 150 +++++++++++------------ Data/TCache/Memoization.hs | 11 +- Data/TCache/Triggers.hs | 19 ++- buildDemos.sh | 8 +- demos/DBRef.hs | 7 +- demos/DynamicSample.hs | 38 +++--- demos/basicSample.hs | 70 +++++------ demos/caching.hs | 85 ++++++++++--- demos/indexQuery.hs | 8 +- demos/indexText.hs | 4 +- demos/memoization.hs | 6 +- 17 files changed, 387 insertions(+), 643 deletions(-) delete mode 100644 Data/TCache/DefaultDefs.hs diff --git a/Data/TCache.hs b/Data/TCache.hs index f961577..ea49df9 100644 --- a/Data/TCache.hs +++ b/Data/TCache.hs @@ -1,5 +1,5 @@ -{-# LANGUAGE ScopedTypeVariables, ExistentialQuantification, DeriveDataTypeable - , FlexibleInstances, UndecidableInstances #-} +{-# LANGUAGE ScopedTypeVariables, ExistentialQuantification, + FlexibleInstances, UndecidableInstances #-} {- | TCache is a transactional cache with configurable persistence that permits STM transactions with objects that synchronize synchronously or asynchronously with @@ -281,6 +281,7 @@ clearsyncCache just pass elements from 2 to 1 ,setConditions ,clearSyncCache ,numElems +,statElems ,syncWrite ,SyncMode(..) 
,clearSyncCacheProc @@ -292,24 +293,24 @@ where import GHC.Conc -import Control.Monad(when) -import qualified Data.HashTable.IO as H -import Data.IORef -import System.IO.Unsafe +import Control.Monad(when, void) +import qualified Data.HashTable.IO as H(BasicHashTable, new, insert, lookup, toList) +import Data.IORef(IORef, newIORef, readIORef, writeIORef) +import System.IO.Unsafe(unsafePerformIO) import System.IO(hPutStr, stderr) -import Data.Maybe +import Data.Maybe(catMaybes) +import Data.Foldable(forM_) import Data.Char(isSpace) import Data.TCache.Defs import Data.TCache.IResource import Data.TCache.Triggers -import Control.Exception -import Data.Typeable -import System.Time -import System.Mem -import System.Mem.Weak +import Data.Typeable(Typeable) +import System.Time(getClockTime, ClockTime(TOD)) +import System.Mem(performGC) +import System.Mem.Weak(Weak, deRefWeak, mkWeakPtr, finalize) -import Control.Concurrent.MVar -import Control.Exception(catch, throw,evaluate) +import Control.Concurrent.MVar(newMVar, newEmptyMVar, takeMVar, putMVar) +import Control.Exception(catch, handle, throw, evaluate, bracket, SomeException) --import Debug.Trace --(!>) = flip trace @@ -351,7 +352,31 @@ numElems= do return $ length elems -deRefWeakSTM = unsafeIOToSTM . deRefWeak +-- | Retuns some statistical information for the DBRefs in the cache (for debugging) +-- This returns a tuple containing: +-- total : count of the total elements in cache +-- dirty : the elements which need to be written to the persistent storage +-- loaded : the elements which are currently hold in memory +statElems :: IO (Int, Int, Int) +statElems = do + (cache, lastSync) <- readIORef refcache + elems <- H.toList cache + (tosave, elems, size) <- atomically $ extract elems lastSync + counted <- mapM count elems + return (size, length tosave, sum counted) + where + count (CacheElem _ w)= do + mr <- deRefWeak w + case mr of + Just (DBRef _ tv) -> do + r <- readTVarIO tv + case r of + Exist (Elem x _ mt) -> return 1 + DoNotExist -> return 0 + NotRead -> return 0 + Nothing -> finalize w >> return 0 + +-- deRefWeakSTM = unsafeIOToSTM . deRefWeak --deleteFromCache :: (IResource a, Typeable a) => DBRef a -> IO () --deleteFromCache (DBRef k tv)= do @@ -375,7 +400,7 @@ readDBRef dbref@(DBRef key tv)= do t <- unsafeIOToSTM timeInteger writeTVar tv . Exist $ Elem x t mt return $ Just x - DoNotExist -> return $ Nothing + DoNotExist -> return Nothing NotRead -> do r <- safeIOToSTM $ readResourceByKey key case r of @@ -386,7 +411,7 @@ readDBRef dbref@(DBRef key tv)= do return $ Just x -- | Read multiple DBRefs in a single request using the new 'readResourcesByKey' -readDBRefs :: (IResource a, Typeable a) => [DBRef a] -> STM [(Maybe a)] +readDBRefs :: (IResource a, Typeable a) => [DBRef a] -> STM [Maybe a] readDBRefs dbrefs= do let mf (DBRef key tv)= do r <- readTVar tv @@ -403,12 +428,12 @@ readDBRefs dbrefs= do let fromLeft (Left k)= k formLeft _ = error "this will never happen" rs <- safeIOToSTM . readResourcesByKey $ map fromLeft toReadKeys - let processTVar (r, DBRef key tv)= do - case r of - Nothing -> writeTVar tv DoNotExist - Just x -> do - t <- unsafeIOToSTM timeInteger - writeTVar tv $ Exist $ Elem x t (-1) + let processTVar (r, DBRef key tv)= + case r of + Nothing -> writeTVar tv DoNotExist + Just x -> do + t <- unsafeIOToSTM timeInteger + writeTVar tv $ Exist $ Elem x t (-1) mapM_ processTVar $ zip rs dbrs let mix (Right x:xs) ys = x:mix xs ys @@ -492,6 +517,7 @@ getDBRef key= unsafePerformIO $! getDBRef1 $! 
key where putMVar getRefFlag () return dbref +{-# NOINLINE getRefFlag #-} getRefFlag= unsafePerformIO $ newMVar () {- | Create the object passed as parameter (if it does not exist) and @@ -663,25 +689,22 @@ withSTMResources :: (IResource a, Typeable a)=> [a] -- ^ the list of resources -> ([Maybe a]-> Resources a x) -- ^ The function that process the resources found and return a Resources structure -> STM x -- ^ The return value in the STM monad. -withSTMResources rs f= do - (cache,_) <- unsafeIOToSTM $ readIORef refcache - mtrs <- takeDBRefs rs cache AddToHash - +withSTMResources rs f = do + (cache, _) <- unsafeIOToSTM $ readIORef refcache + mtrs <- takeDBRefs rs cache AddToHash mrs <- mapM mreadDBRef mtrs case f mrs of - Retry -> retry - Resources as ds r -> do - applyTriggers (map (getDBRef . keyResource) ds) (repeat (Nothing `asTypeOf` (Just(head ds)))) - delListFromHash cache ds - releaseTPVars as cache - - safeIOToSTM . criticalSection saving $ mapM_ delResource ds - return r - + Retry -> retry + Resources as ds r -> do + applyTriggers (map (getDBRef . keyResource) ds) (repeat (Nothing `asTypeOf` Just (head ds))) + delListFromHash cache ds + releaseTPVars as cache + safeIOToSTM . criticalSection saving $ mapM_ delResource ds + return r where - mreadDBRef :: (IResource a, Typeable a) => Maybe (DBRef a) -> STM (Maybe a) - mreadDBRef (Just dbref)= readDBRef dbref - mreadDBRef Nothing = return Nothing + mreadDBRef :: (IResource a, Typeable a) => Maybe (DBRef a) -> STM (Maybe a) + mreadDBRef (Just dbref) = readDBRef dbref + mreadDBRef Nothing = return Nothing -- | Update of a single object in the cache @@ -697,8 +720,11 @@ withResource r f= withResources [r] (\[mr]-> [f mr]) -- @ withResources rs f= atomically $ 'withSTMResources' rs f1 >> return() where f1 mrs= let as= f mrs in Resources as [] ()@ {-# INLINE withResources #-} withResources:: (IResource a,Typeable a)=> [a]-> ([Maybe a]-> [a])-> IO () -withResources rs f= atomically $ withSTMResources rs f1 >> return() where - f1 mrs= let as= f mrs in Resources as [] () +withResources rs f = atomically $ void (withSTMResources rs f1) + where + f1 mrs = + let as = f mrs + in Resources as [] () -- | To read a resource from the cache. -- @@ -811,7 +837,7 @@ releaseTPVar cache r =do delListFromHash :: IResource a => Ht -> [a] -> STM () -delListFromHash cache xs= mapM_ del xs +delListFromHash cache= mapM_ del where del :: IResource a => a -> STM () del x= do @@ -822,20 +848,20 @@ delListFromHash cache xs= mapM_ del xs Just (CacheElem _ w) -> do mr <- unsafeIOToSTM $ deRefWeak w case mr of - Just dbref@(DBRef _ tv) -> do + Just dbref@(DBRef _ tv) -> writeTVar tv DoNotExist - Nothing -> do + Nothing -> unsafeIOToSTM (finalize w) >> del x - +{- never used updateListToHash hash kv= mapM (update1 hash) kv where update1 h (k,v)= H.insert h k v - +-} -- | Start the thread that periodically call `clearSyncCache` to clean and writes on the persistent storage. --- it is indirecly set by means of `syncWrite`, since it is more higuer level. I recommend to use the latter +-- it is indirectly set by means of `syncWrite`, since it is more higuer level. I recommend to use the latter -- Otherwise, 'syncCache' or `clearSyncCache` or `atomicallySync` must be invoked explicitly or no persistence will exist. 
-- Cache writes allways save a coherent state clearSyncCacheProc :: @@ -854,10 +880,10 @@ clearSyncCacheProc time check sizeObjects= forkIO clear criticalSection mv f= bracket (takeMVar mv) (putMVar mv) - $ const $ f + $ const f -- | Force the atomic write of all cached objects modified since the last save into permanent storage. --- Cache writes allways save a coherent state. As allways, only the modified objects are written. +-- Cache writes allways save a coherent state. As always, only the modified objects are written. syncCache :: IO () syncCache = criticalSection saving $ do (cache,lastSync) <- readIORef refcache --`debug` "syncCache" @@ -871,7 +897,7 @@ syncCache = criticalSection saving $ do data SyncMode= Synchronous -- ^ sync state to permanent storage when `atomicallySync` is invoked | Asynchronous {frequency :: Int -- ^ number of seconds between saves when asynchronous - ,check :: (Integer-> Integer-> Integer-> Bool) -- ^ The user-defined check-for-cleanup-from-cache for each object. 'defaultCheck' is an example + ,check :: Integer-> Integer-> Integer-> Bool -- ^ The user-defined check-for-cleanup-from-cache for each object. 'defaultCheck' is an example ,cacheSize :: Int -- ^ size of the cache when async } | SyncManual -- ^ use `syncCache` to write the state @@ -879,21 +905,22 @@ data SyncMode= Synchronous -- ^ sync state to permanent storage when `atomical +{-# NOINLINE tvSyncWrite #-} tvSyncWrite= unsafePerformIO $ newIORef (Synchronous, Nothing) -- | Specify the cache synchronization policy with permanent storage. See `SyncMode` for details syncWrite:: SyncMode -> IO() -syncWrite mode= do - (_,thread) <- readIORef tvSyncWrite - when (isJust thread ) $ killThread . fromJust $ thread - case mode of - Synchronous -> modeWrite - SyncManual -> modeWrite - Asynchronous time check maxsize -> do - th <- clearSyncCacheProc time check maxsize >> return() - writeIORef tvSyncWrite (mode,Just th) - where - modeWrite= writeIORef tvSyncWrite (mode, Nothing) +syncWrite mode = do + (_, thread) <- readIORef tvSyncWrite + forM_ thread killThread + case mode of + Synchronous -> modeWrite + SyncManual -> modeWrite + Asynchronous time check maxsize -> do + th <- void $ clearSyncCacheProc time check maxsize + writeIORef tvSyncWrite (mode, Just th) + where + modeWrite = writeIORef tvSyncWrite (mode, Nothing) -- | Perform a synchronization of the cache with permanent storage once executed the STM transaction @@ -908,14 +935,13 @@ atomicallySync proc=do sync= do (savetype,_) <- readIORef tvSyncWrite case savetype of - Synchronous -> do - syncCache + Synchronous -> syncCache _ -> return () -- |Saves the unsaved elems of the cache. -- Cache writes allways save a coherent state. --- Unlike `syncChace` this call deletes some elems of the cache when the number of elems > @sizeObjects@. +-- Unlike `syncCache` this call deletes some elems from the cache when the number of elems > @sizeObjects@. -- The deletion depends on the check criteria, expressed by the first parameter. -- 'defaultCheck' is the one implemented to be passed by default. Look at it to understand the clearing criteria. 
clearSyncCache :: (Integer -> Integer-> Integer-> Bool)-> Int -> IO () @@ -931,8 +957,8 @@ clearSyncCache check sizeObjects= criticalSection saving $ do where - -- delete elems from the cache according with the checking criteria - filtercache t cache lastSync elems= mapM_ filter elems + -- delete elems from the cache according with the checking criteria + filtercache t cache lastSync = mapM_ filter where filter (CacheElem Nothing w)= return() --alive because the dbref is being referenced elsewere filter (CacheElem (Just (DBRef key _)) w) = do @@ -942,13 +968,11 @@ clearSyncCache check sizeObjects= criticalSection saving $ do Just (DBRef _ tv) -> atomically $ do r <- readTVar tv case r of - Exist (Elem x lastAccess _ ) -> - if check t lastAccess lastSync - then do - unsafeIOToSTM . H.insert cache key $ CacheElem Nothing w - writeTVar tv NotRead - else return () - _ -> return() + Exist (Elem x lastAccess _ ) -> + when (check t lastAccess lastSync) $ do + unsafeIOToSTM . H.insert cache key $ CacheElem Nothing w + writeTVar tv NotRead + _ -> return() @@ -960,7 +984,7 @@ clearSyncCache check sizeObjects= criticalSection saving $ do defaultCheck :: Integer -- ^ current time in seconds -> Integer -- ^ last access time for a given object - -> Integer -- ^ last cache syncronization (with the persisten storage) + -> Integer -- ^ last cache synchronization (with the persisten storage) -> Bool -- ^ return true for all the elems not accesed since half the time between now and the last sync defaultCheck now lastAccess lastSync | lastAccess > halftime = False @@ -969,6 +993,7 @@ defaultCheck now lastAccess lastSync where halftime= now- (now-lastSync) `div` 2 +{-# NOINLINE refConditions #-} refConditions= unsafePerformIO $ newIORef (return(), return()) setConditions :: IO() -> IO() -> IO() @@ -976,12 +1001,13 @@ setConditions :: IO() -> IO() -> IO() -- database persistence should be a commit. setConditions pre post= writeIORef refConditions (pre, post) +{-# NOINLINE saving #-} saving= unsafePerformIO $ newMVar False save tosave = do (pre, post) <- readIORef refConditions pre -- !> (concatMap (\(Filtered x) -> keyResource x)tosave) - mapM (\(Filtered x) -> writeResource x) tosave + mapM_ (\(Filtered x) -> writeResource x) tosave post @@ -1003,7 +1029,7 @@ extract elems lastSave= filter1 [] [] (0:: Int) elems r <- readTVar tvr case r of Exist (Elem r _ modTime) -> - if (modTime >= lastSave) + if modTime >= lastSave then filter1 (Filtered r:sav) tofilter (n+1) rest else filter1 sav tofilter (n+1) rest -- !> ("rejected->" ++ keyResource r) diff --git a/Data/TCache/DefaultDefs.hs b/Data/TCache/DefaultDefs.hs deleted file mode 100644 index d7ca10f..0000000 --- a/Data/TCache/DefaultDefs.hs +++ /dev/null @@ -1,195 +0,0 @@ ------------------------------------------------------------------------------ --- --- Module : Data.TCache.DefaultDefs --- Copyright : --- License : BSD3 --- --- Maintainer : agocorona@gmail.com --- Stability : --- Portability : --- --- | --- ------------------------------------------------------------------------------ -{- | - -This module decouples the interface of 'IResource" class in two classes -one for key extraction 'Indexable' and other ('Serializable" for serlalization and persistence -.This last one defines persistence in files as default, but it can be changed -to persistence in databases, for examople. 
- --} -{-# LANGUAGE FlexibleInstances, UndecidableInstances - , MultiParamTypeClasses, FunctionalDependencies - - , ExistentialQuantification - , ScopedTypeVariables - - #-} -module Data.TCache.DefaultDefs(Indexable(..),Serializable(..),defaultPersist,Persist(..)) where - - -import Data.TCache.IResource -import Data.Typeable -import System.IO.Unsafe -import Data.IORef -import System.Directory -import Control.Monad(when,replicateM) -import System.IO -import System.IO.Error -import Control.Exception as Exception -import Control.Concurrent -import Data.List(elemIndices,isInfixOf) -import Data.Maybe(fromJust) -import Data.TCache.Defs(castErr) -import qualified Data.ByteString.Lazy.Char8 as B - ---import Debug.Trace --- ---a !> b = trace b a - - - -{- | Indexable is an utility class used to derive instances of IResource - -Example: - -@data Person= Person{ pname :: String, cars :: [DBRef Car]} deriving (Show, Read, Typeable) -data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, Typeable) -@ - -Since Person and Car are instances of 'Read' ans 'Show', by defining the 'Indexable' instance -will implicitly define the IResource instance for file persistence: - -@ -instance Indexable Person where key Person{pname=n} = \"Person \" ++ n -instance Indexable Car where key Car{cname= n} = \"Car \" ++ n -@ --} - - -class Indexable a where - key:: a -> String - defPath :: a -> String -- ^ additional extension for default file paths. - -- The default value is "data/". - - -- IMPORTANT: defPath must depend on the datatype, not the value (must be constant). Default is "TCacheData/" - defPath = const "TCacheData/" - ---instance IResource a => Indexable a where --- key x= keyResource x - -{- | Serialize is an abstract serialization ionterface in order to define implicit instances of IResource. -The deserialization must be as lazy as possible if deserialized objects contain DBRefs, -lazy deserialization avoid unnecesary DBRef instantiations when they are not accessed, -since DBRefs instantiations involve extra cache lookups -For this reason serialization/deserialization is to/from ordinary Strings -serialization/deserialization are not performance critical in TCache - -Read, Show, instances are implicit instances of Serializable - -> serialize = show -> deserialize= read - -Since write and read to disk of to/from the cache must not be very often -The performance of serialization is not critical. 
--} -class Serializable a {-serialFormat-} | a -> {-serialFormat-} where - serialize :: a -> B.ByteString --serialFormat - deserialize :: {-serialFormat-} B.ByteString -> a - setPersist :: a -> Persist - setPersist _= defaultPersist - ---instance (Show a, Read a)=> Serializable a where --- serialize= show --- deserialize= read - - --- | a persist mechanism has to implement these three primitives --- 'defaultpersist' is the default file persistence -data Persist = Persist{ - readByKey :: (String -> IO(Maybe B.ByteString)) -- ^ read by key - , write :: (String -> B.ByteString -> IO()) -- ^ write - , delete :: (String -> IO())} -- ^ delete - -defaultPersist= Persist - {readByKey= defaultReadByKey - ,write= defaultWrite - ,delete= defaultDelete} - -getPersist x= return (setPersist x) - `Exception.catch` (\(e:: SomeException) -> error "setPersist must not depend on the type, not the value of the parameter: " ) - - - - -defaultReadByKey :: String-> IO (Maybe B.ByteString) -defaultReadByKey k= iox -- !> "defaultReadByKey" - where - iox = handle handler $ do - s <- readFileStrict k - return $ Just s -- `debug` ("read "++ filename) - - - handler :: IOError -> IO (Maybe B.ByteString) - handler e - | isAlreadyInUseError e = defaultReadByKey k - | isDoesNotExistError e = return Nothing - | otherwise= if ("invalid" `isInfixOf` ioeGetErrorString e) - then - error $ "readResource: " ++ show e ++ " defPath and/or keyResource are not suitable for a file path" - - else defaultReadByKey k - - -defaultWrite :: String-> B.ByteString -> IO() -defaultWrite filename x= safeWrite filename x -safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("write "++filename) - where - handler e-- (e :: IOError) - | isDoesNotExistError e=do - createDirectoryIfMissing True $ take (1+(last $ elemIndices '/' filename)) filename --maybe the path does not exist - safeWrite filename str - - - | otherwise= if ("invalid" `isInfixOf` ioeGetErrorString e) - then - error $ "writeResource: " ++ show e ++ " defPath and/or keyResource are not suitable for a file path" - else do - hPutStrLn stderr $ "defaultWriteResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" - safeWrite filename str - -defaultDelete :: String -> IO() -defaultDelete filename =do - handle (handler filename) $ removeFile filename - --print ("delete "++filename) - where - - handler :: String -> IOException -> IO () - handler file e - | isDoesNotExistError e= return () --`debug` "isDoesNotExistError" - | isAlreadyInUseError e= do - hPutStrLn stderr $ "defaultDelResource: busy" ++ " in file: " ++ filename ++ " retrying" --- threadDelay 100000 --`debug`"isAlreadyInUseError" - defaultDelete filename - | otherwise = do - hPutStrLn stderr $ "defaultDelResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" --- threadDelay 100000 --`debug` ("otherwise " ++ show e) - defaultDelete filename - - - - --- | Strict read from file, needed for default file persistence -readFileStrict f = openFile f ReadMode >>= \ h -> readIt h `finally` hClose h - where - readIt h= do - s <- hFileSize h - let n= fromIntegral s - str <- B.hGet h n -- replicateM n (B.hGetChar h) - return str - - - - - diff --git a/Data/TCache/DefaultPersistence.hs b/Data/TCache/DefaultPersistence.hs index 98ea261..f5b4dee 100644 --- a/Data/TCache/DefaultPersistence.hs +++ b/Data/TCache/DefaultPersistence.hs @@ -1,9 +1,6 @@ -{-# LANGUAGE FlexibleInstances, UndecidableInstances - , MultiParamTypeClasses, FunctionalDependencies - - , ExistentialQuantification - , ScopedTypeVariables - 
#-} +{-# LANGUAGE FlexibleInstances, UndecidableInstances, + MultiParamTypeClasses, ExistentialQuantification, + ScopedTypeVariables #-} {- | This module decouples the 'IResource" class in two classes one for key extraction 'Indexable' and other ('Serializable" for serlalization and persistence diff --git a/Data/TCache/Defs.hs b/Data/TCache/Defs.hs index e1c5951..c561c01 100644 --- a/Data/TCache/Defs.hs +++ b/Data/TCache/Defs.hs @@ -1,4 +1,4 @@ -{-# LANGUAGE TypeSynonymInstances, FlexibleInstances, ScopedTypeVariables, DeriveDataTypeable #-} +{-# LANGUAGE FlexibleInstances, ScopedTypeVariables, DeriveDataTypeable #-} {- | some internal definitions. To use default persistence, import @Data.TCache.DefaultPersistence@ instead -} @@ -18,7 +18,7 @@ import System.IO.Error import Control.Exception as Exception import Control.Concurrent import Data.List(elemIndices,isInfixOf) -import Data.Maybe(fromJust) +import Data.Maybe(fromJust, fromMaybe) import qualified Data.ByteString.Lazy.Char8 as B @@ -40,10 +40,10 @@ data DBRef a= DBRef !String !(TPVar a) deriving Typeable castErr a= r where - r= case cast a of - Nothing -> error $ "Type error: " ++ (show $ typeOf a) ++ " does not match "++ (show $ typeOf r) - ++ "\nThis means that objects of these two types have the same key \nor the retrieved object type is not the previously stored one for the same key\n" - Just x -> x + r = fromMaybe + (error $ "Type error: " ++ show (typeOf a) ++ " does not match " ++ show (typeOf r) + ++ "\nThis means that objects of these two types have the same key \nor the retrieved object type is not the previously stored one for the same key\n") + (cast a) {- | Indexable is an utility class used to derive instances of IResource @@ -138,7 +138,7 @@ defaultPersistIORef = unsafePerformIO $ newIORef filePersist -- @setPersist= const Nothing@. 
By default it is 'filePersist' -- -- this statement must be the first one before any other TCache call -setDefaultPersist p= writeIORef defaultPersistIORef p +setDefaultPersist = writeIORef defaultPersistIORef {-# NOINLINE getDefaultPersist #-} getDefaultPersist = unsafePerformIO $ readIORef defaultPersistIORef @@ -154,30 +154,31 @@ getPersist x= unsafePerformIO $ case setPersist x of defaultReadByKey :: String-> IO (Maybe B.ByteString) defaultReadByKey k= iox -- !> "defaultReadByKey" where - iox = handle handler $ do - s <- readFileStrict k + iox = handle handler $ do + s <- readFileStrict k return $ Just s -- `debug` ("read "++ filename) - + handler :: IOError -> IO (Maybe B.ByteString) handler e - | isAlreadyInUseError e = defaultReadByKey k + | isAlreadyInUseError e = defaultReadByKey k | isDoesNotExistError e = return Nothing - | otherwise= if ("invalid" `isInfixOf` ioeGetErrorString e) + | otherwise= if "invalid" `isInfixOf` ioeGetErrorString e then error $ "defaultReadByKey: " ++ show e ++ " defPath and/or keyResource are not suitable for a file path:\n"++ k++"\"" - + else defaultReadByKey k defaultWrite :: String-> B.ByteString -> IO() -defaultWrite filename x= safeWrite filename x +defaultWrite = safeWrite + safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("write "++filename) - where + where handler e-- (e :: IOError) - | isDoesNotExistError e=do - createDirectoryIfMissing True $ take (1+(last $ elemIndices '/' filename)) filename --maybe the path does not exist - safeWrite filename str + | isDoesNotExistError e=do + createDirectoryIfMissing True $ take (1 + last (elemIndices '/' filename)) filename --maybe the path does not exist + safeWrite filename str | otherwise= if "invalid" `isInfixOf` ioeGetErrorString e @@ -186,7 +187,7 @@ safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("wr else do hPutStrLn stderr $ "defaultWriteResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" safeWrite filename str - + defaultDelete :: String -> IO() defaultDelete filename = handle (handler filename) $ removeFile filename @@ -199,7 +200,7 @@ defaultDelete filename = | isAlreadyInUseError e= do hPutStrLn stderr $ "defaultDelResource: busy" ++ " in file: " ++ filename ++ " retrying" -- threadDelay 100000 --`debug`"isAlreadyInUseError" - defaultDelete filename + defaultDelete filename | otherwise = do hPutStrLn stderr $ "defaultDelResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" -- threadDelay 100000 --`debug` ("otherwise " ++ show e) @@ -213,7 +214,7 @@ defReadResourceByKey k= iox where f file >>= evaluate . fmap (deserialKey k) where file= defPath x ++ k - x= undefined `asTypeOf` (fromJust $ unsafePerformIO iox) + x= undefined `asTypeOf` fromJust (unsafePerformIO iox) defWriteResource s= do let Persist _ f _ = getPersist s @@ -230,6 +231,5 @@ readFileStrict f = openFile f ReadMode >>= \ h -> readIt h `finally` hClose h readIt h= do s <- hFileSize h let n= fromIntegral s - str <- B.hGet h n - return str + B.hGet h n diff --git a/Data/TCache/IResource.hs b/Data/TCache/IResource.hs index c071e31..fd17edd 100644 --- a/Data/TCache/IResource.hs +++ b/Data/TCache/IResource.hs @@ -9,7 +9,6 @@ import Control.Concurrent import Control.Exception as Exception import System.IO import System.IO.Error -import Data.List(elemIndices) import Control.Monad(when,replicateM) import Data.List(isInfixOf) @@ -45,7 +44,7 @@ class IResource a where . 
However, because it is executed by 'safeIOToSTM' it is guaranteed that the execution is not interrupted. -} readResourceByKey :: String -> IO(Maybe a) - readResourceByKey k= return . head =<< readResourcesByKey [k] + readResourceByKey k= head <$> readResourcesByKey [k] -- | hopefully optimized read of many objects by key. readResourcesByKey :: [String] -> IO [Maybe a] readResourcesByKey = mapM readResourceByKey @@ -90,138 +89,3 @@ data Resources a b -- | Empty resources: @resources= Resources [] [] ()@ resources :: Resources a () resources = Resources [] [] () - - -{- - -{- | Indexable is an utility class used to derive instances of IResource - -Example: - -@data Person= Person{ pname :: String, cars :: [DBRef Car]} deriving (Show, Read, Typeable) -data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, Typeable) -@ - -Since Person and Car are instances of 'Read' ans 'Show', by defining the 'Indexable' instance -will implicitly define the IResource instance for file persistence: - -@ -instance Indexable Person where key Person{pname=n} = \"Person \" ++ n -instance Indexable Car where key Car{cname= n} = \"Car \" ++ n -@ --} -class Indexable a where - key:: a -> String - defPath :: a -> String -- ^ additional extension for default file paths. - -- The default value is "data/". - - -- IMPORTANT: defPath must depend on the datatype, not the value (must be constant). Default is "TCacheData/" - defPath = const "TCacheData/" - ---instance IResource a => Indexable a where --- key x= keyResource x - -{- | Serialize is an abstract serialization ionterface in order to define implicit instances of IResource. -The deserialization must be as lazy as possible if deserialized objects contain DBRefs, -lazy deserialization avoid unnecesary DBRef instantiations when they are not accessed, -since DBRefs instantiations involve extra cache lookups -For this reason serialization/deserialization is to/from ordinary Strings -serialization/deserialization are not performance critical in TCache --} -class Serializable a where - serialize :: a -> String - deserialize :: String -> a - - --} - - - -{- -defaultReadResource :: (Serializable a, Indexable a, Typeable a) => a -> IO (Maybe a) -defaultReadResource x= defaultReadResourceByKey $ key x - - -defaultReadResourceByKey :: (Serializable a, Indexable a) => String-> IO (Maybe a) -defaultReadResourceByKey k= iox - where - iox = handle handler $ do - s <- readFileStrict filename :: IO String - return $ Just (deserialize s ) -- `debug` ("read "++ filename) - - filename= defPathIO iox ++ k - - defPathIO :: (Serializable a, Indexable a)=> IO (Maybe a) -> String - defPathIO iox= defPath x - where - Just x= unsafePerformIO $ (return $ Just undefined) `asTypeOf` iox - - - handler :: (Serializable a, Indexable a) => IOError -> IO (Maybe a) - handler e - | isAlreadyInUseError e = defaultReadResourceByKey k - | isDoesNotExistError e = return Nothing - | otherwise= if ("invalid" `isInfixOf` ioeGetErrorString e) - then - error $ ( "readResource: " ++ show e) ++ " defPath and/or keyResource are not suitable for a file path" - - else defaultReadResourceByKey k - - -defaultWriteResource :: (Serializable a, Indexable a) => a-> IO() -defaultWriteResource x= safeWrite filename (serialize x) -- `debug` ("write "++filename) - where - filename= defPath x ++ key x - -safeWrite filename str= handle handler $ writeFile filename str - where - handler (e :: IOError) - | isDoesNotExistError e=do - createDirectoryIfMissing True $ take (1+(last $ elemIndices '/' filename)) 
filename --maybe the path does not exist - safeWrite filename str - - | otherwise =do - --phPutStrLn stderr $ "defaultWriteResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" - safeWrite filename str - -defaultDelResource :: (Indexable a) => a -> IO() -defaultDelResource x= handle (handler filename) $ removeFile filename --`debug` ("delete "++filename) - where - filename= defPath x ++ key x - handler :: String -> IOError -> IO () - handler file e - | isDoesNotExistError e= return () - | isAlreadyInUseError e= do - --hPutStrLn stderr $ "defaultDelResource: busy" ++ " in file: " ++ filename ++ " retrying" - threadDelay 1000000 - defaultDelResource x - | otherwise = do - --hPutStrLn stderr $ "defaultDelResource: " ++ show e ++ " in file: " ++ filename ++ " retrying" - threadDelay 1000000 - defaultDelResource x - - - - --- | Strict read from file, needed for default file persistence -readFileStrict f = openFile f ReadMode >>= \ h -> readIt h `finally` hClose h - where - readIt h= do - s <- hFileSize h - let n= fromIntegral s - str <- replicateM n (hGetChar h) - return str - - -newtype Transient a= Transient a - --- | Transient wraps any indexable object for living in the cache, but not --- in persistent storage. This is useful for memoization. --- @Transient x@ is neither writeen nor read. -instance Indexable a => IResource (Transient a) where - keyResource (Transient x)= key x - readResourceByKey = const $ return Nothing - writeResource= const $ return () - delResource = const $ return () - --} diff --git a/Data/TCache/IndexQuery.hs b/Data/TCache/IndexQuery.hs index 0532574..6b55d42 100644 --- a/Data/TCache/IndexQuery.hs +++ b/Data/TCache/IndexQuery.hs @@ -69,9 +69,9 @@ fields in a registers are to be indexed, they must have different types. -} -{-# LANGUAGE DeriveDataTypeable, MultiParamTypeClasses -, FunctionalDependencies, FlexibleInstances, UndecidableInstances -, TypeSynonymInstances, IncoherentInstances #-} +{-# LANGUAGE DeriveDataTypeable, MultiParamTypeClasses, FunctionalDependencies, FlexibleInstances, + UndecidableInstances, TypeSynonymInstances, IncoherentInstances, MonoLocalBinds #-} + module Data.TCache.IndexQuery( index , (.==.) @@ -92,10 +92,9 @@ import Data.TCache.Defs import Data.List import Data.Typeable import Control.Concurrent.STM -import Data.Maybe (catMaybes) +import Data.Maybe (catMaybes, fromMaybe) import qualified Data.Map as M import Data.IORef -import qualified Data.Map as M import System.IO.Unsafe import Data.ByteString.Lazy.Char8(pack, unpack) @@ -117,8 +116,8 @@ instance Queriable reg a => IResource (Index reg a) where delResource = defDelResource - -data Index reg a= Index (M.Map a [DBRef reg]) deriving ( Show, Typeable) +-- was data before hlint suggested to use a newtype here +newtype Index reg a= Index (M.Map a [DBRef reg]) deriving ( Show, Typeable) instance (IResource reg, Typeable reg, Ord a, Read a) => Read (Index reg a) where @@ -166,11 +165,7 @@ getIndexr rindex val= do mindex <- readDBRef rindex let index = case mindex of Just (Index index) -> index; _ -> M.empty - - let dbrefs= case M.lookup val index of - Just dbrefs -> dbrefs - Nothing -> [] - + let dbrefs= fromMaybe [] (M.lookup val index) return (rindex, Index index, dbrefs) selectorIndex @@ -277,13 +272,11 @@ class SetOperations set set' setResult | set set' -> setResult where instance SetOperations [DBRef a] [DBRef a] [DBRef a] where (.&&.) fxs fys= do xs <- fxs - ys <- fys - return $ intersect xs ys + intersect xs <$> fys (.||.) 
fxs fys= do xs <- fxs - ys <- fys - return $ union xs ys + union xs <$> fys infixr 4 .&&. infixr 3 .||. @@ -292,12 +285,12 @@ instance SetOperations (JoinData a a') [DBRef a] (JoinData a a') where (.&&.) fxs fys= do xss <- fxs ys <- fys - return [(intersect xs ys, zs) | (xs,zs) <- xss] + return [(xs `intersect` ys, zs) | (xs,zs) <- xss] (.||.) fxs fys= do xss <- fxs ys <- fys - return [(union xs ys, zs) | (xs,zs) <- xss] + return [(xs `union` ys, zs) | (xs,zs) <- xss] instance SetOperations [DBRef a] (JoinData a a') (JoinData a a') where (.&&.) fxs fys= fys .&&. fxs @@ -307,12 +300,12 @@ instance SetOperations (JoinData a a') [DBRef a'] (JoinData a a') where (.&&.) fxs fys= do xss <- fxs ys <- fys - return [(zs,intersect xs ys) | (zs,xs) <- xss] + return [(zs, xs `intersect` ys) | (zs,xs) <- xss] (.||.) fxs fys= do xss <- fxs ys <- fys - return [(zs, union xs ys) | (zs,xs) <- xss] + return [(zs, xs `union` ys) | (zs,xs) <- xss] -- | return all the (indexed) values which this field has and a DBRef pointer to the register @@ -325,7 +318,7 @@ indexOf selector= do Just (Index index) -> return $ M.toList index; _ -> do let fields= show $ typeOf selector - error $ "the index for "++ fields ++" do not exist. At main, use \"Data.TCache.IdexQuery.index\" to start indexing this field" + error $ "the index for "++ fields ++" do not exist. At main, use \"Data.TCache.IndexQuery.index\" to start indexing this field" retrieve :: Queriable reg a => (reg -> a) -> a -> (a -> a -> Bool) -> STM[DBRef reg] retrieve field value op= do @@ -337,8 +330,7 @@ retrieve field value op= do recordsWith :: (IResource a, Typeable a) => STM [DBRef a] -> STM [ a] -recordsWith dbrefs= dbrefs >>= mapM readDBRef >>= return . catMaybes - +recordsWith dbrefs= catMaybes <$> (dbrefs >>= mapM readDBRef) class Select selector a res | selector a -> res where @@ -353,23 +345,23 @@ instance (Select sel1 a res1, Select sel2 b res2 ) instance (Typeable reg, IResource reg) => Select (reg -> a) (STM [DBRef reg]) (STM [a]) where - select sel xs= return . map sel =<< return . catMaybes =<< mapM readDBRef =<< xs + select sel xs= map sel <$> (catMaybes <$> (mapM readDBRef =<< xs)) instance (Typeable reg, IResource reg, Select (reg -> a) (STM [DBRef reg]) (STM [a]), Select (reg -> b) (STM [DBRef reg]) (STM [b]) ) - => Select ((reg -> a),(reg -> b)) (STM [DBRef reg]) (STM [(a,b)]) + => Select (reg -> a, reg -> b) (STM [DBRef reg]) (STM [(a,b)]) where - select (sel, sel') xs= mapM (\x -> return (sel x, sel' x)) =<< return . catMaybes =<< mapM readDBRef =<< xs + select (sel, sel') xs= mapM (\x -> return (sel x, sel' x)) =<< catMaybes <$> (mapM readDBRef =<< xs) instance (Typeable reg, IResource reg, Select (reg -> a) (STM [DBRef reg]) (STM [a]), Select (reg -> b) (STM [DBRef reg]) (STM [b]), Select (reg -> c) (STM [DBRef reg]) (STM [c]) ) - => Select ((reg -> a),(reg -> b),(reg -> c)) (STM [DBRef reg]) (STM [(a,b,c)]) + => Select (reg -> a, reg -> b, reg -> c) (STM [DBRef reg]) (STM [(a,b,c)]) where - select (sel, sel',sel'') xs= mapM (\x -> return (sel x, sel' x, sel'' x)) =<< return . 
catMaybes =<< mapM readDBRef =<< xs + select (sel, sel',sel'') xs= mapM (\x -> return (sel x, sel' x, sel'' x)) =<< catMaybes <$> (mapM readDBRef =<< xs) instance (Typeable reg, IResource reg, @@ -377,9 +369,9 @@ instance (Typeable reg, IResource reg, Select (reg -> b) (STM [DBRef reg]) (STM [b]), Select (reg -> c) (STM [DBRef reg]) (STM [c]), Select (reg -> d) (STM [DBRef reg]) (STM [d]) ) - => Select ((reg -> a),(reg -> b),(reg -> c),(reg -> d)) (STM [DBRef reg]) (STM [(a,b,c,d)]) + => Select (reg -> a, reg -> b, reg -> c, reg -> d) (STM [DBRef reg]) (STM [(a,b,c,d)]) where - select (sel, sel',sel'',sel''') xs= mapM (\x -> return (sel x, sel' x, sel'' x, sel''' x)) =<< return . catMaybes =<< mapM readDBRef =<< xs + select (sel, sel',sel'',sel''') xs= mapM (\x -> return (sel x, sel' x, sel'' x, sel''' x)) =<< catMaybes <$> (mapM readDBRef =<< xs) -- for join's (field1 op field2) @@ -387,11 +379,11 @@ instance (Typeable reg, IResource reg, Typeable reg', IResource reg', Select (reg -> a) (STM [DBRef reg]) (STM [a]), Select (reg' -> b) (STM [DBRef reg']) (STM [b]) ) - => Select ((reg -> a),(reg' -> b)) (STM (JoinData reg reg')) (STM [([a],[b])]) + => Select (reg -> a, reg' -> b) (STM (JoinData reg reg')) (STM [([a],[b])]) where select (sel, sel') xss = xss >>= mapM select1 where select1 (xs, ys) = do - rxs <- return . map sel =<< return . catMaybes =<< mapM readDBRef xs - rys <- return . map sel' =<< return . catMaybes =<< mapM readDBRef ys + rxs <- map sel <$> (catMaybes <$> mapM readDBRef xs) + rys <- map sel' <$> (catMaybes <$> mapM readDBRef ys) return (rxs,rys) diff --git a/Data/TCache/IndexText.hs b/Data/TCache/IndexText.hs index 46919f1..b950e43 100644 --- a/Data/TCache/IndexText.hs +++ b/Data/TCache/IndexText.hs @@ -1,8 +1,5 @@ -{-# LANGUAGE TypeSynonymInstances - , DeriveDataTypeable - , FlexibleInstances - , UndecidableInstances - , MultiParamTypeClasses #-} +{-# LANGUAGE DeriveDataTypeable, FlexibleInstances, + UndecidableInstances, MultiParamTypeClasses #-} {- | Implements full text indexation (`indexText`) and text search(`contains`), as an addition to @@ -71,9 +68,8 @@ import Data.Bits import System.Mem.StableName import Data.List((\\)) import GHC.Conc(unsafeIOToSTM) -import Control.Concurrent(forkIO) +import Control.Concurrent(forkIO, threadDelay) import Data.Char -import Control.Concurrent(threadDelay) import Data.ByteString.Lazy.Char8(pack, unpack) import Control.Monad import System.IO.Unsafe @@ -142,6 +138,15 @@ op refIndex t set ws key = do Just integer -> -- word already indexed process (Just $ IndexText t n' mapSI' mapIS' $ M.insert w (set integer docLocation) map) ws +addProto sel = do + let [t1,t2]= typeRepArgs $! typeOf sel + let t = show t1 ++ show t2 + let proto = IndexText t 0 M.empty M.empty M.empty + withResources [proto] $ init proto + where + init proto [Nothing] = [proto] + init _ [Just _] = [] + -- | start a trigger to index the contents of a register field indexText :: (IResource a, Typeable a, Typeable b) @@ -150,13 +155,8 @@ indexText -> IO () indexText sel convert= do addTrigger (indext sel (words1 . convert)) - let [t1,t2]= typeRepArgs $! 
typeOf sel - t= show t1 ++ show t2 - let proto = IndexText t 0 M.empty M.empty M.empty - withResources [proto] $ init proto - where - init proto [Nothing] = [proto] - init _ [Just _] = [] + addProto sel + -- | trigger the indexation of list fields with elements convertible to Text indexList :: (IResource a, Typeable a, Typeable b) @@ -165,75 +165,75 @@ indexList -> IO () indexList sel convert= do addTrigger (indext sel convert) - let [t1,t2]= typeRepArgs $! typeOf sel - t= show t1 ++ show t2 - let proto= IndexText t 0 M.empty M.empty M.empty - withResources [proto] $ init proto - - where - init proto [Nothing] = [proto] - init _ [Just _]= [] - - + addProto sel indext :: (IResource a, Typeable a,Typeable b) => (a -> b) -> (b -> [T.Text]) -> DBRef a -> Maybe a -> STM() -indext sel convert dbref mreg= f1 -- unsafeIOToSTM $! f +indext sel convert dbref mreg = f1 -- unsafeIOToSTM $! f where - f= forkIO (atomically f1) >> return() - f1= do - moldreg <- readDBRef dbref - case ( moldreg, mreg) of - (Nothing, Just reg) -> add refIndex t (keyResource reg) . convert $ sel reg - (Just oldreg, Nothing) -> del refIndex t (keyResource oldreg) . convert $ sel oldreg - (Just oldreg, Just reg) -> do - st <- unsafeIOToSTM $ makeStableName $ sel oldreg -- test if field - st' <- unsafeIOToSTM $ makeStableName $ sel reg -- has changed - if st== st' - then return () - else do - let key= keyResource reg - let wrds = convert $ sel oldreg - let wrds'= convert $ sel reg - let new= wrds' \\ wrds - let old= wrds \\ wrds' - when(not $ null old) $ del refIndex t key old - when(not $ null new) $ add refIndex t key new - return() - where - [t1,t2]= typeRepArgs $! typeOf sel - t= show t1 ++ show t2 - refIndex= getDBRef . key $ IndexText t u u u u where u= undefined + f = void $ forkIO (atomically f1) + f1 = do + moldreg <- readDBRef dbref + case (moldreg, mreg) of + (Nothing, Just reg) -> add refIndex t (keyResource reg) . convert $ sel reg + (Just oldreg, Nothing) -> del refIndex t (keyResource oldreg) . convert $ sel oldreg + (Just oldreg, Just reg) -> do + st <- unsafeIOToSTM $ makeStableName $ sel oldreg -- test if field + st' <- unsafeIOToSTM $ makeStableName $ sel reg -- has changed + if st == st' + then return () + else do + let key = keyResource reg + let wrds = convert $ sel oldreg + let wrds' = convert $ sel reg + let new = wrds' \\ wrds + let old = wrds \\ wrds' + unless (null old) $ del refIndex t key old + unless (null new) $ add refIndex t key new + where + [t1, t2] = typeRepArgs $! typeOf sel + t = show t1 ++ show t2 + refIndex = getDBRef . key $ IndexText t u u u u + where + u = undefined + +-- avoid duplicate code +targs sel = do + let [t1, t2]= typeRepArgs $! typeOf sel + let t= show t1 ++ show t2 + let u= undefined + withSTMResources [IndexText t u u u u] + $ \[r] -> resources{toReturn= r} -- | return the DBRefs of the registers whose field (first parameter, usually a container) contains the requested value. containsElem :: (IResource a, Typeable a, Typeable b) => (a -> b) -> String -> STM [DBRef a] -containsElem sel wstr = do - let w= T.pack wstr - let [t1, t2]= typeRepArgs $! typeOf sel - let t= show t1 ++ show t2 - let u= undefined - mr <- withSTMResources [IndexText t u u u u] - $ \[r] -> resources{toReturn= r} - case mr of - Nothing -> do - let fields= show $ typeOf sel - error $ "the index for "++ fields ++" do not exist. 
At main, use \"Data.TCache.IdexQuery.index\" to start indexing this field" - Just (IndexText t n _ mmapIntString map1) -> - case M.lookup w map1 of - Nothing -> return [] - Just integer -> do - let mns=map (\n ->case testBit integer n of True -> Just n; _ -> Nothing) [0..n] - let wordsr = catMaybes $ map (\n -> M.lookup n mmapIntString) $ catMaybes mns - return $ map getDBRef wordsr +containsElem sel wstr = do + let w = T.pack wstr + mr <- targs sel + case mr of + Nothing -> do + let fields = show $ typeOf sel + error $ + "the index for " ++ + fields ++ " do not exist. At main, use \"Data.TCache.IndexQuery.index\" to start indexing this field" + Just (IndexText t n _ mmapIntString map1) -> + case M.lookup w map1 of + Nothing -> return [] + Just integer -> do + let mns = + map + (\n -> + if testBit integer n + then Just n + else Nothing) + [0 .. n] + let wordsr = mapMaybe (`M.lookup` mmapIntString) $ catMaybes mns + return $ map getDBRef wordsr -- | return all the values of a given field (if it has been indexed with 'index') allElemsOf :: (IResource a, Typeable a, Typeable b) => (a -> b) -> STM [T.Text] allElemsOf sel = do - let [t1, t2]= typeRepArgs $! typeOf sel - let t= show t1 ++ show t2 - let u= undefined - mr <- withSTMResources [IndexText t u u u u] - $ \[r] -> resources{toReturn= r} + mr <- targs sel case mr of Nothing -> return [] Just (IndexText t n _ _ map) -> return $ M.keys map @@ -252,7 +252,7 @@ contains sel str= case words str of [w] -> containsElem sel w ws -> do let rs = map (containsElem sel) $ filter filterWord ws - foldl (.&&.) (head rs) (tail rs) + foldl1 (.&&.) rs -filterWordt w= T.length w >2 || or (map (\c -> isUpper c || isDigit c) (T.unpack w)) -filterWord w= length w >2 || or (map (\c -> isUpper c || isDigit c) w) +filterWordt w= T.length w >2 || any (\c -> isUpper c || isDigit c) (T.unpack w) +filterWord w= length w >2 || any (\c -> isUpper c || isDigit c) w diff --git a/Data/TCache/Memoization.hs b/Data/TCache/Memoization.hs index 782ed9d..b7cfc57 100644 --- a/Data/TCache/Memoization.hs +++ b/Data/TCache/Memoization.hs @@ -33,7 +33,8 @@ import Data.RefSerialize(addrHash,newContext) data Cached a b= forall m.Executable m => Cached a (a -> m b) b Integer deriving Typeable -context= unsafePerformIO newContext +{-# NOINLINE context #-} +context = unsafePerformIO newContext -- | given a string, return a key that can be used in Indexable instances -- Of non persistent objects, such are cached objects (it changes fron execution to execution) @@ -107,7 +108,7 @@ cachedSTM time f a= do case time of 0 -> return b _ -> do - TOD tnow _ <- unsafeIOToSTM $ getClockTime + TOD tnow _ <- unsafeIOToSTM getClockTime if tnow - t >= fromIntegral time then do Cached _ _ b _ <- fillIt ref prot @@ -125,10 +126,10 @@ cachedSTM time f a= do -- The Int parameter is the timeout, in second after the last evaluation, after which the cached value will be discarded and the expression will be evaluated again if demanded -- . 
Time == 0 means no timeout cachedByKey :: (Typeable a, Executable m,MonadIO m) => String -> Int -> m a -> m a -cachedByKey key time f = cached time (\_ -> f) key +cachedByKey key time f = cached time (const f) key cachedByKeySTM :: (Typeable a, Executable m) => String -> Int -> m a -> STM a -cachedByKeySTM key time f = cachedSTM time (\_ -> f) key +cachedByKeySTM key time f = cachedSTM time (const f) key -- Flush the cached object indexed by the key flushCached :: String -> IO () @@ -136,7 +137,7 @@ flushCached k= atomically $ invalidateKey $ cachedKeyPrefix ++ k -- !> -- | a pure version of cached cachedp :: (Indexable a,Typeable a,Typeable b) => (a ->b) -> a -> b -cachedp f k = execute $ cached 0 (\x -> Identity $ f x) k +cachedp f k = execute $ cached 0 (Identity . f) k --testmemo= do -- let f x = "hi"++x !> "exec1" diff --git a/Data/TCache/Triggers.hs b/Data/TCache/Triggers.hs index 83a6374..bbfa5bc 100644 --- a/Data/TCache/Triggers.hs +++ b/Data/TCache/Triggers.hs @@ -1,6 +1,5 @@ +{-# LANGUAGE ExistentialQuantification, DeriveDataTypeable #-} - -{-# LANGUAGE ExistentialQuantification, DeriveDataTypeable, BangPatterns #-} module Data.TCache.Triggers(DBRef(..),Elem(..),Status(..),addTrigger,applyTriggers) where import Data.TCache.IResource import Data.TCache.Defs @@ -9,21 +8,21 @@ import Data.IORef import System.IO.Unsafe import Unsafe.Coerce import GHC.Conc (STM, unsafeIOToSTM) -import Data.Maybe(maybeToList,catMaybes) +import Data.Maybe(maybeToList, catMaybes, fromMaybe, fromJust) import Data.List(nubBy) import Control.Concurrent.STM import Debug.Trace -import Data.Maybe(fromJust) newtype TriggerType a= TriggerType (DBRef a -> Maybe a -> STM()) deriving Typeable -data CMTrigger= forall a.(IResource a, Typeable a) => CMTrigger !((DBRef a) -> Maybe a -> STM()) +data CMTrigger= forall a.(IResource a, Typeable a) => CMTrigger !(DBRef a -> Maybe a -> STM()) cmtriggers :: IORef [(TypeRep ,[CMTrigger])] -cmtriggers= unsafePerformIO $ newIORef [] +{-# NOINLINE cmtriggers #-} +cmtriggers = unsafePerformIO $ newIORef [] @@ -35,7 +34,7 @@ The called trigger function has two parameters: the DBRef being accesed If the DBRef is being deleted, the second parameter is 'Nothing'. if the DBRef contains Nothing, then the object is being created -} -addTrigger :: (IResource a, Typeable a) => ((DBRef a) -> Maybe a -> STM()) -> IO() +addTrigger :: (IResource a, Typeable a) => (DBRef a -> Maybe a -> STM()) -> IO() addTrigger t= do map <- readIORef cmtriggers writeIORef cmtriggers $ @@ -43,11 +42,11 @@ addTrigger t= do in nubByType $ (atype ,CMTrigger t : ts) : map where nubByType= nubBy (\(t,_)(t',_) -> t==t') - (_,(atype:_))= splitTyConApp . typeOf $ TriggerType t + (_,atype:_)= splitTyConApp . 
typeOf $ TriggerType t -mbToList mxs= case mxs of Nothing -> []; Just xs -> xs +mbToList = fromMaybe [] -- | internally called when a DBRef is modified/deleted/created applyTriggers:: (IResource a, Typeable a) => [DBRef a] -> [Maybe a] -> STM() @@ -61,7 +60,7 @@ applyTriggers dbrfs mas = do f t= mapM2_ (f1 t) dbrfs mas f1 ::(IResource a, Typeable a) => CMTrigger -> DBRef a -> Maybe a -> STM() - f1 (CMTrigger t) dbref ma = (unsafeCoerce t) dbref ma + f1 (CMTrigger t)= unsafeCoerce t diff --git a/buildDemos.sh b/buildDemos.sh index a355360..2265486 100755 --- a/buildDemos.sh +++ b/buildDemos.sh @@ -3,14 +3,16 @@ # This is a crude way to build all the demo files placing them into the # demos/bin folder and removing the build artifacts afterwards -rm -rf ./demos/.build +stack build + rm -rf ./demos/bin/* for fullfile in ./demos/*.hs; do filename=$(basename -- "$fullfile") binname="${filename%.*}" echo "$filename -> $binname" - stack exec ghc -- -outputdir demos/.build $fullfile -o demos/bin/$binname + rm -rf ./demos/.build/Main.o + stack exec ghc -- -O2 -threaded -with-rtsopts=-N -outputdir demos/.build $fullfile -o demos/bin/$binname done -rm -rf ./demos/.build +# rm -rf ./demos/.build diff --git a/demos/DBRef.hs b/demos/DBRef.hs index 8561083..584d779 100644 --- a/demos/DBRef.hs +++ b/demos/DBRef.hs @@ -1,4 +1,4 @@ -{-# OPTIONS -XDeriveDataTypeable -XFlexibleInstances -XUndecidableInstances #-} +{-# LANGUAGE DeriveDataTypeable, FlexibleInstances, UndecidableInstances #-} module Main where import Data.TCache import Data.TCache.DefaultPersistence @@ -35,6 +35,7 @@ instance Indexable Emp where myCompanyName= "mycompany" +{-# NOINLINE myCompanyRef #-} myCompanyRef= unsafePerformIO . atomically $ do refEmp1 <- newDBRef Emp{ename= "Emp1", salary= 34000} @@ -80,7 +81,7 @@ printSalaries ref= do putMsg msg= putStrLn $ ">>" ++ msg main= do - putMsg "DBRefs are cached idexable, serializable, unique-by-key references to objects stored in the cache, mutable under STM transactions" + putMsg "DBRefs are cached indexable, serializable, unique-by-key references to objects stored in the cache, mutable under STM transactions" putMsg "DBRef's are instances of Show" print myCompanyRef @@ -116,7 +117,7 @@ main= do putStrLn "checking race condition on cache cleaning" - let emp1= Emp{ename="Emp1", salary=(-1)} + let emp1= Emp{ename="Emp1", salary= -1} let key= keyResource emp1 let remp1 = getDBRef key Just emp1 <- atomically $ readDBRef remp1 diff --git a/demos/DynamicSample.hs b/demos/DynamicSample.hs index 6f1d2b6..fe877a0 100644 --- a/demos/DynamicSample.hs +++ b/demos/DynamicSample.hs @@ -1,5 +1,5 @@ -{-# OPTIONS -XTypeSynonymInstances -XFlexibleInstances -XUndecidableInstances #-} --- XTypeSynonymInstances added only to permit IResource instances for Strings +{-# LANGUAGE FlexibleInstances, UndecidableInstances #-} + module Main where import Data.TCache import Data.TCache.DefaultPersistence @@ -11,19 +11,20 @@ example of IDynamic usage. -} ---very simple data: ---two objects with two different datatypes: Int and String -{- -instance Indexable Int where - key = show +-- Very simple data: +-- Two objects with two different datatypes: Int and String2 +-- Notice: We need a newtype String for this example because of the special treatment of the key +-- while TCache.Defs already defines the String itself (id) as key. 
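
As an aside on a pattern that recurs throughout this patch: several top-level references created with unsafePerformIO (cmtriggers, the memoization context, and myCompanyRef above) are now pinned with NOINLINE so GHC cannot duplicate the allocating action. A minimal, self-contained sketch of that pattern follows; globalCounter and nextId are hypothetical names and are not part of TCache.

@
import Data.IORef (IORef, atomicModifyIORef', newIORef)
import System.IO.Unsafe (unsafePerformIO)

-- Hypothetical global counter. The NOINLINE pragma keeps GHC from inlining the
-- unsafePerformIO action, which could otherwise allocate more than one IORef.
{-# NOINLINE globalCounter #-}
globalCounter :: IORef Int
globalCounter = unsafePerformIO (newIORef 0)

-- Hand out successive identifiers from the single shared counter.
nextId :: IO Int
nextId = atomicModifyIORef' globalCounter (\n -> (n + 1, n))

main :: IO ()
main = nextId >> nextId >>= print   -- prints 1
@
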
+newtype String2 = String2 { fromString2 :: String } deriving ( Eq, Show, Typeable, Read ) + +instance Indexable String2 where + -- making the key 2 chars wide + key x= take 2 $ fromString2 x -instance Indexable String where - key x= take 2 x --} instance (Read a, Show a) => Serializable a where - serialize= pack . show - deserialize= read . unpack + serialize = pack . show + deserialize = read . unpack main= do @@ -34,28 +35,27 @@ main= do let x= 1:: Int - -- now *Resources primitives suppont different datatypes + -- now *Resources primitives support different datatypes -- without the need of Data.Dynamic withResources [] $ const [x] - withResources [] $ const ["hola"] --resources creation + withResources [] $ const [String2 "hola"] --resources creation syncCache res <- getResource x print res - res <- getResource "ho" + res <- getResource $ String2 "ho" print res -- to use heterogeneous data in the same transaction, -- use DBRef's: s <- atomically $ do let refInt = getDBRef $ key x :: DBRef Int - refString = getDBRef $ key "ho" :: DBRef String + refString = getDBRef $ key (String2 "ho") :: DBRef String2 i <- readDBRef refInt - writeDBRef refString $ "hola, the retrieved value of x is " ++ show i - s <- readDBRef refString - return s + writeDBRef refString $ String2 $ "hola, the retrieved value of x is " ++ show i + readDBRef refString print s diff --git a/demos/basicSample.hs b/demos/basicSample.hs index c40c122..5e04458 100644 --- a/demos/basicSample.hs +++ b/demos/basicSample.hs @@ -6,6 +6,7 @@ import Data.TCache import Data.TCache.DefaultPersistence import Data.ByteString.Lazy.Char8(pack,unpack) import Control.Concurrent +import Data.Foldable (for_) import Data.Typeable import Debug.Trace @@ -23,7 +24,7 @@ data Data= User{uname::String, uid::String, spent:: Int} | user_ = User{uname = undefined, uid = undefined, spent = undefined } item_ = Item{iname = undefined, iid = undefined, price = undefined, stock = undefined } --- The mappings between the cache and the phisical storage are defined by the interface IResource +-- The mappings between the cache and the physical storage are defined by the interface IResource -- to extract the unique key, -- to serializa to string -- to deserialize from string @@ -35,8 +36,8 @@ item_ = Item{iname = undefined, iid = undefined, price = undefined, stock = unde instance Indexable Data where - key User{uid=id}= id - key Item{iid=id}= id + key User{uid=id}= id + key Item{iid=id}= id instance Serializable Data where serialize= pack . show @@ -46,54 +47,53 @@ instance Serializable Data where -- buy is the operation to be performed in the example ---withResources gets a partial definition of each resource necessary for extracting the key, ---fill all the rest of the data structures (if found ) and return a list of Maybe Data. ---BuyIt is part of the domain problem. it receive this list and generates a new list of ---data objects that are updated in the cache. buyIt is executed atomically. +-- withResources gets a partial definition of each resource necessary for extracting the key, +-- fill all the rest of the data structures (if found ) and return a list of Maybe Data. +-- buyIt is part of the domain problem. it receive this list and generates a new list of +-- data objects that are updated in the cache. buyIt is executed atomically. 
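
For comparison with the withResources version below, the same purchase can also be phrased against the lower-level withSTMResources primitive, which hands back the Resources record directly and can therefore be combined with other STM work in a single atomic transaction. This is only a sketch under the assumptions of this demo (the Data type and its accessors defined above); it is not part of the sample code.

@
-- Sketch only: the purchase expressed as a withSTMResources transaction.
buySTM :: Data -> Data -> STM ()
buySTM user item = withSTMResources [user, item] doIt
  where
    doIt [Just us, Just it]
      | stock it > 0 = Resources
          [ us{ spent = spent us + price it }   -- registers to (re)write
          , it{ stock = stock it - 1 } ]
          []                                    -- registers to delete
          ()                                    -- value to return
      | otherwise = error "stock is empty for this product"
    doIt _ = error "either the user or the item does not exist"

-- usage: atomically $ buySTM user_{ uid = "U12345" } item_{ iid = "I54321" }
@
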
-user `buy` item= withResources[user,item] buyIt + +user `buy` item = withResources [user, item] buyIt where - buyIt[Just us,Just it] - | stock it > 0= [us',it'] `debug` "john buy a PC" + buyIt[Just us, Just it] + | stock it > 0 = [us',it'] `debug` ("john spent " ++ show (spent us) ++ + " so far. Hey tries to buy a PC from the stock of " ++ show (stock it)) | otherwise = error "stock is empty for this product" where - us'= us{spent=spent us + price it} - it'= it{stock= stock it-1} + us'= us{ spent = spent us + price it} + it'= it{ stock = stock it - 1 } buyIt _ = error "either the user or the item does not exist" main= do - -- create resources (acces no resources and return two new Data objects defined in items) - withResources[]items - - --11 PCs are charged to the John´s account in paralel, to show transactionality - --because there are only 10 PCs in stock, the last thread must return an error + -- create resources (access no resources and return two new Data objects defined in items) + withResources [] prepareItems - for 11 $ forkIO $ user_{uid="U12345"} `buy` item_{iid="I54321"} + -- 11 PCs are charged to the John´s account in parallel, to show transactionality + -- because there are only 10 PCs in stock, the last thread must return an error + for_ [1..11] $ const $ forkIO $ user_{ uid = "U12345" } `buy` item_{ iid = "I54321" } - --wait 1 seconds - threadDelay 1000000 + -- wait a second (to let the forked io finish) + threadDelay 1000000 - [us,it] <- getResources [user_{uid="U12345"}, item_{iid="I54321"}] + -- get the contents of the resources by their keys + [us,it] <- getResources [user_{ uid = "U12345" }, item_{ iid = "I54321" }] - putStrLn $ "user data=" ++ show us - putStrLn $ "item data=" ++ show it + putStrLn $ "user data=" ++ show us + putStrLn $ "item data=" ++ show it - -- write the cache content in a persistent store (invoque writeResource for each resource) - -- in a real application clearSyncCacheProc can be used instead to adjust size and write the cache periodically + -- write the cache content in a persistent store (invoke writeResource for each resource) + -- in a real application clearSyncCacheProc can be used instead to adjust size and write the cache periodically + syncCache + threadDelay 1000000 - syncCache - threadDelay 1000000 - - -- the files have been created. the files U12345 and I54321 must contain the result of the 11 iterations + -- the files U12345 and I54321 in .tcachedata must now contain the result of the 11 iterations where - items _= - [User "John" "U12345" 0 - ,Item "PC" "I54321" 6000 10] - - for 0 _ = return () - for n f= f >> for (n-1) f + prepareItems = const + [ User "John" "U12345" 0 + , Item "PC" "I54321" 6000 10 + ] diff --git a/demos/caching.hs b/demos/caching.hs index 33a895d..b2e2875 100644 --- a/demos/caching.hs +++ b/demos/caching.hs @@ -21,39 +21,90 @@ data Data= Data Int Int deriving (Read, Show, Typeable) instance Indexable Data where key (Data i _)= show i - defPath _ = "cacheData/" -- directory where the data is stored. + defPath _ = ".tcachedata/caching/" -- directory where the data is stored. instance Serializable Data where serialize= pack . show deserialize= read . 
unpack -main= do +printStat (total, dirty, loaded) = + putStrLn $ "total: " ++ show total ++ " dirty: " ++ show dirty ++ " loaded: " ++ show loaded - putStrLn "see the source code of this example" - putStrLn "This program test the caching and cleaning and re-retrieval and update of the cache" +main= do - putStrLn "asyncronous write every 10 seconds, 100 elems max cache size" - putStrLn "default policy (defaultCheck) for clearing the cache is to reduce the cache to half of max sixe when size exceeds the max" + putStrLn "See the source code of this example!" + putStrLn "" + putStrLn "This program tests the caching, cleaning, re-retrieval and updating of the cache." + putStrLn "It uses the DefaultPersistence (disk) and defaultCheck (cleaning rules)." + putStrLn "It writes asyncronously every 10 seconds all changed elemements to disk." + putStrLn "When there is more than the allowed number of elements (100) in the cache it cleans them by the given rule." + putStrLn "With defaultCheck it drops elements which where not accesed since half the time between now and the last sync." putStrLn "" - putStrLn "create resources" - putStrLn " (acces no resources and return two new Data objects defined in items)" + putStrLn "Creating 200 resources with content: n 0" withResources[] $ const[Data i 0 | i <- [1..200]] + -- get stats about them (total, dirty, loaded) + statElems >>= printStat + + x <- getResources [Data i 0 | i <- [1..200]] + putStrLn $ "Last element: " ++ show (last x) putStrLn "" - clearSyncCacheProc 10 defaultCheck 100 - putStrLn $ "every 10 seconds, the modified data in the cache is written in the folder: " ++ defPath ( undefined :: Data) - putStrLn "wait 10 seconds to let the next write cycle to enter (every 10 seconds, set by clearSyncCacheProc)" + putStrLn $ "Starting the async proc with folder: " ++ defPath ( undefined :: Data) + clearSyncCacheProc 10 defaultCheck 100 + threadDelay 6000000 + + putStrLn "after 6 seconds" + statElems >>= printStat + threadDelay 5000000 + + putStrLn "after 11 seconds (should have saved)" + statElems >>= printStat + threadDelay 5000000 + + putStrLn "after 16 seconds (accessing one element)" + -- I read (access) all the data here! + getResource (Data 100 undefined) >>= print + + statElems >>= printStat + --syncCache + threadDelay 5000000 + putStrLn "after 21 seconds (should have cleaned)" + statElems >>= printStat - putStrLn "because 200 exceeds the maximum cache size (100) defaultCheck will discard the 150 older elems to reduce the cache to a half" - putStrLn "This is the behaviour defined in defaultCheck." - threadDelay 20000000 - putStrLn " update every element, included the discarded ones" + putStrLn "Updating every element, included the discarded ones with 'n 1'" withResources [Data i undefined | i <- [1..200]] $ \ds -> [ Data i (n+1) | Just(Data i n) <- ds] + threadDelay 5000000 + + putStrLn "after 26 seconds (should be 'full')" + statElems >>= printStat + + putStrLn "accessing all entries once and print the last" + -- I read (access) all the data here! + x <- getResources [Data i 1 | i <- [1..200]] + print $ last x + + + threadDelay 5000000 + + putStrLn "after 31 seconds (should have saved)" + statElems >>= printStat + threadDelay 5000000 + + putStrLn "after 36 seconds" + statElems >>= printStat + threadDelay 5000000 + + putStrLn "after 41 seconds (should be cleaned again)" + statElems >>= printStat - putStrLn $"wait for the next cycle of file update. 
The files must contain 1 instead o 0 (Data n 1) in the folder "++ defPath ( undefined :: Data) - threadDelay 20000000 + -- reloading all of the data again + putStrLn "getting the first 50 elements" + x <- getResources [Data i 1 | i <- [1..50]] + putStrLn $ "Last element: " ++ show (last x) + putStrLn "Now we have" + statElems >>= printStat diff --git a/demos/indexQuery.hs b/demos/indexQuery.hs index 712b3f7..d089cbc 100644 --- a/demos/indexQuery.hs +++ b/demos/indexQuery.hs @@ -9,7 +9,7 @@ import Debug.Trace import Data.Typeable -data Person= Person {pname :: String} deriving (Show, Read, Eq, Typeable) +data Person= Person {pname :: String, age :: Int} deriving (Show, Read, Eq, Typeable) data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, Typeable) @@ -25,8 +25,9 @@ main = do index owner index pname index cname + index age - bruce <- atomically $ newDBRef $ Person "bruce" + bruce <- atomically $ newDBRef $ Person "bruce" 42 atomically $ mapM_ newDBRef [Car bruce "Bat Mobile", Car bruce "Porsche"] r <- atomically $ cname .>=. "Bat Mobile" @@ -35,5 +36,8 @@ main = do r <- atomically $ select (cname, owner) $ (owner .==. bruce) .&&. (cname .==. "Bat Mobile") print r + r <- atomically $ age .>=. (20 :: Int) + print r + --syncCache diff --git a/demos/indexText.hs b/demos/indexText.hs index 3c5fe3e..8de50d3 100644 --- a/demos/indexText.hs +++ b/demos/indexText.hs @@ -1,4 +1,4 @@ -{-# LANGUAGE DeriveDataTypeable#-} +{-# LANGUAGE DeriveDataTypeable #-} module Main where import Data.TCache import Data.TCache.DefaultPersistence @@ -27,4 +27,4 @@ main= do r <- atomically $ select title $ body `contains` "hola que tal" print r if r1 == [title doc] then print "OK" else print "FAIL" - if r== [] then print "OK" else print "FAIL" + if null r then print "OK" else print "FAIL" diff --git a/demos/memoization.hs b/demos/memoization.hs index b77b92e..fa5829f 100644 --- a/demos/memoization.hs +++ b/demos/memoization.hs @@ -3,10 +3,12 @@ import Data.TCache.DefaultPersistence import Control.Concurrent import System.Time - +-- | memoization caches a value for a given amount of time +-- This demo stores the current time for 4 seconds until +-- it generates the next timestamp main= do - cachedByKey "" 4 f >>= print + cachedByKey "timequant" 4 f >>= print threadDelay 1000000 main From 4e12cddb499dcf715bcef8240587ee5ba473353c Mon Sep 17 00:00:00 2001 From: Hans Raaf Date: Wed, 17 Apr 2019 16:52:16 +0200 Subject: [PATCH 3/5] Changes for Haskell2010. After preparing a Stack based build I found that TCache.hs didn't compile anymore. This is related to Haskell2010 vs Haskell98. I made (indentation) changes so it does compiles again. --- Data/TCache.hs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Data/TCache.hs b/Data/TCache.hs index ea49df9..6e5dbc9 100644 --- a/Data/TCache.hs +++ b/Data/TCache.hs @@ -414,14 +414,14 @@ readDBRef dbref@(DBRef key tv)= do readDBRefs :: (IResource a, Typeable a) => [DBRef a] -> STM [Maybe a] readDBRefs dbrefs= do let mf (DBRef key tv)= do - r <- readTVar tv - case r of - Exist (Elem x _ mt) -> do - t <- unsafeIOToSTM timeInteger - writeTVar tv . Exist $ Elem x t mt - return $ Right $ Just x - DoNotExist -> return $ Right Nothing - NotRead -> return $ Left key + r <- readTVar tv + case r of + Exist (Elem x _ mt) -> do + t <- unsafeIOToSTM timeInteger + writeTVar tv . 
Exist $ Elem x t mt + return $ Right $ Just x + DoNotExist -> return $ Right Nothing + NotRead -> return $ Left key inCache <- mapM mf dbrefs let pairs = foldr(\pair@(x,dbr) xs -> case x of Left k -> pair:xs; _ -> xs ) [] $ zip inCache dbrefs let (toReadKeys, dbrs) = unzip pairs @@ -432,8 +432,8 @@ readDBRefs dbrefs= do case r of Nothing -> writeTVar tv DoNotExist Just x -> do - t <- unsafeIOToSTM timeInteger - writeTVar tv $ Exist $ Elem x t (-1) + t <- unsafeIOToSTM timeInteger + writeTVar tv $ Exist $ Elem x t (-1) mapM_ processTVar $ zip rs dbrs let mix (Right x:xs) ys = x:mix xs ys @@ -815,9 +815,9 @@ releaseTPVar cache r =do case mr of Nothing -> unsafeIOToSTM (finalize w) >> releaseTPVar cache r Just dbref@(DBRef key tv) -> do - applyTriggers [dbref] [Just (castErr r)] - t <- unsafeIOToSTM timeInteger - writeTVar tv . Exist $ Elem (castErr r) t t + applyTriggers [dbref] [Just (castErr r)] + t <- unsafeIOToSTM timeInteger + writeTVar tv . Exist $ Elem (castErr r) t t Nothing -> do From a9d1d52de2778091ca6e4e7f237baef997a742fa Mon Sep 17 00:00:00 2001 From: Hans Raaf Date: Wed, 17 Apr 2019 16:55:14 +0200 Subject: [PATCH 4/5] Removed an old incomplete module which never got finished as it seems. --- Data/Persistent/CollectionStream.hs | 107 ---------------------------- 1 file changed, 107 deletions(-) delete mode 100644 Data/Persistent/CollectionStream.hs diff --git a/Data/Persistent/CollectionStream.hs b/Data/Persistent/CollectionStream.hs deleted file mode 100644 index cad397b..0000000 --- a/Data/Persistent/CollectionStream.hs +++ /dev/null @@ -1,107 +0,0 @@ -{-# OPTIONS -XRecordWildCards - -XTypeSynonymInstances - -XFlexibleInstances - -XUndecidableInstances - -XDeriveDataTypeable #-} -module Data.Persistent.CollectionStream where -import Data.Typeable -import Control.Concurrent.STM(STM,atomically, retry) -import Control.Monad(when) -import Data.TCache.DefaultPersistence - -import Data.TCache -import System.IO.Unsafe -import Data.RefSerialize -import Data.RefSerialize -import Data.ByteString.Lazy.Char8 as B -import qualified Data.Vector as V -import Data.HashTable as HT -import Data.ByteString.Lazy.Char8 as B -import Data.List -import Data.Ord - ---import Debug.Trace ---a !> b= trace b a - ---assocs = sortBy (comparing fst) . unsafePerformIO . HT.toList --- ---instance Serialize NContext where --- showp(NContext n (c,s))= --- --- insertString n --- insertString $ showContext c --- readp= do --- n <- readp --- s <- readContent --- let c = unsafePerformIO newContext --- return $ NContext n (c,s) --- --- --- ---data NContext= NContext String (Context,ByteString) deriving Typeable --- ---instance Indexable NContext where --- key (NContext n _)= n - ---getExternalContext n= unsafePerformIO . atomically . newDBRef $ NContext n undefined - -instance Serialize a => Serializable a where - serialize= runW . showp - deserialize= runR readp - -instance Serialize a => Serialize (V.Vector a) where - showp= showp . V.toList - readp= readp >>= return . V.fromList - - - - -data Elem a= - Elem{ ename :: String - , eindex :: Int - , echilds :: Int - , emaxlength :: Int - , enelems :: Int - , econtent :: V.Vector (Either a, DBRef(Elem a)} - -get e = justify (readDBRef rcoll) $ "Not found: " ++ key e - -insertByKey re x= do - e@Elem{..} <- get re - let Just i' = findIndex (\y -> key y> key x ) econtent - i= i' -1 - case econtent ! 
(i-1) of - Left re' -> insertByKey re' x - Right t -> - if (key t== key x) - then writeDBRef re e{econtent= econtent //[(i,x)] } - else do - toins <- case enelems < emaxlength - True -> return $ Right x - False ->do - let childs echilds+1 - r <- newwDBRef e{eindex= childs+1, childs=0,enelems=1,econtent= singleton x} - writeDBRef re e{echilds=childs+1} - return $ Left r - let ncontent= update_ - econtent - (fromList [i..enelems]) - (cons toins (drop i' econtent)) - - writeDBRef re e{econtent= ncontent} - - - -lookup re k= do - e@Elem{..} <- get re - let Just z = findIndex (\y -> key y> k ) econtent - let i= i' -1 - case econtent ! i of - Right t -> assert (key t== k) $ return t - Left re' -> lookup re' x - - -push re x= do - e@Elem{..} <- get re - if emaxlength == enelems - From f069345e050845ccd440c253eccfb03529886c9d Mon Sep 17 00:00:00 2001 From: Hans Raaf Date: Fri, 19 Apr 2019 16:10:29 +0200 Subject: [PATCH 5/5] Even more cleanup - Using "package.yaml" instead of cabal file (but keeping file locations unchanged for this branch) - compiles with -Wall with no warnings. Notice: I couldn't get rid of some orphan instaces and disabled warnings for those files (for now). - extened and clarified some of the demos. - combined some information from old cabal file to README.md and created a new ChageLog.md file. - added an unfinished directory for the WIP (or abandonded) CollectionStream.hs. - made "buildDemos.sh" more reliable. This will be replaced by a pseudo test suite in the future. - Bumped the version to 0.13 (to make it possible to select the older version for comparisons. But I am thinking of some breaking changes too, because I really don't like some of the name shadowing (esp. "key"). But that may go away with qualified imports. Dunno. --- ChangeLog.md | 12 ++ Data/Persistent/Collection.hs | 62 +++++------ Data/Persistent/IDynamic.hs | 88 +++++++-------- Data/TCache.hs | 177 +++++++++++++++--------------- Data/TCache/DefaultPersistence.hs | 8 +- Data/TCache/Defs.hs | 39 ++++--- Data/TCache/IResource.hs | 10 -- Data/TCache/IndexQuery.hs | 55 +++++----- Data/TCache/IndexText.hs | 90 ++++++++------- Data/TCache/Memoization.hs | 25 +++-- Data/TCache/Triggers.hs | 26 ++--- TCache.cabal | 164 ++++++++++++++------------- demos/DBRef.hs | 90 ++++++++------- demos/DynamicSample.hs | 43 +++++--- demos/basicSample.hs | 18 ++- demos/caching.hs | 18 +-- demos/indexQuery.hs | 31 +++--- demos/indexText.hs | 3 +- demos/memoization.hs | 17 +-- demos/pr.hs | 15 +-- demos/pushpop.hs | 63 +++++++++-- demos/testnewdbref.hs | 10 +- demos/weakTest.hs | 13 ++- package.yaml | 176 +++++++++++++++++++++++++++++ stack.yaml | 2 +- unfinished/CollectionStream.hs | 107 ++++++++++++++++++ 26 files changed, 870 insertions(+), 492 deletions(-) create mode 100644 ChangeLog.md create mode 100644 package.yaml create mode 100644 unfinished/CollectionStream.hs diff --git a/ChangeLog.md b/ChangeLog.md new file mode 100644 index 0000000..a127d34 --- /dev/null +++ b/ChangeLog.md @@ -0,0 +1,12 @@ +* 0.13.x : (WIP) Major refaktoring and cleanups may break older code +* 0.12.1.0 : Dropped Data.Hashtable (deprecated). Now it uses the package hashtables +* 0.12.0.0 : space in index data in indexQuery.hs and IndexText.hs triggered errors in the AWS backend. The space has been changed by \'-\'. So rename the "index *" files in the TCache folder in order to be recognized. +* 0.11.0.0 : added setIndexParsist to define persistence for indexes by type. 
started the addition of readDBRefs, readResources and so on for simultaneous read, writes and deletes of objects of the same type. +* 0.10.2.0 : Added setDefaultPersist and modified the signature of setPersist in Data.TCache.DefaultPersistence. Fixed issues with ghc 7.6.3 +* 0.10.0.0 : version add memoization and a persistent and transactional collection/queue. +* 0.10.0.8 : subversion add cachedByKeySTM +* 0.10.0.9 : fixed an error in clearSyncCacheProc and SynWrite Asynchronous that checked the cache continuously +* 0.9.0.4 : Solves a bug in the management of weak pointers that evaporated registers from the cache +* 0.9.0.3 : Solves a lost registers bug. +* 0.9.0.1 : Solves a bug when object keys generate invalid filenames, and includes changes in defaultPersistence to further separate serialization from input-output. +* 0.9.0.0 : Adds full-text indexing and search, which is incorporated into the experimental query language. It also changes the default Persistence mechanism. Now `ByteString`s are used for serialization and deserialization. A `Serializable` class and a `Persist` structure decouples serialization from `ByteString` and read/write to files. Both can be redefined separately, so the default persistence could be changed with `setPersist` to write to blobs in a databases, for example. Default persistence now no longer has to be in files. diff --git a/Data/Persistent/Collection.hs b/Data/Persistent/Collection.hs index 9324834..76e0a1f 100644 --- a/Data/Persistent/Collection.hs +++ b/Data/Persistent/Collection.hs @@ -1,13 +1,6 @@ -{-# OPTIONS -XDeriveDataTypeable - -XTypeSynonymInstances - -XMultiParamTypeClasses - -XExistentialQuantification - -XOverloadedStrings - -XFlexibleInstances - -XUndecidableInstances - -XFunctionalDependencies - - #-} +{-# LANGUAGE DeriveDataTypeable, TypeSynonymInstances, + MultiParamTypeClasses, ExistentialQuantification, + OverloadedStrings, FlexibleInstances, UndecidableInstances #-} {- | A persistent, transactional collection with Queue interface as well as @@ -30,7 +23,7 @@ data.persistent collection implementar un btree sobre el -} module Data.Persistent.Collection ( -RefQueue(..), getQRef, +RefQueue, getQRef, pop,popSTM,pick, flush, flushSTM, pickAll, pickAllSTM, push,pushSTM, pickElem, pickElemSTM, readAll, readAllSTM, @@ -43,14 +36,11 @@ import Control.Monad import Data.TCache.DefaultPersistence import Data.TCache -import System.IO.Unsafe -import Data.RefSerialize -import Data.ByteString.Lazy.Char8 import Data.RefSerialize -import Debug.Trace - -a !> b= trace b a +--import Debug.Trace +--(!>) :: a -> String -> a +--a !> b= trace b a @@ -61,7 +51,7 @@ instance Indexable (Queue a) where -data Queue a= Queue {name :: String, imp :: [a], out :: [a]} deriving (Typeable) +data Queue a= Queue String [a] [a] deriving (Typeable) @@ -76,11 +66,11 @@ instance Serialize a => Serialize (Queue a) where - +queuePrefix :: String queuePrefix= "Queue#" -lenQPrefix= Prelude.length queuePrefix - +lenQPrefix :: Int +lenQPrefix= Prelude.length queuePrefix instance Serialize a => Serializable (Queue a ) where serialize = runW . showp @@ -123,7 +113,7 @@ flush = atomically . 
flushSTM -- | Version in the STM monad flushSTM :: (Typeable a, Serialize a) => RefQueue a -> STM () -flushSTM tv= delDBRef tv +flushSTM = delDBRef -- | Read the first element in the queue and delete it (pop) pop @@ -153,7 +143,7 @@ popSTM tv=do where doit (Queue n [x] [])= do - writeDBRef tv $ (Queue n [] []) + writeDBRef tv (Queue n [] []) return x doit (Queue _ [] []) = retry doit (Queue n imp []) = doit (Queue n [] $ Prelude.reverse imp) @@ -172,7 +162,7 @@ pick tv = atomically $ do doit (Queue _ [x] [])= return x doit (Queue _ [] []) = retry doit (Queue n imp []) = doit (Queue n [] $ Prelude.reverse imp) - doit (Queue n imp list ) = return $ Prelude.head list + doit (Queue _ _ list ) = return $ Prelude.head list -- | Push an element in the queue push :: (Typeable a, Serialize a) => RefQueue a -> a -> IO () @@ -181,7 +171,7 @@ push tv v = atomically $ pushSTM tv v -- | Version in the STM monad pushSTM :: (Typeable a, Serialize a) => RefQueue a -> a -> STM () pushSTM tv v= - readQRef tv >>= \ ((Queue n imp out)) -> writeDBRef tv $ Queue n (v : imp) out + readQRef tv >>= \ (Queue n imp out) -> writeDBRef tv $ Queue n (v : imp) out -- | Return the list of all elements in the queue. The queue remains unchanged pickAll :: (Typeable a, Serialize a) => RefQueue a -> IO [a] @@ -190,23 +180,23 @@ pickAll= atomically . pickAllSTM -- | Version in the STM monad pickAllSTM :: (Typeable a, Serialize a) => RefQueue a -> STM [a] pickAllSTM tv= do - (Queue name imp out) <- readQRef tv + (Queue _ imp out) <- readQRef tv return $ out ++ Prelude.reverse imp -- | Return the first element in the queue that has the given key pickElem ::(Indexable a,Typeable a, Serialize a) => RefQueue a -> String -> IO(Maybe a) -pickElem tv key= atomically $ pickElemSTM tv key +pickElem tv k= atomically $ pickElemSTM tv k -- | Version in the STM monad pickElemSTM :: (Indexable a,Typeable a, Serialize a) => RefQueue a -> String -> STM(Maybe a) -pickElemSTM tv key1= do - Queue name imp out <- readQRef tv - let xs= out ++ Prelude.reverse imp - when (not $ Prelude.null imp) $ writeDBRef tv $ Queue name [] xs - case Prelude.filter (\x-> key x == key1) xs of - [] -> return $ Nothing - (x:_) -> return $ Just x +pickElemSTM tv key1 = do + Queue name imp out <- readQRef tv + let xs = out ++ Prelude.reverse imp + unless (Prelude.null imp) $ writeDBRef tv $ Queue name [] xs + case Prelude.filter (\x -> key x == key1) xs of + [] -> return Nothing + (x:_) -> return $ Just x -- | Update the first element of the queue with a new element with the same key updateElem :: (Indexable a,Typeable a, Serialize a) @@ -239,12 +229,12 @@ readAllSTM tv= do deleteElem :: (Indexable a,Typeable a, Serialize a) => RefQueue a-> a -> IO () deleteElem tv x= atomically $ deleteElemSTM tv x --- | Verison in the STM monad +-- | Version in the STM monad deleteElemSTM :: (Typeable a, Serialize a,Indexable a) => RefQueue a-> a -> STM () deleteElemSTM tv x= do Queue name imp out <- readQRef tv let xs= out ++ Prelude.reverse imp - writeDBRef tv $ Queue name [] $ Prelude.filter (\x-> key x /= k) xs + writeDBRef tv $ Queue name [] $ Prelude.filter (\x2-> key x2 /= k) xs where k=key x diff --git a/Data/Persistent/IDynamic.hs b/Data/Persistent/IDynamic.hs index 4c53f8c..a6e9f0c 100644 --- a/Data/Persistent/IDynamic.hs +++ b/Data/Persistent/IDynamic.hs @@ -1,13 +1,8 @@ - {-# OPTIONS -XExistentialQuantification - -XUndecidableInstances - -XScopedTypeVariables - -XDeriveDataTypeable - -XTypeSynonymInstances - -XIncoherentInstances - -XOverloadedStrings - 
-XMultiParamTypeClasses - -XFunctionalDependencies - -XFlexibleInstances #-} +{-# LANGUAGE ExistentialQuantification, UndecidableInstances, + ScopedTypeVariables, DeriveDataTypeable, TypeSynonymInstances, + IncoherentInstances, OverloadedStrings, MultiParamTypeClasses, + FlexibleInstances #-} + {- | IDynamic is a indexable and serializable version of Dynamic. (See @Data.Dynamic@). It is used as containers of objects in the cache so any new datatype can be incrementally stored without recompilation. @@ -15,32 +10,21 @@ IDimamic provices methods for safe casting, besides serializaton, deserialireza -} module Data.Persistent.IDynamic where import Data.Typeable -import Unsafe.Coerce import System.IO.Unsafe -import Data.TCache -import Data.TCache.Defs -import Data.RefSerialize -import Data.Char (showLitChar) import Data.ByteString.Lazy.Char8 as B -import Data.Word -import Numeric (showHex, readHex) -import Control.Exception(handle, SomeException, ErrorCall) -import Control.Monad(replicateM) -import Data.Word -import Control.Concurrent.MVar +import Control.Exception(handle, SomeException) import Data.IORef -import Data.Map as M(empty) import Data.RefSerialize --import Debug.Trace --(!>)= flip trace -data IDynamic = IDyn (IORef IDynType) deriving Typeable +newtype IDynamic = IDyn (IORef IDynType) deriving Typeable -data IDynType= forall a w r.(Typeable a, Serialize a) +data IDynType= forall a.(Typeable a, Serialize a) => DRight !a | DLeft !(ByteString ,(Context, ByteString)) @@ -49,6 +33,7 @@ data IDynType= forall a w r.(Typeable a, Serialize a) newtype Save= Save ByteString deriving Typeable +tosave :: IDynamic -> IDynamic tosave d@(IDyn r)= unsafePerformIO $ do mr<- readIORef r case mr of @@ -61,12 +46,18 @@ instance Serialize Save where readp = error "readp not impremented for Save" +errorfied :: String -> String -> a errorfied str str2= error $ str ++ ": IDynamic object not reified: "++ str2 +dynPrefix :: String dynPrefix= "Dyn" + +dynPrefixSp :: ByteString dynPrefixSp= append (pack dynPrefix) " " + +notreified :: ByteString notreified = pack $ dynPrefix ++" 0" @@ -77,7 +68,7 @@ instance Serialize IDynamic where case unsafePerformIO $ readIORef t of DRight x -> do -- insertString $ pack dynPrefix - c <- getWContext + _ <- getWContext showpx <- rshowps x -- showpText . fromIntegral $ B.length showpx showp $ unpack showpx @@ -112,9 +103,11 @@ instance Show IDynamic where +toIDyn :: (Typeable a, Serialize a) => a -> IDynamic toIDyn x= IDyn . unsafePerformIO . newIORef $ DRight x -- | check if a (possibly polimorphic) value within a IDynamic value has the given serialization" +serializedEqual :: IDynamic -> ByteString -> Bool serializedEqual (IDyn r) str= unsafePerformIO $ do t <- readIORef r case t of @@ -128,29 +121,30 @@ fromIDyn x= case safeFromIDyn x of safeFromIDyn :: (Typeable a, Serialize a) => IDynamic -> Either String a -safeFromIDyn (d@(IDyn r))= final where - final= unsafePerformIO $ do - t <- readIORef r - case t of - DRight x -> return $ case cast x of - Nothing -> Left $ "fromIDyn: unable to extract from " - ++ show d ++ " something of type: " - ++ (show . typeOf $ fromRight final) - Just x -> Right x - where - fromRight (Right x)= x - - - DLeft (str, c) -> - handle (\(e :: SomeException) -> return $ Left (show e)) $ -- !> ("safeFromIDyn : "++ show e)) $ - do - let v= runRC c rreadp str -- !> unpack str - writeIORef r $! DRight v -- !> ("***reified "++ unpack str) - return $! 
Right v -- !> ("*** end reified " ++ unpack str) +safeFromIDyn d@(IDyn r) = final + where + final = + unsafePerformIO $ do + t <- readIORef r + case t of + DRight x -> + return $ + case cast x of + Nothing -> + Left $ + "fromIDyn: unable to extract from " ++ + show d ++ " something of type: " ++ (show . typeOf $ fromRight final) + Just x' -> Right x' + where fromRight (Right x') = x' + fromRight (Left _') = error "this will never happen?" + DLeft (str, c) -> + handle (\(e :: SomeException) -> return $ Left (show e)) $ -- !> ("safeFromIDyn : "++ show e)) $ + do + let v = runRC c rreadp str -- !> unpack str + writeIORef r $! DRight v -- !> ("***reified "++ unpack str) + return (Right v) -- !> ("*** end reified " ++ unpack str) reifyM :: (Typeable a,Serialize a) => IDynamic -> a -> IO a -reifyM dyn v = do - let v'= fromIDyn dyn - return $ v' `seq` v' +reifyM dyn _ = return $ fromIDyn dyn diff --git a/Data/TCache.hs b/Data/TCache.hs index 6e5dbc9..355fd6c 100644 --- a/Data/TCache.hs +++ b/Data/TCache.hs @@ -1,3 +1,4 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} {-# LANGUAGE ScopedTypeVariables, ExistentialQuantification, FlexibleInstances, UndecidableInstances #-} @@ -293,6 +294,7 @@ where import GHC.Conc +import GHC.MVar(MVar) import Control.Monad(when, void) import qualified Data.HashTable.IO as H(BasicHashTable, new, insert, lookup, toList) import Data.IORef(IORef, newIORef, readIORef, writeIORef) @@ -360,20 +362,20 @@ numElems= do statElems :: IO (Int, Int, Int) statElems = do (cache, lastSync) <- readIORef refcache - elems <- H.toList cache - (tosave, elems, size) <- atomically $ extract elems lastSync + clist <- H.toList cache + (tosave, elems, size) <- atomically $ extract clist lastSync counted <- mapM count elems return (size, length tosave, sum counted) where - count (CacheElem _ w)= do + count (CacheElem _ w) = do mr <- deRefWeak w case mr of Just (DBRef _ tv) -> do - r <- readTVarIO tv - case r of - Exist (Elem x _ mt) -> return 1 - DoNotExist -> return 0 - NotRead -> return 0 + r <- readTVarIO tv + case r of + Exist Elem {} -> return 1 + DoNotExist -> return 0 + NotRead -> return 0 Nothing -> finalize w >> return 0 -- deRefWeakSTM = unsafeIOToSTM . deRefWeak @@ -384,7 +386,7 @@ statElems = do -- H.delete cache k -- !> ("delete " ++ k) fixToCache :: (IResource a, Typeable a) => DBRef a -> IO () -fixToCache dbref@(DBRef k tv)= do +fixToCache dbref@(DBRef k _)= do (cache, _) <- readIORef refcache w <- mkWeakPtr dbref $ Just $ fixToCache dbref H.insert cache k (CacheElem (Just dbref) w) @@ -393,7 +395,7 @@ fixToCache dbref@(DBRef k tv)= do -- | Return the reference value. If it is not in the cache, it is fetched -- from the database. 
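-- ---------------------------------------------------------------------------
-- [Editorial sketch, not part of this patch] The hunks below touch 'readDBRef'
-- and the new batch variant 'readDBRefs'. As a usage illustration, here is a
-- small standalone example file: cached references are answered from memory
-- and the still-missing keys are fetched together through 'readResourcesByKey'.
-- The 'Item' type, its instances and the keys are assumptions made only for
-- this sketch, and it presumes 'readDBRefs' is exported, as the changelog
-- entry above indicates.
-- ---------------------------------------------------------------------------
{-# LANGUAGE DeriveDataTypeable #-}
import Data.Typeable
import Data.TCache
import Data.TCache.DefaultPersistence
import Data.ByteString.Lazy.Char8 (pack, unpack)

data Item = Item { itemId :: String, qty :: Int }
            deriving (Read, Show, Typeable)

instance Indexable Item where key = itemId

instance Serializable Item where
  serialize   = pack . show
  deserialize = read . unpack

main :: IO ()
main = do
  -- create three registers, then read them all back in a single transaction
  mapM_ (atomically . newDBRef) [Item ("i" ++ show n) n | n <- [1 .. 3 :: Int]]
  rs <- atomically $ readDBRefs [getDBRef ("i" ++ show n) :: DBRef Item | n <- [1 .. 3 :: Int]]
  print rs
  syncCache
-- ---------------------------------------------------------------------------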
 readDBRef :: (IResource a, Typeable a) => DBRef a -> STM (Maybe a)
-readDBRef dbref@(DBRef key tv)= do
+readDBRef (DBRef key1 tv)= do
   r <- readTVar tv
   case r of
        Exist (Elem x _ mt) -> do
@@ -402,8 +404,8 @@ readDBRef dbref@(DBRef key tv)= do
                return $ Just x
        DoNotExist -> return Nothing
        NotRead -> do
-            r <- safeIOToSTM $ readResourceByKey key
-            case r of
+            r1 <- safeIOToSTM $ readResourceByKey key1
+            case r1 of
               Nothing -> writeTVar tv DoNotExist >> return Nothing
               Just x  -> do
                    t <- unsafeIOToSTM timeInteger
@@ -413,7 +415,7 @@ readDBRef dbref@(DBRef key tv)= do
 -- | Read multiple DBRefs in a single request using the new 'readResourcesByKey'
 readDBRefs :: (IResource a, Typeable a) => [DBRef a] -> STM [Maybe a]
 readDBRefs dbrefs= do
-  let mf (DBRef key tv)= do
+  let mf (DBRef key1 tv)= do
         r <- readTVar tv
         case r of
           Exist (Elem x _ mt) -> do
@@ -421,14 +423,14 @@ readDBRefs dbrefs= do
             writeTVar tv . Exist $ Elem x t mt
             return $ Right $ Just x
           DoNotExist -> return $ Right Nothing
-          NotRead -> return $ Left key
+          NotRead -> return $ Left key1
   inCache <- mapM mf dbrefs
-  let pairs = foldr(\pair@(x,dbr) xs -> case x of Left k -> pair:xs; _ -> xs ) [] $ zip inCache dbrefs
+  let pairs = foldr(\pair@(x,_) xs -> case x of Left _ -> pair:xs; _ -> xs ) [] $ zip inCache dbrefs
   let (toReadKeys, dbrs) = unzip pairs
   let fromLeft (Left k)= k
-      formLeft _ = error "this will never happen"
+      fromLeft _ = error "this will never happen"
   rs <- safeIOToSTM . readResourcesByKey $ map fromLeft toReadKeys
-  let processTVar (r, DBRef key tv)=
+  let processTVar (r, DBRef _ tv)=
         case r of
           Nothing -> writeTVar tv DoNotExist
           Just x -> do
@@ -438,6 +440,8 @@ readDBRefs dbrefs= do
   mapM_ processTVar $ zip rs dbrs
   let mix (Right x:xs) ys = x:mix xs ys
       mix (Left _:xs) (y:ys)= y:mix xs ys
+      mix [] _ = []   -- every reference has been answered; close the result list
+      mix (Left _:_) [] = error "readDBRefs: fewer backend results than missing keys"
   return $ mix inCache rs
 
@@ -449,10 +453,10 @@ readDBRefs dbrefs= do
 -- serialization time can cause inconsistencies in the database.
 -- In future releases this will be enforced.
 writeDBRef :: (IResource a, Typeable a) => DBRef a -> a -> STM ()
-writeDBRef dbref@(DBRef key tv) x= x `seq` do
+writeDBRef dbref@(DBRef key1 tv) x= x `seq` do
  let newkey= keyResource x
- if newkey /= key
-   then error $ "writeDBRef: law of key conservation broken: old , new= " ++ key ++ " , "++newkey
+ if newkey /= key1
+   then error $ "writeDBRef: law of key conservation broken: old , new= " ++ key1 ++ " , "++newkey
   else do
     applyTriggers [dbref] [Just x]
     t <- unsafeIOToSTM timeInteger
@@ -460,25 +464,15 @@ writeDBRef dbref@(DBRef key tv) x= x `seq` do
     writeTVar tv $! Exist $! 
Elem x t t return() - -instance Show (DBRef a) where - show (DBRef key _)= "DBRef \""++ key ++ "\"" - instance (IResource a, Typeable a) => Read (DBRef a) where - readsPrec n str1= readit str + readsPrec _ str1= readit str where str = dropWhile isSpace str1 - readit ('D':'B':'R':'e':'f':' ':'\"':str1)= - let (key,nstr) = break (== '\"') str1 - in [( getDBRef key :: DBRef a, tail nstr)] + readit ('D':'B':'R':'e':'f':' ':'\"':str2)= + let (key1,nstr) = break (== '\"') str2 + in [( getDBRef key1 :: DBRef a, tail nstr)] readit _ = [] -instance Eq (DBRef a) where - DBRef k _ == DBRef k' _ = k==k' - -instance Ord (DBRef a) where - compare (DBRef k _) (DBRef k' _) = compare k k' - -- | Return the key of the object referenced by the DBRef keyObjDBRef :: DBRef a -> String keyObjDBRef (DBRef k _)= k @@ -490,33 +484,34 @@ keyObjDBRef (DBRef k _)= k -- This also avoids unnecessary cache lookups of the referenced objects. {-# NOINLINE getDBRef #-} getDBRef :: (Typeable a, IResource a) => String -> DBRef a -getDBRef key= unsafePerformIO $! getDBRef1 $! key where +getDBRef key1 = unsafePerformIO $! getDBRef1 $! key1 where getDBRef1 :: (Typeable a, IResource a) => String -> IO (DBRef a) - getDBRef1 key = do + getDBRef1 key2 = do (cache,_) <- readIORef refcache -- !> ("getDBRef "++ key) takeMVar getRefFlag - r <- H.lookup cache key + r <- H.lookup cache key2 case r of Just (CacheElem mdb w) -> do putMVar getRefFlag () mr <- deRefWeak w case mr of - Just dbref@(DBRef _ tv) -> + Just dbref@(DBRef _ _) -> case mdb of Nothing -> return $! castErr dbref -- !> "just" Just _ -> do - H.insert cache key (CacheElem Nothing w) --to notify when the DBREf leave its reference + H.insert cache key2 (CacheElem Nothing w) --to notify when the DBREf leave its reference return $! castErr dbref - Nothing -> finalize w >> getDBRef1 key -- !> "finalize" -- the weak pointer has not executed his finalizer + Nothing -> finalize w >> getDBRef1 key2 -- !> "finalize" -- the weak pointer has not executed his finalizer Nothing -> do tv <- newTVarIO NotRead -- !> "Nothing" - dbref <- evaluate $ DBRef key tv + dbref <- evaluate $ DBRef key2 tv w <- mkWeakPtr dbref . 
Just $ fixToCache dbref - H.insert cache key (CacheElem Nothing w) + H.insert cache key2 (CacheElem Nothing w) putMVar getRefFlag () return dbref +getRefFlag :: MVar () {-# NOINLINE getRefFlag #-} getRefFlag= unsafePerformIO $ newMVar () @@ -578,7 +573,7 @@ newDBRef x = do mr <- readDBRef ref case mr of Nothing -> writeDBRef ref x >> return ref -- !> " write" - Just r -> return ref -- !> " non write" + Just _ -> return ref -- !> " non write" --newDBRef :: (IResource a, Typeable a) => a -> STM (DBRef a) --newDBRef x = do @@ -600,7 +595,7 @@ newDBRef x = do -- | Delete the content of the DBRef form the cache and from permanent storage delDBRef :: (IResource a, Typeable a) => DBRef a -> STM() -delDBRef dbref@(DBRef k tv)= do +delDBRef dbref@(DBRef _ tv)= do mr <- readDBRef dbref case mr of Just x -> do @@ -621,6 +616,7 @@ delDBRef dbref@(DBRef k tv)= do -- or -- -- @result <- readDBRef ref \`onNothing\` return someDefaultValue@ +onNothing :: Monad m => m (Maybe b) -> m b -> m b onNothing io onerr= do my <- io case my of @@ -633,38 +629,40 @@ flushDBRef :: (IResource a, Typeable a) =>DBRef a -> STM() flushDBRef (DBRef _ tv)= writeTVar tv NotRead -- | flush the element with the given key -flushKey key= do - (cache,time) <- unsafeIOToSTM $ readIORef refcache - c <- unsafeIOToSTM $ H.lookup cache key +flushKey :: String -> STM () +flushKey key1= do + (cache, _) <- unsafeIOToSTM $ readIORef refcache + c <- unsafeIOToSTM $ H.lookup cache key1 case c of Just (CacheElem _ w) -> do mr <- unsafeIOToSTM $ deRefWeak w case mr of - Just (DBRef k tv) -> writeTVar tv NotRead - Nothing -> unsafeIOToSTM (finalize w) >> flushKey key + Just (DBRef _ tv) -> writeTVar tv NotRead + Nothing -> unsafeIOToSTM (finalize w) >> flushKey key1 Nothing -> return () -- | label the object as not existent in database -invalidateKey key= do - (cache,time) <- unsafeIOToSTM $ readIORef refcache - c <- unsafeIOToSTM $ H.lookup cache key +invalidateKey :: String -> STM () +invalidateKey key1= do + (cache, _) <- unsafeIOToSTM $ readIORef refcache + c <- unsafeIOToSTM $ H.lookup cache key1 case c of Just (CacheElem _ w) -> do mr <- unsafeIOToSTM $ deRefWeak w case mr of - Just (DBRef k tv) -> writeTVar tv DoNotExist - Nothing -> unsafeIOToSTM (finalize w) >> flushKey key + Just (DBRef _ tv) -> writeTVar tv DoNotExist + Nothing -> unsafeIOToSTM (finalize w) >> flushKey key1 Nothing -> return () -- | drops the entire cache. flushAll :: STM () flushAll = do - (cache,time) <- unsafeIOToSTM $ readIORef refcache + (cache, _) <- unsafeIOToSTM $ readIORef refcache elms <- unsafeIOToSTM $ H.toList cache - mapM_ (del cache) elms + mapM_ del elms where - del cache ( _ , CacheElem _ w)= do + del ( _ , CacheElem _ w)= do mr <- unsafeIOToSTM $ deRefWeak w case mr of Just (DBRef _ tv) -> writeTVar tv NotRead @@ -778,24 +776,25 @@ takeDBRef cache flags x =do -- unsafeIOToSTM $ readResourceByKey keyr where - readToCache flags cache key= do + readToCache flags1 cache1 key1= do mr <- readResource x case mr of Nothing -> return Nothing Just r2 -> do ti <- timeInteger tvr <- newTVarIO . Exist $ Elem r2 ti (-1) - case flags of - NoAddToHash -> return . Just $ DBRef key tvr + case flags1 of + NoAddToHash -> return . Just $ DBRef key1 tvr AddToHash -> do - dbref <- evaluate $ DBRef key tvr + dbref <- evaluate $ DBRef key1 tvr w <- mkWeakPtr dbref . 
Just $ fixToCache dbref - H.insert cache key (CacheElem (Just dbref) w) + H.insert cache1 key1 (CacheElem (Just dbref) w) return $ Just dbref -- !> ("readToCache "++ key) +timeInteger :: IO Integer timeInteger= do TOD t _ <- getClockTime return t @@ -814,7 +813,7 @@ releaseTPVar cache r =do mr <- unsafeIOToSTM $ deRefWeak w case mr of Nothing -> unsafeIOToSTM (finalize w) >> releaseTPVar cache r - Just dbref@(DBRef key tv) -> do + Just dbref@(DBRef _ tv) -> do applyTriggers [dbref] [Just (castErr r)] t <- unsafeIOToSTM timeInteger writeTVar tv . Exist $ Elem (castErr r) t t @@ -841,14 +840,14 @@ delListFromHash cache= mapM_ del where del :: IResource a => a -> STM () del x= do - let key= keyResource x - mr <- unsafeIOToSTM $ H.lookup cache key + let key1= keyResource x + mr <- unsafeIOToSTM $ H.lookup cache key1 case mr of Nothing -> return () Just (CacheElem _ w) -> do - mr <- unsafeIOToSTM $ deRefWeak w - case mr of - Just dbref@(DBRef _ tv) -> + mr1 <- unsafeIOToSTM $ deRefWeak w + case mr1 of + Just (DBRef _ tv) -> writeTVar tv DoNotExist Nothing -> unsafeIOToSTM (finalize w) >> del x @@ -869,14 +868,15 @@ clearSyncCacheProc :: -> (Integer -> Integer-> Integer-> Bool) -- ^ The user-defined check-for-cleanup-from-cache for each object. 'defaultCheck' is an example -> Int -- ^ The max number of objects in the cache, if more, the cleanup starts -> IO ThreadId -- ^ Identifier of the thread created -clearSyncCacheProc time check sizeObjects= forkIO clear +clearSyncCacheProc time check1 sizeObjects= forkIO clear where clear = do threadDelay $ time * 1000000 handle ( \ (e :: SomeException)-> hPutStr stderr (show e) >> clear ) $ do - clearSyncCache check sizeObjects -- !> "CLEAR" + clearSyncCache check1 sizeObjects -- !> "CLEAR" clear +criticalSection :: MVar b -> IO c -> IO c criticalSection mv f= bracket (takeMVar mv) (putMVar mv) @@ -906,6 +906,7 @@ data SyncMode= Synchronous -- ^ sync state to permanent storage when `atomical {-# NOINLINE tvSyncWrite #-} +tvSyncWrite :: IORef (SyncMode, Maybe a) tvSyncWrite= unsafePerformIO $ newIORef (Synchronous, Nothing) -- | Specify the cache synchronization policy with permanent storage. See `SyncMode` for details @@ -916,8 +917,8 @@ syncWrite mode = do case mode of Synchronous -> modeWrite SyncManual -> modeWrite - Asynchronous time check maxsize -> do - th <- void $ clearSyncCacheProc time check maxsize + Asynchronous time check1 maxsize -> do + th <- void $ clearSyncCacheProc time check1 maxsize writeIORef tvSyncWrite (mode, Just th) where modeWrite = writeIORef tvSyncWrite (mode, Nothing) @@ -945,32 +946,32 @@ atomicallySync proc=do -- The deletion depends on the check criteria, expressed by the first parameter. -- 'defaultCheck' is the one implemented to be passed by default. Look at it to understand the clearing criteria. 
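-- ---------------------------------------------------------------------------
-- [Editorial sketch, not part of this patch] 'clearSyncCache' below is driven
-- by a check predicate of type Integer -> Integer -> Integer -> Bool, applied
-- to the current time, the element's last access time and the last sync time,
-- all in seconds. This is a minimal example of installing the cleanup thread
-- with a user-defined predicate instead of 'defaultCheck'; the thresholds are
-- illustrative assumptions only.
-- ---------------------------------------------------------------------------
import Data.TCache (clearSyncCacheProc, syncCache)
import Control.Concurrent (threadDelay)

-- drop (and save) an element when it has not been accessed for over a minute
staleCheck :: Integer -> Integer -> Integer -> Bool
staleCheck now lastAccess _lastSync = now - lastAccess > 60

main :: IO ()
main = do
  _ <- clearSyncCacheProc 10 staleCheck 1000  -- sweep every 10 s; start cleaning above ~1000 cached objects
  threadDelay 30000000                        -- ... application work ...
  syncCache                                   -- final synchronous flush
-- ---------------------------------------------------------------------------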
clearSyncCache :: (Integer -> Integer-> Integer-> Bool)-> Int -> IO () -clearSyncCache check sizeObjects= criticalSection saving $ do +clearSyncCache check1 sizeObjects= criticalSection saving $ do (cache,lastSync) <- readIORef refcache t <- timeInteger elems <- H.toList cache - (tosave, elems, size) <- atomically $ extract elems lastSync + (tosave, elems1, size) <- atomically $ extract elems lastSync save tosave - when (size > sizeObjects) $ forkIO (filtercache t cache lastSync elems) >> performGC + when (size > sizeObjects) $ forkIO (filtercache t cache lastSync elems1) >> performGC writeIORef refcache (cache, t) where -- delete elems from the cache according with the checking criteria - filtercache t cache lastSync = mapM_ filter + filtercache t cache lastSync = mapM_ filter1 where - filter (CacheElem Nothing w)= return() --alive because the dbref is being referenced elsewere - filter (CacheElem (Just (DBRef key _)) w) = do + filter1 (CacheElem Nothing _)= return() --alive because the dbref is being referenced elsewere + filter1 (CacheElem (Just (DBRef key1 _)) w) = do mr <- deRefWeak w case mr of Nothing -> finalize w Just (DBRef _ tv) -> atomically $ do r <- readTVar tv case r of - Exist (Elem x lastAccess _ ) -> - when (check t lastAccess lastSync) $ do - unsafeIOToSTM . H.insert cache key $ CacheElem Nothing w + Exist (Elem _ lastAccess _ ) -> + when (check1 t lastAccess lastSync) $ do + unsafeIOToSTM . H.insert cache key1 $ CacheElem Nothing w writeTVar tv NotRead _ -> return() @@ -994,6 +995,7 @@ defaultCheck now lastAccess lastSync halftime= now- (now-lastSync) `div` 2 {-# NOINLINE refConditions #-} +refConditions :: IORef (IO (), IO ()) refConditions= unsafePerformIO $ newIORef (return(), return()) setConditions :: IO() -> IO() -> IO() @@ -1002,8 +1004,10 @@ setConditions :: IO() -> IO() -> IO() setConditions pre post= writeIORef refConditions (pre, post) {-# NOINLINE saving #-} +saving :: MVar Bool saving= unsafePerformIO $ newMVar False +save :: Foldable t => t Filtered -> IO () save tosave = do (pre, post) <- readIORef refConditions pre -- !> (concatMap (\(Filtered x) -> keyResource x)tosave) @@ -1014,6 +1018,7 @@ save tosave = do data Filtered= forall a.(IResource a)=> Filtered a +extract :: [(a, CacheElem)] -> Integer -> STM ([Filtered], [CacheElem], Int) extract elems lastSave= filter1 [] [] (0:: Int) elems where filter1 sav val n []= return (sav, val, n) @@ -1021,16 +1026,16 @@ extract elems lastSave= filter1 [] [] (0:: Int) elems mr <- unsafeIOToSTM $ deRefWeak w case mr of Nothing -> unsafeIOToSTM (finalize w) >> filter1 sav val n rest - Just (DBRef key tvr) -> + Just (DBRef _ tvr) -> let tofilter = case mybe of Just _ -> ch:val Nothing -> val in do r <- readTVar tvr case r of - Exist (Elem r _ modTime) -> + Exist (Elem r1 _ modTime) -> if modTime >= lastSave - then filter1 (Filtered r:sav) tofilter (n+1) rest + then filter1 (Filtered r1:sav) tofilter (n+1) rest else filter1 sav tofilter (n+1) rest -- !> ("rejected->" ++ keyResource r) _ -> filter1 sav tofilter (n+1) rest @@ -1046,7 +1051,7 @@ extract elems lastSave= filter1 [] [] (0:: Int) elems safeIOToSTM :: IO a -> STM a safeIOToSTM req= unsafeIOToSTM $ do tv <- newEmptyMVar - forkIO $ (req >>= putMVar tv . Right) + _ <- forkIO $ (req >>= putMVar tv . 
Right) `Control.Exception.catch` (\(e :: SomeException) -> putMVar tv $ Left e ) r <- takeMVar tv @@ -1054,5 +1059,3 @@ safeIOToSTM req= unsafeIOToSTM $ do Right x -> return x Left e -> throw e - - diff --git a/Data/TCache/DefaultPersistence.hs b/Data/TCache/DefaultPersistence.hs index f5b4dee..1580582 100644 --- a/Data/TCache/DefaultPersistence.hs +++ b/Data/TCache/DefaultPersistence.hs @@ -1,3 +1,4 @@ +{-# OPTIONS_GHC -fno-warn-orphans #-} {-# LANGUAGE FlexibleInstances, UndecidableInstances, MultiParamTypeClasses, ExistentialQuantification, ScopedTypeVariables #-} @@ -17,18 +18,13 @@ Indexable(..) ,filePersist ,Persist(..)) where -import System.IO.Unsafe import Data.Typeable -import Data.Maybe(fromJust) import Data.TCache.Defs import Data.TCache - - - instance (Typeable a, Indexable a, Serializable a) => IResource a where keyResource = key - writeResource =defWriteResource + writeResource = defWriteResource readResourceByKey = defReadResourceByKey delResource = defDelResource diff --git a/Data/TCache/Defs.hs b/Data/TCache/Defs.hs index c561c01..3873e01 100644 --- a/Data/TCache/Defs.hs +++ b/Data/TCache/Defs.hs @@ -7,16 +7,12 @@ module Data.TCache.Defs where import Data.Typeable import Control.Concurrent.STM(TVar) -import Data.TCache.IResource - import System.IO.Unsafe import Data.IORef import System.Directory -import Control.Monad(when,replicateM) import System.IO import System.IO.Error import Control.Exception as Exception -import Control.Concurrent import Data.List(elemIndices,isInfixOf) import Data.Maybe(fromJust, fromMaybe) @@ -29,16 +25,24 @@ type AccessTime = Integer type ModifTime = Integer -data Status a= NotRead | DoNotExist | Exist a deriving Typeable +data Status a = NotRead | DoNotExist | Exist a deriving Typeable + +data Elem a = Elem !a !AccessTime !ModifTime deriving Typeable -data Elem a= Elem !a !AccessTime !ModifTime deriving Typeable +type TPVar a = TVar (Status(Elem a)) -type TPVar a= TVar (Status(Elem a)) +data DBRef a = DBRef !String !(TPVar a) deriving Typeable -data DBRef a= DBRef !String !(TPVar a) deriving Typeable +instance Show (DBRef a) where + show (DBRef key1 _)= "DBRef \""++ key1 ++ "\"" +instance Eq (DBRef a) where + DBRef k _ == DBRef k' _ = k == k' +instance Ord (DBRef a) where + compare (DBRef k _) (DBRef k' _) = compare k k' +castErr :: (Typeable a1, Typeable a2) => a1 -> a2 castErr a= r where r = fromMaybe (error $ "Type error: " ++ show (typeOf a) ++ " does not match " ++ show (typeOf r) @@ -63,9 +67,9 @@ instance Indexable Car where key Car{cname= n} = \"Car \" ++ n @ -} class Indexable a where - key:: a -> String + key :: a -> String defPath :: a -> String -- ^ additional extension for default file paths. - -- IMPORTANT: defPath must depend on the datatype, not the value (must be constant). Default is ".tcachedata/" + -- IMPORTANT: defPath must depend on the datatype, not the value (must be constant). Default is ".tcachedata/" defPath = const ".tcachedata/" --instance IResource a => Indexable a where @@ -101,7 +105,7 @@ The performance of serialization is not critical. 
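-- ---------------------------------------------------------------------------
-- [Editorial sketch, not part of this patch] A minimal instance of the
-- 'Serializable' class declared below, using read/show as the demos do, plus
-- a swap of the default backend with 'setDefaultPersist'. The 'Note' type and
-- the toy in-memory 'Persist' are assumptions made only for this sketch; the
-- library itself ships 'filePersist'.
-- ---------------------------------------------------------------------------
{-# LANGUAGE DeriveDataTypeable #-}
import Data.Typeable
import Data.IORef
import qualified Data.Map as Map
import qualified Data.ByteString.Lazy.Char8 as B
import Data.TCache
import Data.TCache.DefaultPersistence

data Note = Note { noteId :: String, body :: String }
            deriving (Read, Show, Typeable)

instance Indexable Note where
  key Note{ noteId = k } = "Note " ++ k

instance Serializable Note where
  serialize   = B.pack . show
  deserialize = read . B.unpack
  -- setPersist is left at its default (Nothing), so the backend installed
  -- with setDefaultPersist below is used

-- a toy Persist that keeps serialized registers in an IORef instead of files
memoryPersist :: IO Persist
memoryPersist = do
  store <- newIORef Map.empty
  return Persist
    { readByKey = \k   -> fmap (Map.lookup k) (readIORef store)
    , write     = \k v -> modifyIORef' store (Map.insert k v)
    , delete    = \k   -> modifyIORef' store (Map.delete k)
    }

main :: IO ()
main = do
  memoryPersist >>= setDefaultPersist        -- must happen before any other TCache call
  let r = getDBRef . key $ Note "n1" undefined :: DBRef Note
  atomically $ writeDBRef r (Note "n1" "hello")
  atomically (readDBRef r) >>= print
-- ---------------------------------------------------------------------------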
class Serializable a where serialize :: a -> B.ByteString deserialize :: B.ByteString -> a - deserialize= error "No deserialization defined for your data" + deserialize = error "No deserialization defined for your data" deserialKey :: String -> B.ByteString -> a deserialKey _ = deserialize setPersist :: a -> Maybe Persist -- ^ `defaultPersist` if Nothing @@ -126,11 +130,13 @@ data Persist = Persist{ , delete :: Key -> IO()} -- ^ delete -- | Implements default default-persistence of objects in files with their keys as filenames +filePersist :: Persist filePersist = Persist {readByKey= defaultReadByKey ,write = defaultWrite ,delete = defaultDelete} +defaultPersistIORef :: IORef Persist {-# NOINLINE defaultPersistIORef #-} defaultPersistIORef = unsafePerformIO $ newIORef filePersist @@ -138,11 +144,14 @@ defaultPersistIORef = unsafePerformIO $ newIORef filePersist -- @setPersist= const Nothing@. By default it is 'filePersist' -- -- this statement must be the first one before any other TCache call +setDefaultPersist :: Persist -> IO () setDefaultPersist = writeIORef defaultPersistIORef {-# NOINLINE getDefaultPersist #-} +getDefaultPersist :: Persist getDefaultPersist = unsafePerformIO $ readIORef defaultPersistIORef +getPersist :: (Serializable a, Typeable a) => a -> Persist getPersist x= unsafePerformIO $ case setPersist x of Nothing -> readIORef defaultPersistIORef Just p -> return p @@ -173,6 +182,7 @@ defaultReadByKey k= iox -- !> "defaultReadByKey" defaultWrite :: String-> B.ByteString -> IO() defaultWrite = safeWrite +safeWrite :: FilePath -> B.ByteString -> IO () safeWrite filename str= handle handler $ B.writeFile filename str -- !> ("write "++filename) where handler e-- (e :: IOError) @@ -195,7 +205,7 @@ defaultDelete filename = where handler :: String -> IOException -> IO () - handler file e + handler _ e | isDoesNotExistError e= return () --`debug` "isDoesNotExistError" | isAlreadyInUseError e= do hPutStrLn stderr $ "defaultDelResource: busy" ++ " in file: " ++ filename ++ " retrying" @@ -208,6 +218,7 @@ defaultDelete filename = +defReadResourceByKey :: (Indexable a, Serializable a, Typeable a) => String -> IO (Maybe a) defReadResourceByKey k= iox where iox= do let Persist f _ _ = getPersist x @@ -216,20 +227,22 @@ defReadResourceByKey k= iox where file= defPath x ++ k x= undefined `asTypeOf` fromJust (unsafePerformIO iox) +defWriteResource :: (Indexable a, Serializable a, Typeable a) => a -> IO () defWriteResource s= do let Persist _ f _ = getPersist s f (defPath s ++ key s) $ serialize s +defDelResource :: (Indexable a, Serializable a, Typeable a) => a -> IO () defDelResource s= do let Persist _ _ f = getPersist s f $ defPath s ++ key s -- | Strict read from file, needed for default file persistence +readFileStrict :: FilePath -> IO B.ByteString readFileStrict f = openFile f ReadMode >>= \ h -> readIt h `finally` hClose h where readIt h= do s <- hFileSize h let n= fromIntegral s B.hGet h n - diff --git a/Data/TCache/IResource.hs b/Data/TCache/IResource.hs index fd17edd..03d4453 100644 --- a/Data/TCache/IResource.hs +++ b/Data/TCache/IResource.hs @@ -2,16 +2,6 @@ , UndecidableInstances, FlexibleInstances #-} module Data.TCache.IResource where -import Data.Typeable -import System.IO.Unsafe -import Control.Concurrent.STM -import Control.Concurrent -import Control.Exception as Exception -import System.IO -import System.IO.Error -import Control.Monad(when,replicateM) -import Data.List(isInfixOf) - {- | Must be defined for every object to be cached. 
-} diff --git a/Data/TCache/IndexQuery.hs b/Data/TCache/IndexQuery.hs index 6b55d42..4b2795e 100644 --- a/Data/TCache/IndexQuery.hs +++ b/Data/TCache/IndexQuery.hs @@ -91,11 +91,8 @@ import Data.TCache import Data.TCache.Defs import Data.List import Data.Typeable -import Control.Concurrent.STM import Data.Maybe (catMaybes, fromMaybe) import qualified Data.Map as M -import Data.IORef -import System.IO.Unsafe import Data.ByteString.Lazy.Char8(pack, unpack) @@ -128,19 +125,20 @@ instance (IResource reg, Typeable reg, Ord a, Read a) instance (Queriable reg a) => Serializable (Index reg a) where serialize= pack . show deserialize= read . unpack - setPersist index= persistIndex $ getType index + setPersist index1= persistIndex $ getType index1 where getType :: Index reg a -> reg getType= undefined -- type level +keyIndex :: (Show a1, Show a2) => a1 -> a2 -> String keyIndex treg tv= "index-" ++ show treg ++ show tv instance (Typeable reg, Typeable a) => Indexable (Index reg a) where - key map= keyIndex typeofreg typeofa + key map1= keyIndex typeofreg typeofa where - [typeofreg, typeofa]= typeRepArgs $! typeOf map + [typeofreg, typeofa]= typeRepArgs $! typeOf map1 -- defPath index= defPath $ ofRegister index -- where -- ofRegister :: Index reg a -> reg @@ -164,18 +162,18 @@ getIndexr :: (Queriable reg a) getIndexr rindex val= do mindex <- readDBRef rindex - let index = case mindex of Just (Index index) -> index; _ -> M.empty - let dbrefs= fromMaybe [] (M.lookup val index) - return (rindex, Index index, dbrefs) + let index1 = case mindex of Just (Index index2) -> index2; _ -> M.empty + let dbrefs= fromMaybe [] (M.lookup val index1) + return (rindex, Index index1, dbrefs) selectorIndex :: (Queriable reg a, IResource reg ) => (reg -> a) -> DBRef (Index reg a) -> DBRef reg -> Maybe reg -> STM () -selectorIndex selector rindex pobject mobj = do +selectorIndex selector rindex1 pobject mobj1 = do moldobj <- readDBRef pobject - choice moldobj mobj + choice moldobj mobj1 where choice moldobj mobj= case (moldobj, mobj) of @@ -189,34 +187,37 @@ selectorIndex selector rindex pobject mobj = do (Just oldobj, Nothing) -> do -- delete the old selector value from the index let val= selector oldobj - (rindex,Index index, dbrefs) <- getIndexr rindex val + (rindex,Index index2, dbrefs) <- getIndexr rindex1 val let dbrefs'= Data.List.delete pobject dbrefs - writeDBRef rindex $ Index (M.insert val dbrefs' index) + writeDBRef rindex $ Index (M.insert val dbrefs' index2) (Nothing, Just obj) -> do -- add the new value to the index let val= selector obj - (rindex,Index index, dbrefs) <- getIndexr rindex val + (rindex,Index index2, dbrefs) <- getIndexr rindex1 val let dbrefs'= nub $ Data.List.insert pobject dbrefs - writeDBRef rindex $ Index (M.insert val dbrefs' index) + writeDBRef rindex $ Index (M.insert val dbrefs' index2) {- | Register a trigger for indexing the values of the field passed as parameter. the indexed field can be used to perform relational-like searches -} -index - :: (Queriable reg a) => - (reg -> a) -> IO () +index :: (Queriable reg a) => (reg -> a) -> IO () index sel= do let [one, two]= typeRepArgs $! typeOf sel rindex= getDBRef $! 
keyIndex one two addTrigger $ selectorIndex sel rindex let proto= Index M.empty `asTypeOf` indexsel sel - withResources [proto] $ init proto + withResources [proto] $ init1 proto where - init proto [Nothing] = [proto] - init _ [Just _] = [] + init1 proto [Nothing] = [proto] + init1 _ [Just _] = [] + init1 _ (Nothing:_:_) = error "this will never happen(?)" + init1 _ (Just _:_:_) = error "this will never happen(?)" + init1 _ [] = error "this will never happen(?)" + indexsel :: (reg-> a) -> Index reg a indexsel= undefined + -- | implement the relational-like operators, operating on record fields class RelationOps field1 field2 res | field1 field2 -> res where (.==.) :: field1 -> field2 -> STM res @@ -246,9 +247,9 @@ join op field1 field2 =do return $ mix idxs idxs' where opv (v, _ )(v', _)= v `op` v' - mix xs ys= - let zlist= [(x,y) | x <- xs , y <- ys, x `opv` y] - in map ( \(( _, xs),(_ ,ys)) ->(xs,ys)) zlist + mix xs1 ys1 = + let zlist= [(x,y) | x <- xs1 , y <- ys1, x `opv` y] + in map ( \(( _, xs2),(_ ,ys2)) ->(xs2, ys2)) zlist type JoinData reg reg'=[([DBRef reg],[DBRef reg'])] @@ -315,15 +316,15 @@ indexOf selector= do let rindex= getDBRef $! keyIndex one two mindex <- readDBRef rindex case mindex of - Just (Index index) -> return $ M.toList index; + Just (Index index1) -> return $ M.toList index1; _ -> do let fields= show $ typeOf selector error $ "the index for "++ fields ++" do not exist. At main, use \"Data.TCache.IndexQuery.index\" to start indexing this field" retrieve :: Queriable reg a => (reg -> a) -> a -> (a -> a -> Bool) -> STM[DBRef reg] retrieve field value op= do - index <- indexOf field - let higuer = map (\(v, vals) -> if op v value then vals else []) index + index1 <- indexOf field + let higuer = map (\(v, vals) -> if op v value then vals else []) index1 return $ concat higuer -- from a Query result, return the records, rather than the references diff --git a/Data/TCache/IndexText.hs b/Data/TCache/IndexText.hs index b950e43..7c411ed 100644 --- a/Data/TCache/IndexText.hs +++ b/Data/TCache/IndexText.hs @@ -68,22 +68,20 @@ import Data.Bits import System.Mem.StableName import Data.List((\\)) import GHC.Conc(unsafeIOToSTM) -import Control.Concurrent(forkIO, threadDelay) import Data.Char import Data.ByteString.Lazy.Char8(pack, unpack) import Control.Monad -import System.IO.Unsafe --import Debug.Trace --(!>)= flip trace -data IndexText= IndexText - { fieldType :: !String - , lastDoc :: Int - , mapDocKeyInt :: M.Map String Int - , mapIntDocKey :: M.Map Int String - , mapTextInteger :: M.Map T.Text Integer - } deriving (Typeable) +data IndexText = IndexText + !String -- fieldType + Int -- lastDoc + (M.Map String Int) -- mapDocKeyInt + (M.Map Int String) -- mapIntDocKey + (M.Map T.Text Integer) -- mapTextInteger + deriving (Typeable) instance Show IndexText where @@ -106,46 +104,56 @@ instance IResource IndexText where readResourceByKey = defReadResourceByKey delResource = defDelResource +{- readInitDBRef v x= do mv <- readDBRef x case mv of Nothing -> writeDBRef x v >> return v Just v -> return v +-} + +add :: DBRef IndexText -> String -> String -> [T.Text] -> STM () +add ref t key1 w = op ref t setBit w key1 -add ref t key w = op ref t setBit w key -del ref t key w = op ref t clearBit w key +del :: DBRef IndexText -> String -> String -> [T.Text] -> STM () +del ref t key1 w = op ref t clearBit w key1 -op refIndex t set ws key = do +op :: DBRef IndexText -> String -> (Integer -> Int -> Integer) -> [T.Text] -> String -> STM () +op refIndex t set ws1 key1 = do mindex <- readDBRef 
refIndex - let mindex'= process mindex ws + let mindex'= process mindex ws1 writeDBRef refIndex $ fromJust mindex' where process mindex []= mindex - process mindex (w:ws)= + process mindex (w:ws) = case mindex of - Nothing -> process (Just $ IndexText t 0 (M.singleton key 0) (M.singleton 0 key) (M.singleton w 1)) ws - Just (IndexText t n mapSI mapIS map) -> do - let (docLocation,n', mapSI',mapIS')= case M.lookup key mapSI of - Nothing -> let n'= n+1 in (n', n' - , M.insert key n' mapSI - , M.insert n' key mapIS) -- new Document + Nothing -> process (Just $ IndexText t 0 (M.singleton key1 0) (M.singleton 0 key1) (M.singleton w 1)) ws + Just (IndexText _ n mapSI mapIS map1) -> do + let (docLocation, n1, mapSI',mapIS')= case M.lookup key1 mapSI of + Nothing -> let n2= n+1 in (n2, n2 + , M.insert key1 n2 mapSI + , M.insert n2 key1 mapIS) -- new Document Just m -> (m,n, mapSI,mapIS) -- already indexed document - case M.lookup w map of + case M.lookup w map1 of Nothing -> --new word - process (Just $ IndexText t n' mapSI' mapIS' (M.insert w (set 0 docLocation) map)) ws + process (Just $ IndexText t n1 mapSI' mapIS' (M.insert w (set 0 docLocation) map1)) ws Just integer -> -- word already indexed - process (Just $ IndexText t n' mapSI' mapIS' $ M.insert w (set integer docLocation) map) ws + process (Just $ IndexText t n1 mapSI' mapIS' $ M.insert w (set integer docLocation) map1) ws +addProto :: Typeable a => a -> IO () addProto sel = do let [t1,t2]= typeRepArgs $! typeOf sel let t = show t1 ++ show t2 let proto = IndexText t 0 M.empty M.empty M.empty - withResources [proto] $ init proto + withResources [proto] $ init' proto where - init proto [Nothing] = [proto] - init _ [Just _] = [] + init' proto [Nothing] = [proto] + init' _ [Just _] = [] + init' _ [] = error "this will never happen(?)" + init' _ (Nothing:_:_) = error "this will never happen(?)" + init' _ (Just _:_:_) = error "this will never happen(?)" -- | start a trigger to index the contents of a register field indexText @@ -171,7 +179,7 @@ indext :: (IResource a, Typeable a,Typeable b) => (a -> b) -> (b -> [T.Text]) -> DBRef a -> Maybe a -> STM() indext sel convert dbref mreg = f1 -- unsafeIOToSTM $! f where - f = void $ forkIO (atomically f1) + {-f = void $ forkIO (atomically f1)-} f1 = do moldreg <- readDBRef dbref case (moldreg, mreg) of @@ -183,13 +191,14 @@ indext sel convert dbref mreg = f1 -- unsafeIOToSTM $! f if st == st' then return () else do - let key = keyResource reg + let key1 = keyResource reg let wrds = convert $ sel oldreg let wrds' = convert $ sel reg let new = wrds' \\ wrds let old = wrds \\ wrds' - unless (null old) $ del refIndex t key old - unless (null new) $ add refIndex t key new + unless (null old) $ del refIndex t key1 old + unless (null new) $ add refIndex t key1 new + (Nothing, Nothing) -> error "this will never happen(?)" where [t1, t2] = typeRepArgs $! typeOf sel t = show t1 ++ show t2 @@ -198,6 +207,7 @@ indext sel convert dbref mreg = f1 -- unsafeIOToSTM $! f u = undefined -- avoid duplicate code +targs :: Typeable a => a -> STM (Maybe IndexText) targs sel = do let [t1, t2]= typeRepArgs $! typeOf sel let t= show t1 ++ show t2 @@ -216,15 +226,15 @@ containsElem sel wstr = do error $ "the index for " ++ fields ++ " do not exist. 
At main, use \"Data.TCache.IndexQuery.index\" to start indexing this field" - Just (IndexText t n _ mmapIntString map1) -> + Just (IndexText _ n _ mmapIntString map1) -> case M.lookup w map1 of Nothing -> return [] Just integer -> do let mns = map - (\n -> - if testBit integer n - then Just n + (\i -> + if testBit integer i + then Just i else Nothing) [0 .. n] let wordsr = mapMaybe (`M.lookup` mmapIntString) $ catMaybes mns @@ -236,9 +246,10 @@ allElemsOf sel = do mr <- targs sel case mr of Nothing -> return [] - Just (IndexText t n _ _ map) -> return $ M.keys map + Just (IndexText _ _ _ _ map') -> return $ M.keys map' -words1= filter filterWordt {-( (<) 2 . T.length)-} . T.split (\c -> isSeparator c || c=='\n' || isPunctuation c ) +words1 :: T.Text -> [T.Text] +words1 = filter filterWordt {-( (<) 2 . T.length)-} . T.split (\c -> isSeparator c || c=='\n' || isPunctuation c ) -- | return the DBRefs whose fields include all the words in the requested text contents.Except the -- words with less than three characters that are not digits or uppercase, that are filtered out before making the query @@ -254,5 +265,8 @@ contains sel str= case words str of let rs = map (containsElem sel) $ filter filterWord ws foldl1 (.&&.) rs -filterWordt w= T.length w >2 || any (\c -> isUpper c || isDigit c) (T.unpack w) -filterWord w= length w >2 || any (\c -> isUpper c || isDigit c) w +filterWordt :: T.Text -> Bool +filterWordt w = T.length w >2 || any (\c -> isUpper c || isDigit c) (T.unpack w) + +filterWord :: Foldable t => t Char -> Bool +filterWord w = length w >2 || any (\c -> isUpper c || isDigit c) w diff --git a/Data/TCache/Memoization.hs b/Data/TCache/Memoization.hs index b7cfc57..9930139 100644 --- a/Data/TCache/Memoization.hs +++ b/Data/TCache/Memoization.hs @@ -11,6 +11,7 @@ -- | -- ----------------------------------------------------------------------------- +{-# OPTIONS_GHC -fno-warn-orphans -fno-warn-missing-signatures #-} {-# LANGUAGE DeriveDataTypeable , ExistentialQuantification , FlexibleInstances @@ -21,7 +22,6 @@ where import Data.Typeable import Data.TCache import Data.TCache.Defs(Indexable(..)) -import System.Mem.StableName import System.IO.Unsafe import System.Time import Data.Maybe(fromJust) @@ -39,11 +39,12 @@ context = unsafePerformIO newContext -- | given a string, return a key that can be used in Indexable instances -- Of non persistent objects, such are cached objects (it changes fron execution to execution) -- . It uses `addrHash` +addrStr :: a -> String addrStr x= "addr" ++ show hash where hash = case unsafePerformIO $ addrHash context x of - Right x -> x - Left x -> x + Right x1 -> x1 + Left x1 -> x1 -- | to execute a monad for the purpose of memoizing its result class Executable m where @@ -52,7 +53,7 @@ class Executable m where instance Executable IO where execute m = unsafePerformIO $! f1 m "" where - f1 m x= m + f1 m1 _= m1 instance Executable Identity where execute (Identity x)= x @@ -61,14 +62,15 @@ instance MonadIO Identity where liftIO f= Identity $! unsafePerformIO $! 
f +cachedKeyPrefix :: String cachedKeyPrefix = "cached" instance (Indexable a) => IResource (Cached a b) where - keyResource ch@(Cached a _ _ _)= cachedKeyPrefix ++ key a -- ++ unsafePerformIO (addrStr f ) + keyResource (Cached a _ _ _)= cachedKeyPrefix ++ key a -- ++ unsafePerformIO (addrStr f ) writeResource _= return () delResource _= return () - readResourceByKey k= return Nothing -- error $ "access By key is undefined for cached objects.key= " ++ k + readResourceByKey _= return Nothing -- error $ "access By key is undefined for cached objects.key= " ++ k readResource (Cached a f _ _)=do @@ -101,18 +103,19 @@ writeCached a b c d= cached :: (Indexable a,Typeable a, Typeable b, Executable m,MonadIO m) => Int -> (a -> m b) -> a -> m b cached time f a= liftIO . atomically $ cachedSTM time f a +cachedSTM :: (Typeable a, Typeable b, Executable m, Indexable a, Integral p) => p -> (a -> m b) -> a -> STM b cachedSTM time f a= do let prot= Cached a f undefined undefined let ref= getDBRef $ keyResource prot - cho@(Cached _ _ b t) <- readDBRef ref `onNothing` fillIt ref prot + (Cached _ _ b t) <- readDBRef ref `onNothing` fillIt ref prot case time of 0 -> return b _ -> do TOD tnow _ <- unsafeIOToSTM getClockTime if tnow - t >= fromIntegral time then do - Cached _ _ b _ <- fillIt ref prot - return b + Cached _ _ b1 _ <- fillIt ref prot + return b1 else return b where -- has been invalidated by flushCached @@ -126,10 +129,10 @@ cachedSTM time f a= do -- The Int parameter is the timeout, in second after the last evaluation, after which the cached value will be discarded and the expression will be evaluated again if demanded -- . Time == 0 means no timeout cachedByKey :: (Typeable a, Executable m,MonadIO m) => String -> Int -> m a -> m a -cachedByKey key time f = cached time (const f) key +cachedByKey key1 time f = cached time (const f) key1 cachedByKeySTM :: (Typeable a, Executable m) => String -> Int -> m a -> STM a -cachedByKeySTM key time f = cachedSTM time (const f) key +cachedByKeySTM key1 time f = cachedSTM time (const f) key1 -- Flush the cached object indexed by the key flushCached :: String -> IO () diff --git a/Data/TCache/Triggers.hs b/Data/TCache/Triggers.hs index bbfa5bc..6953266 100644 --- a/Data/TCache/Triggers.hs +++ b/Data/TCache/Triggers.hs @@ -8,11 +8,10 @@ import Data.IORef import System.IO.Unsafe import Unsafe.Coerce import GHC.Conc (STM, unsafeIOToSTM) -import Data.Maybe(maybeToList, catMaybes, fromMaybe, fromJust) +import Data.Maybe(fromMaybe, fromJust) import Data.List(nubBy) -import Control.Concurrent.STM -import Debug.Trace +--import Debug.Trace newtype TriggerType a= TriggerType (DBRef a -> Maybe a -> STM()) deriving Typeable @@ -35,25 +34,26 @@ If the DBRef is being deleted, the second parameter is 'Nothing'. if the DBRef contains Nothing, then the object is being created -} addTrigger :: (IResource a, Typeable a) => (DBRef a -> Maybe a -> STM()) -> IO() -addTrigger t= do - map <- readIORef cmtriggers +addTrigger tr = do + map' <- readIORef cmtriggers writeIORef cmtriggers $ - let ts = mbToList $ lookup atype map - in nubByType $ (atype ,CMTrigger t : ts) : map + let ts = mbToList $ lookup atype map' + in nubByType $ (atype ,CMTrigger tr : ts) : map' where nubByType= nubBy (\(t,_)(t',_) -> t==t') - (_,atype:_)= splitTyConApp . typeOf $ TriggerType t + (_,atype:_)= splitTyConApp . 
typeOf $ TriggerType tr +mbToList :: Maybe [a] -> [a] mbToList = fromMaybe [] -- | internally called when a DBRef is modified/deleted/created applyTriggers:: (IResource a, Typeable a) => [DBRef a] -> [Maybe a] -> STM() applyTriggers [] _ = return() applyTriggers dbrfs mas = do - map <- unsafeIOToSTM $ readIORef cmtriggers - let ts = mbToList $ lookup (typeOf $ fromJust (head mas)) map + map' <- unsafeIOToSTM $ readIORef cmtriggers + let ts = mbToList $ lookup (typeOf $ fromJust (head mas)) map' mapM_ f ts where @@ -64,7 +64,7 @@ applyTriggers dbrfs mas = do -mapM2_ _ [] _= return() +mapM2_ :: Monad m => (t1 -> t2 -> m a) -> [t1] -> [t2] -> m () +mapM2_ _ [] _ = return() +mapM2_ _ _ [] = return() mapM2_ f (x:xs) (y:ys)= f x y >> mapM2_ f xs ys - - diff --git a/TCache.cabal b/TCache.cabal index 505cba2..6316eb0 100644 --- a/TCache.cabal +++ b/TCache.cabal @@ -1,85 +1,93 @@ -name: TCache -version: 0.12.0 -cabal-version: >= 1.6 -build-type: Simple -license: BSD3 -license-file: LICENSE -maintainer: agocorona@gmail.com -synopsis: A Transactional cache with user-defined persistence -description: TCache is a transactional cache with configurable persistence. It allows conventional - STM transactions for objects that synchronize with their user-defined storages. - State in memory and into permanent storage is transactionally coherent. - . - It has interface defined for Amazon WS and Yesod Persistent backends defined in tcache-\ - packages. - Persistent is a multi-backend interface for SQL and non SQL databases such in Mongo-db - . - The package implements serializable STM references, access by key and by record field value, triggers, - full text and field indexation, default serialization and a query language based on record fields - . - Since the STM references can be included in data structures and serialized, this is right - for graph databases and other NoSQL databases. - . - 0.12.0 space in index data in indexQuery.hs and IndexText.hs triggered errors in the AWS backend. - The space has been changed by \'-\'. So rename the "index *" files in the TCache folder - in order to be recognized. - . - 0.11.0.0 added setIndexParsist to define persistence for indexes by type. started the addition of readDBRefs, readResources and so on for simultaneous read, writes and deletes of - objects of the same type. - . - 0.10.2.0 Added setDefaultPersist and modified the signature of setPersist in Data.TCache.DefaultPersistence. - Fixed issues with ghc 7.6.3 - . - 0.10 version add memoization and a persistent and transactional collection/queue. - . - 0.10.0.8 subversion add cachedByKeySTM - . - 0.10.0.9 fixed an error in clearSyncCacheProc and SynWrite Asynchronous that checked the cache continuously - . - See "Data.TCache" for details - . - In this release: - . - Dropped Data.Hashtable (deprecated). Now it uses the package hashtables +cabal-version: 1.12 +-- This file has been generated from package.yaml by hpack version 0.31.1. 
+-- +-- see: https://github.com/sol/hpack +-- +-- hash: 27ba4712998e8aa6b478c4583801336d311f66632b4a2fbb36f9b29c91b160ff -category: Data, Database -author: Alberto Gómez Corona -tested-with: GHC ==7.0.3 -data-dir: "" -extra-source-files: demos/DBRef.hs - demos/IndexQuery.hs - demos/IndexText.hs - demos/basicSample.hs - demos/caching.hs - demos/triggerRelational.lhs - demos/memoization.hs - demos/DBRef.hs +name: TCache +version: 0.13.0.0 +synopsis: A Transactional cache with user-defined persistence +description: Please see the README on GitHub at +category: Data, Database +homepage: https://github.com/agocorona/TCache#readme +bug-reports: https://github.com/agocorona/TCache/issues +author: Alberto Gómez Corona +maintainer: agocorona@gmail.com +copyright: 2019 Alberto Gómez Corona +license: BSD3 +license-file: LICENSE +build-type: Simple +extra-source-files: + ChangeLog.md + README.md + demos/basicSample.hs + demos/caching.hs + demos/DBRef.hs + demos/DynamicSample.hs + demos/indexQuery.hs + demos/indexText.hs + demos/memoization.hs + demos/pr.hs + demos/pushpop.hs + demos/testnewdbref.hs + demos/triggerRelational.lhs + demos/weakTest.hs + Data/Persistent/IDynamic.hs source-repository head - type: git - location: https://github.com/agocorona/TCache + type: git + location: https://github.com/agocorona/TCache library - build-depends: base >=4 && <5, bytestring -any, - containers >=0.1.0.1, directory >=1.0, old-time >=1.0, - stm -any, text -any, mtl -any, hashtables, hashable, - RefSerialize -any - - - - exposed-modules: Data.TCache Data.TCache.DefaultPersistence, - Data.TCache.Defs Data.TCache.IResource Data.TCache.IndexQuery - Data.TCache.IndexText Data.TCache.Memoization Data.TCache.Triggers - Data.Persistent.Collection - Data.Persistent.IDynamic - - - - exposed: True - buildable: True - extensions: UndecidableInstances - ScopedTypeVariables DeriveDataTypeable - hs-source-dirs: . - other-modules: + exposed-modules: + Data.TCache + Data.TCache.DefaultPersistence + Data.TCache.Defs + Data.TCache.IResource + Data.TCache.IndexQuery + Data.TCache.IndexText + Data.TCache.Memoization + Data.TCache.Triggers + Data.Persistent.Collection + Data.Persistent.IDynamic + other-modules: + Paths_TCache + hs-source-dirs: + ./. + ghc-options: -Wall -Wcompat -Widentities + build-depends: + RefSerialize + , base >=4.7 && <5 + , bytestring + , containers >=0.1.0.1 + , directory >=1.0 + , hashtables + , mtl + , old-time >=1.0 + , stm + , text + default-language: Haskell2010 +test-suite caching + type: exitcode-stdio-1.0 + main-is: caching.hs + other-modules: + Paths_TCache + hs-source-dirs: + test/caching + ghc-options: -Wall -Wcompat -Widentities -threaded -rtsopts -with-rtsopts=-N + build-depends: + RefSerialize + , TCache + , base >=4.7 && <5 + , bytestring + , containers >=0.1.0.1 + , directory >=1.0 + , hashtables + , mtl + , old-time >=1.0 + , stm + , text + default-language: Haskell2010 diff --git a/demos/DBRef.hs b/demos/DBRef.hs index 584d779..b4f773b 100644 --- a/demos/DBRef.hs +++ b/demos/DBRef.hs @@ -3,40 +3,53 @@ module Main where import Data.TCache import Data.TCache.DefaultPersistence import Data.ByteString.Lazy.Char8(pack,unpack) -import GHC.Conc import System.IO.Unsafe import Data.Typeable -import Debug.Trace +{- +-- would create orphan instances +instance (Read a, Show a) => Serializable a where + serialize = pack . show + deserialize = read . 
unpack +-} +-- An Employee +data Emp = Emp + { ename :: String + , salary :: Float + } deriving (Read, Show, Typeable) +instance Serializable Emp where + serialize = pack . show + deserialize = read . unpack -newtype Other= Other String deriving (Read, Show) +instance Indexable Emp where + key Emp { ename = name } = name -data Company = Company { - cname :: String - ,personnel :: [DBRef Emp] - ,other :: Other} - deriving (Read, Show,Typeable) +-- For illustration +newtype Other = Other String deriving (Read, Show) +-- A Company +data Company = Company + { cname :: String + , personnel :: [DBRef Emp] + , other :: Other + } deriving (Read, Show, Typeable) -data Emp= Emp{ename :: String, salary :: Float} deriving (Read, Show, Typeable) +instance Serializable Company where + serialize = pack . show + deserialize = read . unpack instance Indexable Company where - key Company{cname=name}= name - -instance (Read a, Show a) => Serializable a where - serialize= pack . show - deserialize= read . unpack - -instance Indexable Emp where - key Emp{ename= name}= name - + key Company{ cname = name } = name -myCompanyName= "mycompany" +myCompanyName :: String +myCompanyName = "mycompany" +-- Creating a Company from scratch {-# NOINLINE myCompanyRef #-} -myCompanyRef= unsafePerformIO . atomically $ do +myCompanyRef :: DBRef Company +myCompanyRef = unsafePerformIO . atomically $ do refEmp1 <- newDBRef Emp{ename= "Emp1", salary= 34000} refEmp2 <- newDBRef Emp{ename= "Emp2", salary= 35000} @@ -49,18 +62,16 @@ myCompanyRef= unsafePerformIO . atomically $ do ,personnel= [refEmp1, refEmp2, refEmp3, refEmp4] ,other= Other "blah blah blah"} - -- myCompany= Company myCompanyName [getDBRef "Emp1",getDBRef "Emp2",getDBRef "Emp3"] - - -increaseSalaries percent= do +increaseSalaries :: Float -> STM () +increaseSalaries percent1 = do mycompany' <- readDBRef myCompanyRef mycompany <- case mycompany' of Just x -> pure x Nothing -> error "Boom" - mapM_ (increase percent ) $ personnel mycompany + mapM_ (increase percent1 ) $ personnel mycompany where increase percent ref= do emp' <- readDBRef ref @@ -72,21 +83,24 @@ increaseSalaries percent= do where factor= 1+ percent/ 100 -printSalaries ref= do - Just comp <- atomically $ readDBRef ref +printSalaries :: DBRef Company -> IO () +printSalaries ref1 = do + Just comp <- atomically $ readDBRef ref1 mapM_ printSalary $ personnel comp where printSalary ref= atomically (readDBRef ref) >>= print +putMsg :: String -> IO () putMsg msg= putStrLn $ ">>" ++ msg -main= do +main :: IO () +main = do putMsg "DBRefs are cached indexable, serializable, unique-by-key references to objects stored in the cache, mutable under STM transactions" putMsg "DBRef's are instances of Show" print myCompanyRef - let myCompanyRef2= read $ show myCompanyRef :: DBRef Company + let myCompanyRef2 = read $ show myCompanyRef :: DBRef Company putMsg "DBRefs are identified by the key of the referenced object" putMsg "DBRef's are alse instances of read" @@ -105,7 +119,7 @@ main= do putMsg "after the increase" printSalaries myCompanyRef2 - let emp3ref= getDBRef "Emp3" + let emp3ref = getDBRef "Emp3" putMsg "tch tch, this bad boy does not deserve his salary" Just emp3 <- atomically $ readDBRef emp3ref print emp3 @@ -117,17 +131,13 @@ main= do putStrLn "checking race condition on cache cleaning" - let emp1= Emp{ename="Emp1", salary= -1} - let key= keyResource emp1 - let remp1 = getDBRef key - Just emp1 <- atomically $ readDBRef remp1 + let emp1 = Emp{ename="Emp1", salary= -1} + let key1 = keyResource emp1 + let remp1 = 
getDBRef key1 + Just emp1' <- atomically $ readDBRef remp1 atomically $ flushDBRef remp1 - let remp1'= getDBRef key - atomically $ writeDBRef remp1' $ emp1{salary=0} + let remp1' = getDBRef key1 + atomically $ writeDBRef remp1' $ emp1'{salary=0} putStrLn "must reflect the salary 0 for emp1" printSalaries myCompanyRef2 - - - - diff --git a/demos/DynamicSample.hs b/demos/DynamicSample.hs index fe877a0..d924cce 100644 --- a/demos/DynamicSample.hs +++ b/demos/DynamicSample.hs @@ -12,49 +12,58 @@ example of IDynamic usage. -} -- Very simple data: --- Two objects with two different datatypes: Int and String2 --- Notice: We need a newtype String for this example because of the special treatment of the key --- while TCache.Defs already defines the String itself (id) as key. +-- Two objects with two different datatypes: MyInt and MyString +-- We use newtypes so we don't need to create orphan instances -newtype String2 = String2 { fromString2 :: String } deriving ( Eq, Show, Typeable, Read ) +newtype MyInt = MyInt { fromMyInt :: Int } deriving ( Eq, Show, Typeable, Read ) +newtype MyString = MyString { fromMyString :: String } deriving ( Eq, Show, Typeable, Read ) -instance Indexable String2 where +instance Indexable MyString where -- making the key 2 chars wide - key x= take 2 $ fromString2 x + key x = take 2 $ fromMyString x -instance (Read a, Show a) => Serializable a where +instance Indexable MyInt where + -- just use the string representation as key here + key = show + +instance Serializable MyString where + serialize = pack . show + deserialize = read . unpack + +instance Serializable MyInt where serialize = pack . show deserialize = read . unpack +main :: IO () main= do putStrLn "see the code to know the meaning of he results" -- NOTE: registerType no longer needed - let x= 1:: Int + let x = MyInt 1 -- now *Resources primitives support different datatypes -- without the need of Data.Dynamic withResources [] $ const [x] - withResources [] $ const [String2 "hola"] --resources creation + withResources [] $ const [MyString "hola"] --resources creation syncCache - res <- getResource x - print res + res1 <- getResource x + print res1 - res <- getResource $ String2 "ho" - print res + res2 <- getResource $ MyString "ho" + print res2 -- to use heterogeneous data in the same transaction, -- use DBRef's: s <- atomically $ do - let refInt = getDBRef $ key x :: DBRef Int - refString = getDBRef $ key (String2 "ho") :: DBRef String2 + let refInt = getDBRef $ key x :: DBRef MyInt + refString = getDBRef $ key (MyString "ho") :: DBRef MyString i <- readDBRef refInt - writeDBRef refString $ String2 $ "hola, the retrieved value of x is " ++ show i + writeDBRef refString $ MyString $ "hola, the retrieved value of x is " ++ show i readDBRef refString print s @@ -62,5 +71,3 @@ main= do -- however, retrieval of data with the incorrect type will generate an exception: syncCache - - diff --git a/demos/basicSample.hs b/demos/basicSample.hs index 5e04458..51d1805 100644 --- a/demos/basicSample.hs +++ b/demos/basicSample.hs @@ -10,6 +10,7 @@ import Data.Foldable (for_) import Data.Typeable import Debug.Trace +debug :: a -> String -> a debug a b= trace b a -- The data elements to be used in the example: A user will repeatedly buy Items. 
@@ -21,7 +22,10 @@ data Data= User{uname::String, uid::String, spent:: Int} | -- defining prototypes to make missing-fields warning useful again +user_ :: Data user_ = User{uname = undefined, uid = undefined, spent = undefined } + +item_ :: Data item_ = Item{iname = undefined, iid = undefined, price = undefined, stock = undefined } -- The mappings between the cache and the physical storage are defined by the interface IResource @@ -36,12 +40,12 @@ item_ = Item{iname = undefined, iid = undefined, price = undefined, stock = unde instance Indexable Data where - key User{uid=id}= id - key Item{iid=id}= id + key User { uid=id' } = id' + key Item { iid=id' } = id' instance Serializable Data where - serialize= pack . show - deserialize= read . unpack + serialize = pack . show + deserialize = read . unpack @@ -54,6 +58,7 @@ instance Serializable Data where -- data objects that are updated in the cache. buyIt is executed atomically. +buy :: Data -> Data -> IO () user `buy` item = withResources [user, item] buyIt where buyIt[Just us, Just it] @@ -68,13 +73,14 @@ user `buy` item = withResources [user, item] buyIt buyIt _ = error "either the user or the item does not exist" -main= do +main :: IO () +main = do -- create resources (access no resources and return two new Data objects defined in items) withResources [] prepareItems -- 11 PCs are charged to the John´s account in parallel, to show transactionality -- because there are only 10 PCs in stock, the last thread must return an error - for_ [1..11] $ const $ forkIO $ user_{ uid = "U12345" } `buy` item_{ iid = "I54321" } + for_ [(1::Int)..11] $ const $ forkIO $ user_{ uid = "U12345" } `buy` item_{ iid = "I54321" } -- wait a second (to let the forked io finish) threadDelay 1000000 diff --git a/demos/caching.hs b/demos/caching.hs index b2e2875..e5efd80 100644 --- a/demos/caching.hs +++ b/demos/caching.hs @@ -11,6 +11,8 @@ import Data.ByteString.Lazy.Char8(pack,unpack) import Control.Concurrent import Debug.Trace import Data.Typeable + +debug :: a -> String -> a debug a b= trace b a -- The data elements to be used in the example @@ -28,11 +30,12 @@ instance Serializable Data where deserialize= read . unpack +printStat :: (Show a1, Show a2, Show a3) => (a1, a2, a3) -> IO () printStat (total, dirty, loaded) = putStrLn $ "total: " ++ show total ++ " dirty: " ++ show dirty ++ " loaded: " ++ show loaded -main= do - +main :: IO () +main = do putStrLn "See the source code of this example!" putStrLn "" putStrLn "This program tests the caching, cleaning, re-retrieval and updating of the cache." @@ -47,12 +50,12 @@ main= do -- get stats about them (total, dirty, loaded) statElems >>= printStat - x <- getResources [Data i 0 | i <- [1..200]] - putStrLn $ "Last element: " ++ show (last x) + x1 <- getResources [Data i 0 | i <- [1..200]] + putStrLn $ "Last element: " ++ show (last x1) putStrLn "" putStrLn $ "Starting the async proc with folder: " ++ defPath ( undefined :: Data) - clearSyncCacheProc 10 defaultCheck 100 + _ <- clearSyncCacheProc 10 defaultCheck 100 threadDelay 6000000 putStrLn "after 6 seconds" @@ -84,9 +87,8 @@ main= do putStrLn "accessing all entries once and print the last" -- I read (access) all the data here! 
- x <- getResources [Data i 1 | i <- [1..200]] - print $ last x - + x2 <- getResources [Data i 1 | i <- [1..200]] + print $ last x2 threadDelay 5000000 diff --git a/demos/indexQuery.hs b/demos/indexQuery.hs index d089cbc..ef56ad4 100644 --- a/demos/indexQuery.hs +++ b/demos/indexQuery.hs @@ -4,11 +4,8 @@ import Data.TCache import Data.TCache.IndexQuery import Data.TCache.DefaultPersistence import Data.ByteString.Lazy.Char8(pack,unpack) -import Debug.Trace - import Data.Typeable - data Person= Person {pname :: String, age :: Int} deriving (Show, Read, Eq, Typeable) data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, Typeable) @@ -16,10 +13,15 @@ data Car= Car{owner :: DBRef Person , cname:: String} deriving (Show, Read, Eq, instance Indexable Person where key Person{pname= n} = "Person " ++ n instance Indexable Car where key Car{cname= n} = "Car " ++ n -instance (Read a, Show a) => Serializable a where - serialize= pack . show - deserialize= read . unpack +instance Serializable Person where + serialize = pack . show + deserialize = read . unpack +instance Serializable Car where + serialize = pack . show + deserialize = read . unpack + +main :: IO () main = do index owner @@ -27,17 +29,16 @@ main = do index cname index age - bruce <- atomically $ newDBRef $ Person "bruce" 42 - atomically $ mapM_ newDBRef [Car bruce "Bat Mobile", Car bruce "Porsche"] - - r <- atomically $ cname .>=. "Bat Mobile" - print r + bruce <- atomically $ newDBRef $ Person "bruce" 42 + atomically $ mapM_ newDBRef [Car bruce "Bat Mobile", Car bruce "Porsche"] - r <- atomically $ select (cname, owner) $ (owner .==. bruce) .&&. (cname .==. "Bat Mobile") - print r + r1 <- atomically $ cname .>=. "Bat Mobile" + print r1 - r <- atomically $ age .>=. (20 :: Int) - print r + r2 <- atomically $ select (cname, owner) $ (owner .==. bruce) .&&. (cname .==. "Bat Mobile") + print r2 + r3 <- atomically $ age .>=. (20 :: Int) + print r3 --syncCache diff --git a/demos/indexText.hs b/demos/indexText.hs index 8de50d3..f206c57 100644 --- a/demos/indexText.hs +++ b/demos/indexText.hs @@ -16,7 +16,8 @@ instance Serializable Doc where serialize= pack . show deserialize= read . 
unpack
 
-main= do
+main :: IO ()
+main = do
   indexText body T.pack
   let doc= Doc{title= "title", body= "hola que tal estamos"}
   rdoc <- atomically $ newDBRef doc
diff --git a/demos/memoization.hs b/demos/memoization.hs
index fa5829f..46457f8 100644
--- a/demos/memoization.hs
+++ b/demos/memoization.hs
@@ -1,5 +1,4 @@
 import Data.TCache.Memoization
-import Data.TCache.DefaultPersistence
 import Control.Concurrent
 import System.Time
 
@@ -7,11 +6,13 @@ import System.Time
 -- This demo stores the current time for 4 seconds until
 -- it generates the next timestamp
-main= do
-  cachedByKey "timequant" 4 f >>= print
-  threadDelay 1000000
-  main
+main :: IO b
+main = do
+  cachedByKey "timequant" 4 f >>= print
+  threadDelay 1000000
+  main
 
-f= do
-  TOD t _ <- getClockTime
-  return t
+f :: IO Integer
+f = do
+  TOD t _ <- getClockTime
+  return t
diff --git a/demos/pr.hs b/demos/pr.hs
index 26472a3..b6ca251 100644
--- a/demos/pr.hs
+++ b/demos/pr.hs
@@ -1,12 +1,8 @@
-{-# OPTIONS -XDeriveDataTypeable
-  #-}
 module Main where
 import Data.TCache
 import Data.Typeable
 import Data.TCache.DefaultPersistence
 import Data.ByteString.Lazy.Char8
-import Control.Concurrent(threadDelay)
-import System.IO (hFlush,stdout)
 
 data Ops= Plus | Times deriving (Read, Show, Typeable)
 
@@ -17,10 +13,11 @@ instance Serializable Ops where
 instance Indexable Ops where
   key _ = "ops"
 
-main= do
-  let ref = getDBRef $ keyResource Times
-  atomically $ writeDBRef ref Plus
-  syncCache
+main :: IO ()
+main = do
+  let ref = getDBRef $ keyResource Times
+  atomically $ writeDBRef ref Plus
+  syncCache
 
-  print ref
+  print ref
diff --git a/demos/pushpop.hs b/demos/pushpop.hs
index beed5e0..364cba1 100644
--- a/demos/pushpop.hs
+++ b/demos/pushpop.hs
@@ -1,13 +1,58 @@
-
 import Data.Persistent.Collection
 import Control.Concurrent
 import Data.TCache
 
-main= do
-  let x= "a"
-  let q= getQRef "hi"
-  forkIO $ pop q >>= print
---  forkIO $ pop q >>= print
-  push q x
-  push q x
-  syncCache
+main :: IO ()
+main = do
+  let q = getQRef "hi"
+
+  -- display if and what we have in the queue
+  -- will be empty on first run but afterwards
+  -- contains "e"
+  pickAll q >>= print
+
+  -- make sure there is no data left
+  flush q
+
+  -- pop from empty would deadlock, so don't do that
+  -- pop q >>= print
+
+  push q "a" -- push before starting asyncs
+
+  -- async pops (first does not need to wait)
+  _ <- forkIO $ pop q >>= print
+  _ <- forkIO $ pop q >>= print
+
+  putStrLn "Waiting a bit (should print \"a\")"
+  threadDelay 1000000
+  putStrLn "By now \"a\" should be printed"
+
+  -- push more
+  push q "b" -- this will be printed asap
+
+  push q "c"
+  push q "d" -- will be sync popped
+  push q "e" -- will be left over at the end
+
+  -- let the second fork finish
+  threadDelay 1000000
+  putStrLn "By now \"b\" should be already printed"
+
+  -- another async fork (printing "c")
+  _ <- forkIO $ pop q >>= print
+
+  -- sync current state to file and print it
+  -- this usually still has "c" included
+  threadDelay 1000000
+  syncCache
+  readFile ".tcachedata/Queue#hi" >>= putStrLn
+
+  -- sync pop (usually prints "d")
+  pop q >>= print
+
+  -- another sync / wait / print
+  syncCache
+  threadDelay 1000000
+  -- here "e" will be left in the queue on disk
+  readFile ".tcachedata/Queue#hi" >>= putStrLn
+  -- and stays there for the next run
diff --git a/demos/testnewdbref.hs b/demos/testnewdbref.hs
index 570248f..d3d919e 100644
--- a/demos/testnewdbref.hs
+++ b/demos/testnewdbref.hs
@@ -15,18 +15,18 @@ data User= User
   , upassword :: String
   } deriving (Read, Show, Typeable)
 
+userPrefix :: String
userPrefix= "User#" + instance Indexable User where key User{userName= user}= userPrefix++user - - userRegister :: String -> String -> IO(DBRef User) userRegister user password = atomically $ newDBRef $ User user password -instance (Show a, Read a)=> Serializable a where - serialize= pack . show - deserialize= read . unpack +instance (Show a, Read a) => Serializable a where + serialize = pack . show + deserialize = read . unpack main :: IO () main = do diff --git a/demos/weakTest.hs b/demos/weakTest.hs index 91c65b1..3a0b0e8 100644 --- a/demos/weakTest.hs +++ b/demos/weakTest.hs @@ -1,12 +1,13 @@ import System.Mem.Weak -import Control.Concurrent import Debug.Trace -debug= flip trace +debug :: c -> String -> c +debug = flip trace -dat= "this is the data" - -main= do - mkWeakPtr dat . Just $ print "deleted" `debug` "deleted" +dat :: String +dat = "this is the data" +main :: IO b +main = do + _ <- mkWeakPtr dat . Just $ print "deleted" `debug` "deleted" main diff --git a/package.yaml b/package.yaml new file mode 100644 index 0000000..aa5fa2c --- /dev/null +++ b/package.yaml @@ -0,0 +1,176 @@ +name: TCache +version: 0.13.0.0 +github: "agocorona/TCache" +license: BSD3 +author: "Alberto Gómez Corona" +maintainer: "agocorona@gmail.com" +copyright: "2019 Alberto Gómez Corona" + +extra-source-files: + - ChangeLog.md + - README.md + - demos/* + - unfinished/* + +# Metadata used when publishing your package +synopsis: A Transactional cache with user-defined persistence +category: Data, Database + +# To avoid duplicated efforts in documentation and dealing with the +# complications of embedding Haddock markup inside cabal files, it is +# common to point users to the README.md file. +description: Please see the README on GitHub at + +ghc-options: -Wall + -Wcompat + -Widentities +# -Wredundant-constraints +# -Wmissing-export-lists +# -Wpartial-fields +# -Wincomplete-uni-patterns +# -Wincomplete-record-updates +# -Werror + +dependencies: + - base >= 4.7 && < 5 + - bytestring + - containers >= 0.1.0.1 + - directory >=1.0 + - old-time >=1.0 + - stm + - text + - mtl + - hashtables + - RefSerialize + +library: + source-dirs: . 
+ exposed-modules: + - Data.TCache + - Data.TCache.DefaultPersistence + - Data.TCache.Defs + - Data.TCache.IResource + - Data.TCache.IndexQuery + - Data.TCache.IndexText + - Data.TCache.Memoization + - Data.TCache.Triggers + - Data.Persistent.Collection + - Data.Persistent.IDynamic + +#executables: +# basicSample: +# main: basicSample.hs +# source-dirs: app +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +#tests: +# caching: +# main: caching.hs +# source-dirs: test/caching +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# DBRef: +# main: demos/DBRef.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# DynamicSample: +# main: demos/DynamicSample.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# indexQuery: +# main: demos/IndexQuery.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# indexText: +# main: demos/IndexText.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# memoization: +# main: demos/memoization.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# pr: +# main: demos/pr.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# pushpop: +# main: demos/pushpop.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# +# testnewdbref: +# main: demos/testnewdbref.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# triggerRelational: +# main: demos/triggerRelational.lhs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# weakTest: +# main: demos/weakTest.hs +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache +# tests: +# main: Spec.hs +# source-dirs: test +# ghc-options: +# - -threaded +# - -rtsopts +# - -with-rtsopts=-N +# dependencies: +# - TCache diff --git a/stack.yaml b/stack.yaml index f08a43f..e33b120 100644 --- a/stack.yaml +++ b/stack.yaml @@ -17,7 +17,7 @@ # # resolver: ./custom-snapshot.yaml # resolver: https://example.com/snapshots/2018-01-01.yaml -resolver: lts-13.15 +resolver: lts-13.17 # User packages to be built. # Various formats can be used as shown in the example below. diff --git a/unfinished/CollectionStream.hs b/unfinished/CollectionStream.hs new file mode 100644 index 0000000..cad397b --- /dev/null +++ b/unfinished/CollectionStream.hs @@ -0,0 +1,107 @@ +{-# OPTIONS -XRecordWildCards + -XTypeSynonymInstances + -XFlexibleInstances + -XUndecidableInstances + -XDeriveDataTypeable #-} +module Data.Persistent.CollectionStream where +import Data.Typeable +import Control.Concurrent.STM(STM,atomically, retry) +import Control.Monad(when) +import Data.TCache.DefaultPersistence + +import Data.TCache +import System.IO.Unsafe +import Data.RefSerialize +import Data.RefSerialize +import Data.ByteString.Lazy.Char8 as B +import qualified Data.Vector as V +import Data.HashTable as HT +import Data.ByteString.Lazy.Char8 as B +import Data.List +import Data.Ord + +--import Debug.Trace +--a !> b= trace b a + +--assocs = sortBy (comparing fst) . unsafePerformIO . 
HT.toList +-- +--instance Serialize NContext where +-- showp(NContext n (c,s))= +-- +-- insertString n +-- insertString $ showContext c +-- readp= do +-- n <- readp +-- s <- readContent +-- let c = unsafePerformIO newContext +-- return $ NContext n (c,s) +-- +-- +-- +--data NContext= NContext String (Context,ByteString) deriving Typeable +-- +--instance Indexable NContext where +-- key (NContext n _)= n + +--getExternalContext n= unsafePerformIO . atomically . newDBRef $ NContext n undefined + +instance Serialize a => Serializable a where + serialize= runW . showp + deserialize= runR readp + +instance Serialize a => Serialize (V.Vector a) where + showp= showp . V.toList + readp= readp >>= return . V.fromList + + + + +data Elem a= + Elem{ ename :: String + , eindex :: Int + , echilds :: Int + , emaxlength :: Int + , enelems :: Int + , econtent :: V.Vector (Either a, DBRef(Elem a)} + +get e = justify (readDBRef rcoll) $ "Not found: " ++ key e + +insertByKey re x= do + e@Elem{..} <- get re + let Just i' = findIndex (\y -> key y> key x ) econtent + i= i' -1 + case econtent ! (i-1) of + Left re' -> insertByKey re' x + Right t -> + if (key t== key x) + then writeDBRef re e{econtent= econtent //[(i,x)] } + else do + toins <- case enelems < emaxlength + True -> return $ Right x + False ->do + let childs echilds+1 + r <- newwDBRef e{eindex= childs+1, childs=0,enelems=1,econtent= singleton x} + writeDBRef re e{echilds=childs+1} + return $ Left r + let ncontent= update_ + econtent + (fromList [i..enelems]) + (cons toins (drop i' econtent)) + + writeDBRef re e{econtent= ncontent} + + + +lookup re k= do + e@Elem{..} <- get re + let Just z = findIndex (\y -> key y> k ) econtent + let i= i' -1 + case econtent ! i of + Right t -> assert (key t== k) $ return t + Left re' -> lookup re' x + + +push re x= do + e@Elem{..} <- get re + if emaxlength == enelems +