{-
Copyright (C) 2010 Scott R Parish <srp@srparish.net>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-}

module Database.MongoDB
    (
     -- * Connection
     Connection,
     connect, connectOnPort, conClose, disconnect, dropDatabase,
     serverInfo, serverShutdown,
     databasesInfo, databaseNames,
     -- * Database
     Database, MongoDBCollectionInvalid,
     ColCreateOpt(..),
     collectionNames, createCollection, dropCollection,
     renameCollection, runCommand, validateCollection,
     -- * Collection
     Collection, FieldSelector, FullCollection,
     NumToSkip, NumToReturn, Selector,
     QueryOpt(..),
     UpdateFlag(..),
     count, countMatching, delete, insert, insertMany, query, remove, update,
     -- * Convenience collection operations
     find, findOne, quickFind, quickFind',
     -- * Cursor
     Cursor,
     allDocs, allDocs', finish, nextDoc,
     -- * Index
     Key, Unique,
     Direction(..),
     createIndex, dropIndex, dropIndexes, indexInformation,
    )
    where

import Control.Exception
import Control.Monad
import Data.Binary
import Data.Binary.Get
import Data.Binary.Put
import Data.Bits
import Data.ByteString.Internal (c2w)
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.UTF8 as L8
import Data.Int
import Data.IORef
import qualified Data.List as List
import qualified Data.Map as Map
import Data.Maybe
import Data.Typeable
import Database.MongoDB.BSON as BSON
import Database.MongoDB.Util
import qualified Network
import Network.Socket hiding (connect, send, sendTo, recv, recvFrom)
import Prelude hiding (getContents)
import System.IO
import System.IO.Unsafe
import System.Random

-- | A handle to a database connection
data Connection = Connection { cHandle :: Handle, cRand :: IORef [Int] }

-- | Establish a connection to a MongoDB server on the standard port (27017).
connect :: HostName -> IO Connection
connect = flip connectOnPort $ Network.PortNumber 27017

-- | Establish a connection to a MongoDB server on a non-standard port.
connectOnPort :: HostName -> Network.PortID -> IO Connection
connectOnPort host port = do
  h <- Network.connectTo host port
  hSetBuffering h NoBuffering
  r <- newStdGen
  let ns = randomRs (fromIntegral (minBound :: Int32),
                     fromIntegral (maxBound :: Int32)) r
  nsRef <- newIORef ns
  return Connection { cHandle = h, cRand = nsRef }
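
-- A minimal usage sketch (not part of the API): assuming a MongoDB
-- server is listening on localhost, open a connection, list the
-- database names, and close the handle again.  The function name
-- @withLocalServer@ is made up for illustration.
--
-- > withLocalServer :: IO ()
-- > withLocalServer = do
-- >     c <- connect "127.0.0.1"
-- >     names <- databaseNames c
-- >     mapM_ print names
-- >     conClose c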

-- | Close a database connection.
conClose :: Connection -> IO ()
conClose = hClose . cHandle

-- | Information about the databases on the server.
databasesInfo :: Connection -> IO BsonDoc
databasesInfo c =
    runCommand c (s2L "admin") $ toBsonDoc [("listDatabases", toBson (1::Int))]

-- | Return a list of database names on the server.
databaseNames :: Connection -> IO [Database]
databaseNames c = do
    info <- databasesInfo c
    let (BsonArray dbs) = fromJust $ Map.lookup (s2L "databases") info
        names = mapMaybe (Map.lookup (s2L "name") . fromBson) dbs
    return $ List.map fromBson (names::[BsonValue])

-- | Alias for 'conClose'.
disconnect :: Connection -> IO ()
disconnect = conClose

-- | Drop a database.
dropDatabase :: Connection -> Database -> IO ()
dropDatabase c db = do
  _ <- runCommand c db $ toBsonDoc [("dropDatabase", toBson (1::Int))]
  return ()

-- | Get information about the MongoDB server we're connected to.
serverInfo :: Connection -> IO BsonDoc
serverInfo c =
    runCommand c (s2L "admin") $ toBsonDoc [("buildinfo", toBson (1::Int))]

-- | Shut down the MongoDB server.
--
-- Force a clean exit, flushing and closing all data files.
-- Note that it will wait until all ongoing operations are complete.
serverShutdown :: Connection -> IO BsonDoc
serverShutdown c =
    runCommand c (s2L "admin") $ toBsonDoc [("shutdown", toBson (1::Int))]

-- | Return a list of collections in /Database/.
collectionNames :: Connection -> Database -> IO [FullCollection]
collectionNames c db = do
  docs <- quickFind' c (L.append db $ s2L ".system.namespaces") empty
  let names = flip List.map docs $ fromBson . fromJust . BSON.lookup "name"
  return $ List.filter (L.notElem $ c2w '$') names

data ColCreateOpt = CCOSize Int64  -- ^ Desired initial size for the
                                   -- collection (in bytes). Must be
                                   -- less than or equal to
                                   -- 10000000000. For capped
                                   -- collections this size is the max
                                   -- size of the collection.
                  | CCOCapped Bool -- ^ If 'True', this is a capped collection.
                  | CCOMax Int64   -- ^ Maximum number of objects if capped.
                    deriving (Show, Eq)

colCreateOptToBson :: ColCreateOpt -> (String, BsonValue)
colCreateOptToBson (CCOSize sz) = ("size", toBson sz)
colCreateOptToBson (CCOCapped b) = ("capped", toBson b)
colCreateOptToBson (CCOMax m) = ("max", toBson m)

-- | Create a new collection in this database.
--
-- Normally collection creation is automatic. This function should
-- only be needed if you want to specify 'ColCreateOpt's on creation.
-- 'MongoDBCollectionInvalid' is thrown if the collection already
-- exists.
createCollection :: Connection -> FullCollection -> [ColCreateOpt] -> IO ()
createCollection c col opts = do
  (db, col') <- validateCollectionName col
  dbcols <- collectionNames c db
  when (col `List.elem` dbcols) $
       throwColInvalid $ "Collection already exists: " ++ show col
  let cmd = ("create", toBson col') : List.map colCreateOptToBson opts
  _ <- runCommand c db $ toBsonDoc cmd
  return ()
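
-- A hedged example of explicit creation: make a capped collection
-- limited to roughly one megabyte and 1000 documents.  The database
-- and collection names are hypothetical.
--
-- > mkCappedLog :: Connection -> IO ()
-- > mkCappedLog c = createCollection c (s2L "mydb.log")
-- >                     [CCOCapped True, CCOSize 1048576, CCOMax 1000]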

-- | Drop a collection.
dropCollection :: Connection -> FullCollection -> IO ()
dropCollection c col = do
  let (db, col') = splitFullCol col
  _ <- runCommand c db $ toBsonDoc [("drop", toBson col')]
  return ()

-- | Rename a collection--the first /FullCollection/ argument is the
-- existing name, the second is the new name. At the moment this command
-- can also be used to move a collection between databases.
renameCollection :: Connection -> FullCollection -> FullCollection -> IO ()
renameCollection c col newName = do
  _ <- validateCollectionName col
  _ <- runCommand c (s2L "admin") $ toBsonDoc [("renameCollection", toBson col),
                                               ("to", toBson newName)]
  return ()

-- | Return a string of validation info about the collection.
--
-- Example output (note this probably can/will change with different
-- versions of the server):
--
-- > validate
-- > details: 0x7fe5cc2c1da4 ofs:e7da4
-- > firstExtent:0:24100 ns:test.foo.bar
-- > lastExtent:0:24100 ns:test.foo.bar
-- > # extents:1
-- > datasize?:180 nrecords?:5 lastExtentSize:1024
-- > padding:1
-- > first extent:
-- > loc:0:24100 xnext:null xprev:null
-- > nsdiag:test.foo.bar
-- > size:1024 firstRecord:0:241e4 lastRecord:0:24280
-- > 5 objects found, nobj:5
-- > 260 bytes data w/headers
-- > 180 bytes data wout/headers
-- > deletedList: 0100100000000000000
-- > deleted: n: 4 size: 588
-- > nIndexes:1
-- > test.foo.bar.$_id_ keys:5
validateCollection :: Connection -> FullCollection -> IO String
validateCollection c col = do
  let (db, col') = splitFullCol col
  res <- runCommand c db $ toBsonDoc [("validate", toBson col')]
  return $ fromBson $ fromJust $ BSON.lookup "result" res

splitFullCol :: FullCollection -> (Database, Collection)
splitFullCol col = (L.takeWhile (c2w '.' /=) col,
                    L.tail $ L.dropWhile (c2w '.' /=) col)
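
-- For example, @splitFullCol (s2L "foo.bar")@ yields
-- @(s2L "foo", s2L "bar")@: everything before the first @.@ is the
-- database name, everything after it is the collection name.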

-- | Run a database command. Usually this is unneeded as the driver
-- wraps all of the commands for you (e.g. 'createCollection',
-- 'dropCollection', etc).
runCommand :: Connection -> Database -> BsonDoc -> IO BsonDoc
runCommand c db cmd = do
  mres <- findOne c (L.append db $ s2L ".$cmd") cmd
  let res = fromJust mres
  when (1 /= (fromBson $ fromJust $ BSON.lookup "ok" res :: Int)) $
       throwOpFailure $ "command \"" ++ show cmd ++ "\" failed: " ++
                        fromBson (fromJust $ BSON.lookup "errmsg" res)
  return res
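
-- A sketch of calling a server command directly; 'serverInfo' and
-- 'databasesInfo' above are thin wrappers around this same pattern.
-- The @buildinfo@ command is the one 'serverInfo' uses.
--
-- > buildInfo :: Connection -> IO BsonDoc
-- > buildInfo c = runCommand c (s2L "admin") $
-- >                   toBsonDoc [("buildinfo", toBson (1::Int))]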

-- | An iterator over the results of a query. Use 'nextDoc' to get each
-- successive result document, or 'allDocs' or 'allDocs'' to get lazy or
-- strict lists of results.
data Cursor = Cursor {
      curCon :: Connection,
      curID :: IORef Int64,
      curNumToRet :: Int32,
      curCol :: FullCollection,
      curDocBytes :: IORef L.ByteString,
      curClosed :: IORef Bool
    }

data Opcode
    = OPReply          -- 1     Reply to a client request. responseTo is set
    | OPMsg            -- 1000  generic msg command followed by a string
    | OPUpdate         -- 2001  update document
    | OPInsert         -- 2002  insert new document
    | OPGetByOid       -- 2003  is this used?
    | OPQuery          -- 2004  query a collection
    | OPGetMore        -- 2005  Get more data from a query. See Cursors
    | OPDelete         -- 2006  Delete documents
    | OPKillCursors    -- 2007  Tell database client is done with a cursor
    deriving (Show, Eq)

data MongoDBInternalError = MongoDBInternalError String
                            deriving (Eq, Show, Read)

mongoDBInternalError :: TyCon
mongoDBInternalError = mkTyCon "Database.MongoDB.MongoDBInternalError"

instance Typeable MongoDBInternalError where
    typeOf _ = mkTyConApp mongoDBInternalError []

instance Exception MongoDBInternalError

data MongoDBCollectionInvalid = MongoDBCollectionInvalid String
                                deriving (Eq, Show, Read)

mongoDBCollectionInvalid :: TyCon
mongoDBCollectionInvalid = mkTyCon "Database.MongoDB.MongoDBcollectionInvalid"

instance Typeable MongoDBCollectionInvalid where
    typeOf _ = mkTyConApp mongoDBCollectionInvalid []

instance Exception MongoDBCollectionInvalid

throwColInvalid :: String -> a
throwColInvalid = throw . MongoDBCollectionInvalid

data MongoDBOperationFailure = MongoDBOperationFailure String
                               deriving (Eq, Show, Read)

mongoDBOperationFailure :: TyCon
mongoDBOperationFailure = mkTyCon "Database.MongoDB.MongoDBoperationFailure"

instance Typeable MongoDBOperationFailure where
    typeOf _ = mkTyConApp mongoDBOperationFailure []

instance Exception MongoDBOperationFailure

throwOpFailure :: String -> a
throwOpFailure = throw . MongoDBOperationFailure

fromOpcode :: Opcode -> Int32
fromOpcode OPReply       = 1
fromOpcode OPMsg         = 1000
fromOpcode OPUpdate      = 2001
fromOpcode OPInsert      = 2002
fromOpcode OPGetByOid    = 2003
fromOpcode OPQuery       = 2004
fromOpcode OPGetMore     = 2005
fromOpcode OPDelete      = 2006
fromOpcode OPKillCursors = 2007

toOpcode :: Int32 -> Opcode
toOpcode 1    = OPReply
toOpcode 1000 = OPMsg
toOpcode 2001 = OPUpdate
toOpcode 2002 = OPInsert
toOpcode 2003 = OPGetByOid
toOpcode 2004 = OPQuery
toOpcode 2005 = OPGetMore
toOpcode 2006 = OPDelete
toOpcode 2007 = OPKillCursors
toOpcode n = throw $ MongoDBInternalError $ "Got unexpected Opcode: " ++ show n

-- | The name of a database.
type Database = L8.ByteString

-- | The full collection name. The full collection name is the
-- concatenation of the database name with the collection name, using
-- a @.@ for the concatenation. For example, for the database @foo@
-- and the collection @bar@, the full collection name is @foo.bar@.
type FullCollection = L8.ByteString

-- | The same as 'FullCollection' but without the 'Database' prefix.
type Collection = L8.ByteString

-- | A 'BsonDoc' representing restrictions for a query much like the
-- /where/ part of an SQL query.
type Selector = BsonDoc

-- | A list of field names that limits the fields in the returned
-- documents. The list can contain zero or more elements, each of
-- which is the name of a field that should be returned. An empty list
-- means that no limiting is done and all fields are returned.
type FieldSelector = [L8.ByteString]

type RequestID = Int32

-- | Sets the number of documents to omit - starting from the first
-- document in the resulting dataset - when returning the result of
-- the query.
type NumToSkip = Int32

-- | This controls how many documents are returned at a time. The
-- cursor works by requesting /NumToReturn/ documents, which are then
-- immediately all transferred over the network; these are held locally
-- until those /NumToReturn/ are all consumed, and then the network
-- will be hit again for the next /NumToReturn/ documents.
--
-- If the value @0@ is given, the database will choose the number of
-- documents to return.
--
-- Otherwise choosing a good value is very dependent on the document size
-- and the way the cursor is being used.
type NumToReturn = Int32

-- | Options that control the behavior of a 'query' operation.
data QueryOpt = QOTailableCursor
              | QOSlaveOK
              | QOOpLogReplay
              | QONoCursorTimeout
                deriving (Show)

fromQueryOpts :: [QueryOpt] -> Int32
fromQueryOpts opts = List.foldl (.|.) 0 $ fmap toVal opts
    where toVal QOTailableCursor = 2
          toVal QOSlaveOK = 4
          toVal QOOpLogReplay = 8
          toVal QONoCursorTimeout = 16
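
-- Each option corresponds to one bit of the wire-protocol flags field,
-- so a list of options is just the bitwise OR of the values above; for
-- example @fromQueryOpts [QOSlaveOK, QONoCursorTimeout]@ is
-- @4 .|. 16 == 20@.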

-- | Options that affect the behavior of an 'update' operation.
data UpdateFlag = UFUpsert
                | UFMultiupdate
                  deriving (Show, Enum)

fromUpdateFlags :: [UpdateFlag] -> Int32
fromUpdateFlags flags = List.foldl (.|.) 0 $
                        flip fmap flags $ (1 `shiftL`) . fromEnum
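
-- The bit position comes from the constructor's 'Enum' index:
-- 'UFUpsert' is bit 0 and 'UFMultiupdate' is bit 1, so
-- @fromUpdateFlags [UFUpsert] == 1@ and
-- @fromUpdateFlags [UFUpsert, UFMultiupdate] == 3@.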

-- | Return the number of documents in /FullCollection/.
count :: Connection -> FullCollection -> IO Int64
count c col = countMatching c col empty

-- | Return the number of documents in /FullCollection/ matching /Selector/.
countMatching :: Connection -> FullCollection -> Selector -> IO Int64
countMatching c col sel = do
  let (db, col') = splitFullCol col
  res <- runCommand c db $ toBsonDoc [("count", toBson col'),
                                      ("query", toBson sel)]
  return $ fromBson $ fromJust $ BSON.lookup "n" res
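
-- A small sketch of counting with a selector; the collection and field
-- names here are hypothetical.
--
-- > countBobs :: Connection -> IO Int64
-- > countBobs c = countMatching c (s2L "mydb.people") $
-- >                   toBsonDoc [("name", toBson "bob")]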

-- | Delete documents matching /Selector/ from the given /FullCollection/.
delete :: Connection -> FullCollection -> Selector -> IO RequestID
delete c col sel = do
  let body = runPut $ do
               putI32 0
               putCol col
               putI32 0
               putBsonDoc sel
  (reqID, msg) <- packMsg c OPDelete body
  L.hPut (cHandle c) msg
  return reqID

-- | An alias for 'delete'.
remove :: Connection -> FullCollection -> Selector -> IO RequestID
remove = delete

-- | Insert a single document into /FullCollection/.
insert :: Connection -> FullCollection -> BsonDoc -> IO RequestID
insert c col doc = do
  let body = runPut $ do
               putI32 0
               putCol col
               putBsonDoc doc
  (reqID, msg) <- packMsg c OPInsert body
  L.hPut (cHandle c) msg
  return reqID

-- | Insert a list of documents into /FullCollection/.
insertMany :: Connection -> FullCollection -> [BsonDoc] -> IO RequestID
insertMany c col docs = do
  let body = runPut $ do
               putI32 0
               putCol col
               forM_ docs putBsonDoc
  (reqID, msg) <- packMsg c OPInsert body
  L.hPut (cHandle c) msg
  return reqID
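
-- A hedged sketch of inserting documents; the field values are made
-- up.  Note that 'insert' and 'insertMany' are fire-and-forget here:
-- the message is written and the request ID returned without waiting
-- for any reply from the server.
--
-- > addPeople :: Connection -> IO ()
-- > addPeople c = do
-- >     _ <- insertMany c (s2L "mydb.people")
-- >              [ toBsonDoc [("name", toBson "alice")]
-- >              , toBsonDoc [("name", toBson "bob")] ]
-- >     return ()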

-- | Open a cursor to find documents. If you need full functionality,
-- see 'query'.
find :: Connection -> FullCollection -> Selector -> IO Cursor
find c col sel = query c col [] 0 0 sel []

-- | Query, but only return the first result, if any.
findOne :: Connection -> FullCollection -> Selector -> IO (Maybe BsonDoc)
findOne c col sel = do
  cur <- query c col [] 0 (-1) sel []
  nextDoc cur

-- | Perform a query and return the result as a lazy list. Be sure to
-- understand the comments about using the lazy list given for
-- 'allDocs'.
quickFind :: Connection -> FullCollection -> Selector -> IO [BsonDoc]
quickFind c col sel = find c col sel >>= allDocs

-- | Perform a query and return the result as a strict list.
quickFind' :: Connection -> FullCollection -> Selector -> IO [BsonDoc]
quickFind' c col sel = find c col sel >>= allDocs'
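
-- The practical difference: 'quickFind' reads cursor batches lazily as
-- the list is consumed, while 'quickFind'' loads every matching
-- document into memory immediately.  A hedged sketch with a
-- hypothetical collection name:
--
-- > firstTen :: Connection -> IO [BsonDoc]
-- > firstTen c = do
-- >     docs <- quickFind c (s2L "mydb.people") (toBsonDoc [])
-- >     return $ List.take 10 docs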

-- | Open a cursor to find documents in /FullCollection/ that match
-- /Selector/. See the documentation for each argument's type for
-- information about how it affects the query.
query :: Connection -> FullCollection -> [QueryOpt] ->
         NumToSkip -> NumToReturn -> Selector -> FieldSelector -> IO Cursor
query c col opts nskip ret sel fsel = do
  let h = cHandle c

  let body = runPut $ do
               putI32 $ fromQueryOpts opts
               putCol col
               putI32 nskip
               putI32 ret
               putBsonDoc sel
               case fsel of
                 [] -> putNothing
                 _  -> putBsonDoc $ toBsonDoc $ List.zip fsel $
                       repeat $ BsonInt32 1
  (reqID, msg) <- packMsg c OPQuery body
  L.hPut h msg

  hdr <- getHeader h
  assert (OPReply == hOp hdr) $ return ()
  assert (hRespTo hdr == reqID) $ return ()
  reply <- getReply h
  assert (rRespFlags reply == 0) $ return ()
  docBytes <- L.hGet h (fromIntegral $ hMsgLen hdr - 16 - 20) >>= newIORef
  closed <- newIORef False
  cid <- newIORef $ rCursorID reply
  return Cursor {
               curCon = c,
               curID = cid,
               curNumToRet = ret,
               curCol = col,
               curDocBytes = docBytes,
               curClosed = closed
             }
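
-- A fuller query sketch: skip the first 20 matches, return at most 10,
-- and only fetch the @name@ field.  All of the names used here are
-- hypothetical.
--
-- > pageOfNames :: Connection -> IO [BsonDoc]
-- > pageOfNames c = do
-- >     cur <- query c (s2L "mydb.people") [QONoCursorTimeout]
-- >                20 10 (toBsonDoc []) [s2L "name"]
-- >     allDocs cur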

-- | Update documents with /BsonDoc/ in /FullCollection/ that match /Selector/.
update :: Connection -> FullCollection ->
          [UpdateFlag] -> Selector -> BsonDoc -> IO RequestID
update c col flags sel obj = do
  let body = runPut $ do
               putI32 0
               putCol col
               putI32 $ fromUpdateFlags flags
               putBsonDoc sel
               putBsonDoc obj
  (reqID, msg) <- packMsg c OPUpdate body
  L.hPut (cHandle c) msg
  return reqID
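
-- A hedged upsert sketch: if a document matches the selector it is
-- replaced, otherwise the given document is inserted.  Field names are
-- made up.
--
-- > setAge :: Connection -> IO RequestID
-- > setAge c = update c (s2L "mydb.people") [UFUpsert]
-- >                (toBsonDoc [("name", toBson "alice")])
-- >                (toBsonDoc [("name", toBson "alice"),
-- >                            ("age", toBson (30::Int))])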

data Hdr = Hdr {
      hMsgLen :: Int32,
      -- hReqID :: Int32,
      hRespTo :: Int32,
      hOp :: Opcode
    } deriving (Show)

data Reply = Reply {
      rRespFlags :: Int32,
      rCursorID :: Int64
      -- rStartFrom :: Int32,
      -- rNumReturned :: Int32
    } deriving (Show)

getHeader :: Handle -> IO Hdr
getHeader h = do
  hdrBytes <- L.hGet h 16
  return $ flip runGet hdrBytes $ do
    msgLen <- getI32
    skip 4 -- reqID <- getI32
    respTo <- getI32
    op <- getI32
    return $ Hdr msgLen respTo $ toOpcode op

getReply :: Handle -> IO Reply
getReply h = do
  replyBytes <- L.hGet h 20
  return $ flip runGet replyBytes $ do
    respFlags <- getI32
    cursorID <- getI64
    skip 4 -- startFrom <- getI32
    skip 4 -- numReturned <- getI32
    return $ Reply respFlags cursorID

-- | Return one document or Nothing if there are no more.
-- Automatically closes the cursor when the last document is read.
nextDoc :: Cursor -> IO (Maybe BsonDoc)
nextDoc cur = do
  closed <- readIORef $ curClosed cur
  if closed
    then return Nothing
    else do
      docBytes <- readIORef $ curDocBytes cur
      cid <- readIORef $ curID cur
      case L.length docBytes of
        0 -> if cid == 0
               then writeIORef (curClosed cur) True >> return Nothing
               else getMore cur
        _ -> do
          let (doc, docBytes') = getFirstDoc docBytes
          writeIORef (curDocBytes cur) docBytes'
          return $ Just doc
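
-- A sketch of walking a cursor by hand with 'nextDoc'; most callers
-- can just use 'allDocs' instead.
--
-- > printAll :: Cursor -> IO ()
-- > printAll cur = do
-- >     mdoc <- nextDoc cur
-- >     case mdoc of
-- >       Nothing -> return ()
-- >       Just doc -> print doc >> printAll cur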

-- | Return a lazy list of all (of the rest) of the documents in the
-- cursor. This works much like hGetContents--it will lazily read the
-- cursor data out of the database as the list is used. The cursor is
-- automatically closed when the list has been fully read.
--
-- If you manually finish the cursor before consuming off this list
-- you won't get all the original documents in the cursor.
--
-- If you don't consume to the end of the list, you must manually
-- close the cursor or you will leak the cursor, which may also leak
-- on the database side.
allDocs :: Cursor -> IO [BsonDoc]
allDocs cur = unsafeInterleaveIO $ do
                doc <- nextDoc cur
                case doc of
                  Nothing -> return []
                  Just d -> liftM (d :) (allDocs cur)

-- | Returns a strict list of all (of the rest) of the documents in
-- the cursor. This means that all of the documents will immediately
-- be read out of the database and loaded into memory.
allDocs' :: Cursor -> IO [BsonDoc]
allDocs' cur = do
  doc <- nextDoc cur
  case doc of
    Nothing -> return []
    Just d -> liftM (d :) (allDocs' cur)

getFirstDoc :: L.ByteString -> (BsonDoc, L.ByteString)
getFirstDoc docBytes = flip runGet docBytes $ do
                         doc <- getBsonDoc
                         docBytes' <- getRemainingLazyByteString
                         return (doc, docBytes')

getMore :: Cursor -> IO (Maybe BsonDoc)
getMore cur = do
  let h = cHandle $ curCon cur

  cid <- readIORef $ curID cur
  let body = runPut $ do
               putI32 0
               putCol $ curCol cur
               putI32 $ curNumToRet cur
               putI64 cid
  (reqID, msg) <- packMsg (curCon cur) OPGetMore body
  L.hPut h msg

  hdr <- getHeader h
  assert (OPReply == hOp hdr) $ return ()
  assert (hRespTo hdr == reqID) $ return ()
  reply <- getReply h
  assert (rRespFlags reply == 0) $ return ()
  case rCursorID reply of
    0 -> writeIORef (curID cur) 0
    ncid -> assert (ncid == cid) $ return ()
  docBytes <- (L.hGet h $ fromIntegral $ hMsgLen hdr - 16 - 20)
  case L.length docBytes of
    0 -> writeIORef (curClosed cur) True >> return Nothing
    _ -> do
      let (doc, docBytes') = getFirstDoc docBytes
      writeIORef (curDocBytes cur) docBytes'
      return $ Just doc

-- | Manually close a cursor -- usually not needed if you use
-- 'allDocs', 'allDocs'', or 'nextDoc'.
finish :: Cursor -> IO ()
finish cur = do
  let h = cHandle $ curCon cur
  cid <- readIORef $ curID cur
  unless (cid == 0) $ do
      let body = runPut $ do
                   putI32 0
                   putI32 1
                   putI64 cid
      (_reqID, msg) <- packMsg (curCon cur) OPKillCursors body
      L.hPut h msg
      writeIORef (curClosed cur) True
  return ()

-- | The field key to index on.
type Key = L8.ByteString

-- | Direction to index.
data Direction = Ascending
               | Descending
                 deriving (Show, Eq)

fromDirection :: Direction -> Int
fromDirection Ascending = 1
fromDirection Descending = -1

-- | Should this index guarantee uniqueness?
type Unique = Bool

-- | Create a new index on /FullCollection/ on the list of /Key/ /
-- /Direction/ pairs.
createIndex :: Connection -> FullCollection ->
               [(Key, Direction)] -> Unique -> IO L8.ByteString
createIndex c col keys uniq = do
  let (db, _col') = splitFullCol col
      name = indexName keys
      keysDoc = flip fmap keys $
                \(k, d) -> (k, toBson $ fromDirection d :: BsonValue)
  _ <- insert c (L.append db $ s2L ".system.indexes") $
       toBsonDoc [("name", toBson name),
                  ("ns", toBson col),
                  ("key", toBson keysDoc),
                  ("unique", toBson uniq)]
  return name
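
-- A sketch of building the compound index described under
-- 'indexInformation' below: descending on @lastname@, then ascending
-- on @firstname@, with a uniqueness requirement.
--
-- > mkNameIndex :: Connection -> IO L8.ByteString
-- > mkNameIndex c = createIndex c (s2L "mydb.people")
-- >                     [(s2L "lastname", Descending),
-- >                      (s2L "firstname", Ascending)] True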

-- | Drop the specified index on the given /FullCollection/.
dropIndex :: Connection -> FullCollection -> [(Key, Direction)] -> IO ()
dropIndex c col keys = do
  let (db, col') = splitFullCol col
      name = indexName keys
  _ <- runCommand c db $ toBsonDoc [("deleteIndexes", toBson col'),
                                    ("index", toBson name)]
  return ()

-- | Drop all indexes on /FullCollection/.
dropIndexes :: Connection -> FullCollection -> IO ()
dropIndexes c col = do
  let (db, col') = splitFullCol col
  _ <- runCommand c db $ toBsonDoc [("deleteIndexes", toBson col'),
                                    ("index", toBson "*")]
  return ()

-- | Return a list of 'BsonDoc's describing the existing indexes on
-- /FullCollection/.
--
-- With the current server versions (1.2) this will return documents
-- such as:
--
-- > {"key": {"lastname": -1, "firstname": 1},
-- >  "name": "lastname_-1_firstname_1",
-- >  "ns": "mydb.people",
-- >  "unique": true}
--
-- Which is a single key that indexes on @lastname@ (descending) and
-- then @firstname@ (ascending) on the collection @people@ of the
-- database @mydb@ with a uniqueness requirement.
indexInformation :: Connection -> FullCollection -> IO [BsonDoc]
indexInformation c col = do
  let (db, _col') = splitFullCol col
  quickFind' c (L.append db $ s2L ".system.indexes") $
             toBsonDoc [("ns", toBson col)]

indexName :: [(Key, Direction)] -> L8.ByteString
indexName = L.intercalate (s2L "_") . List.map partName
    where partName (k, Ascending)  = L.append k $ s2L "_1"
          partName (k, Descending) = L.append k $ s2L "_-1"
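
-- For example, @indexName [(s2L "lastname", Descending),
-- (s2L "firstname", Ascending)]@ produces @"lastname_-1_firstname_1"@,
-- the same name shown in the 'indexInformation' example above.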

putCol :: Collection -> Put
putCol col = putLazyByteString col >> putNull

packMsg :: Connection -> Opcode -> L.ByteString -> IO (RequestID, L.ByteString)
packMsg c op body = do
  reqID <- randNum c
  let msg = runPut $ do
              putI32 $ fromIntegral $ L.length body + 16
              putI32 reqID
              putI32 0
              putI32 $ fromOpcode op
              putLazyByteString body
  return (reqID, msg)
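
-- The 16 bytes prepended above are the standard message header: total
-- message length, this request's ID, responseTo (always zero for
-- requests we originate), and the opcode; the body follows immediately.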

randNum :: Connection -> IO Int32
randNum Connection { cRand = nsRef } = atomicModifyIORef nsRef $ \ns ->
                                       (List.tail ns,
                                        fromIntegral $ List.head ns)

s2L :: String -> L8.ByteString
s2L = L8.fromString

validateCollectionName :: FullCollection -> IO (Database, Collection)
validateCollectionName col = do
  let (db, col') = splitFullCol col
  when (s2L ".." `List.elem` L.group col) $
       throwColInvalid $ "Collection can't contain \"..\": " ++ show col
  when (c2w '$' `L.elem` col &&
        not (s2L "oplog.$main" `L.isPrefixOf` col' ||
             s2L "$cmd" `L.isPrefixOf` col')) $
       throwColInvalid $ "Collection can't contain '$': " ++ show col
  when (L.head col == c2w '.' || L.last col == c2w '.') $
       throwColInvalid $ "Collection can't start or end with '.': " ++ show col
  return (db, col')