-
Notifications
You must be signed in to change notification settings - Fork 301
Expand file tree
/
Copy pathMongoDB.hs
More file actions
1477 lines (1273 loc) · 58.7 KB
/
MongoDB.hs
File metadata and controls
1477 lines (1273 loc) · 58.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
{-# LANGUAGE CPP #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -Wno-orphans #-}
-- | Use persistent-mongodb the same way you would use other persistent
-- libraries and refer to the general persistent documentation.
-- There are some new MongoDB specific filters under the filters section.
-- These help extend your query into a nested document.
--
-- However, at some point you will find the normal Persistent APIs lacking
-- and want lower-level MongoDB access.
-- There are functions available to make working with the raw driver
-- easier: they are under the Entity conversion section.
-- You should still use the same connection pool that you are using for Persistent.
--
-- MongoDB is a schema-less database.
-- The MongoDB Persistent backend does not help perform migrations.
-- Unlike SQL backends, uniqueness constraints cannot be created for you.
-- You must place a unique index on unique fields.
module Database.Persist.MongoDB
(
-- * Entity conversion
collectionName
, docToEntityEither
, docToEntityThrow
, recordToDocument
, documentFromEntity
, toInsertDoc
, entityToInsertDoc
, updatesToDoc
, filtersToDoc
, toUniquesDoc
-- * MongoDB specific queries
-- $nested
, (->.), (~>.), (?&->.), (?&~>.), (&->.), (&~>.)
-- ** Filters
-- $filters
, nestEq, nestNe, nestGe, nestLe, nestIn, nestNotIn
, anyEq, nestAnyEq, nestBsonEq, anyBsonEq
, inList, ninList
, (=~.)
-- non-operator forms of filters
, NestedField(..)
, MongoRegexSearchable
, MongoRegex
-- ** Updates
-- $updates
, nestSet, nestInc, nestDec, nestMul, push, pull, pullAll, addToSet, eachOp
-- * Key conversion helpers
, BackendKey(..)
, keyToOid
, oidToKey
, recordTypeFromKey
, readMayObjectId
, readMayMongoKey
, keyToText
-- * PersistField conversion
, fieldName
-- * using connections
, withConnection
, withMongoPool
, withMongoDBConn
, withMongoDBPool
, createMongoDBPool
, runMongoDBPool
, runMongoDBPoolDef
, ConnectionPool
, Connection
, MongoAuth (..)
-- * Connection configuration
, MongoConf (..)
, defaultMongoConf
, defaultHost
, defaultAccessMode
, defaultPoolStripes
, defaultConnectionIdleTime
, defaultStripeConnections
, applyDockerEnv
-- ** using raw MongoDB pipes
, PipePool
, createMongoDBPipePool
, runMongoDBPipePool
-- * network type
, HostName
-- * MongoDB driver types
, Database
, DB.Action
, DB.AccessMode(..)
, DB.master
, DB.slaveOk
, (DB.=:)
, DB.ObjectId
, DB.MongoContext
, DB.PortID
-- * Database.Persist
, module Database.Persist
) where
import Control.Exception (throw, throwIO)
import Control.Monad (liftM, (>=>), forM_, unless)
import Control.Monad.IO.Class (liftIO)
import qualified Control.Monad.IO.Class as Trans
import Control.Monad.IO.Unlift (MonadUnliftIO, withRunInIO)
import Control.Monad.Trans.Reader (ask, runReaderT)
import Data.Acquire (mkAcquire)
import Data.Aeson (Value (Number), (.:), (.:?), (.!=), FromJSON(..), ToJSON(..), withText, withObject)
import Data.Aeson.Types (modifyFailure)
import Data.Bits (shiftR)
import Data.Bson (ObjectId(..))
import qualified Data.ByteString as BS
import Data.Conduit
import Data.Maybe (mapMaybe, fromJust)
import Data.Monoid (mappend)
import qualified Data.Serialize as Serialize
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.Encoding as E
import qualified Data.Traversable as Traversable
import qualified Data.Pool as Pool
import Data.Time (NominalDiffTime)
import Data.Time.Calendar (Day(..))
#ifdef HIGH_PRECISION_DATE
import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds)
#endif
import Data.Word (Word16)
import Network.Socket (HostName)
import Numeric (readHex)
import System.Environment (lookupEnv)
import Unsafe.Coerce (unsafeCoerce)
import Web.PathPieces (PathPiece(..))
import Web.HttpApiData (ToHttpApiData(..), FromHttpApiData(..), parseUrlPieceMaybe, parseUrlPieceWithPrefix, readTextData)
#ifdef DEBUG
import FileLocation (debug)
#endif
import qualified Database.MongoDB as DB
import Database.MongoDB.Query (Database)
import Database.Persist
import qualified Database.Persist.Sql as Sql
-- | A 'DB.MongoContext' is its own base backend; there is no further
-- backend to unwrap.
instance HasPersistBackend DB.MongoContext where
    type BaseBackend DB.MongoContext = DB.MongoContext
    persistBackend = id
-- | Phantom helper: names the record type associated with a 'Key' for
-- type-level dispatch (e.g. 'entityDefFromKey'). The result carries no
-- runtime value and must never be evaluated.
recordTypeFromKey :: Key record -> record
recordTypeFromKey _ = error "recordTypeFromKey: phantom value used for type dispatch only; must not be evaluated"
-- | Newtype wrapper so the 'FromJSON' instance below is not an orphan.
newtype NoOrphanNominalDiffTime = NoOrphanNominalDiffTime NominalDiffTime
    deriving (Show, Eq, Num)

instance FromJSON NoOrphanNominalDiffTime where
    -- accepts any JSON number, interpreted as seconds
    parseJSON (Number x) = (return . NoOrphanNominalDiffTime . fromRational . toRational) x
    parseJSON _ = fail "couldn't parse diff time"

-- | Newtype wrapper so the 'FromJSON' instance below is not an orphan.
newtype NoOrphanPortID = NoOrphanPortID DB.PortID deriving (Show, Eq)

instance FromJSON NoOrphanPortID where
    -- accepts a JSON number, rounded into a 16-bit TCP port number
    parseJSON (Number x) = (return . NoOrphanPortID . DB.PortNumber . fromIntegral ) cnvX
      where
        cnvX :: Word16
        cnvX = round x
    parseJSON _ = fail "couldn't parse port number"
-- | A single MongoDB connection: the network pipe plus the database it targets.
data Connection = Connection DB.Pipe DB.Database

-- | A striped resource pool of MongoDB connections.
type ConnectionPool = Pool.Pool Connection

instance ToHttpApiData (BackendKey DB.MongoContext) where
    toUrlPiece = keyToText

instance FromHttpApiData (BackendKey DB.MongoContext) where
    -- accepts both a plain ObjectId rendering and one prefixed with "o"
    parseUrlPiece input = do
        s <- parseUrlPieceWithPrefix "o" input <!> return input
        MongoKey <$> readTextData s
      where
        -- left-biased choice: fall through to the second parser on failure
        infixl 3 <!>
        Left _ <!> y = y
        x <!> _ = x

-- | ToPathPiece is used to convert a key to/from text
instance PathPiece (BackendKey DB.MongoContext) where
    toPathPiece = toUrlPiece
    fromPathPiece = parseUrlPieceMaybe
-- | Render a Mongo backend key via the 'Show' instance of its 'DB.ObjectId'.
keyToText :: BackendKey DB.MongoContext -> Text
keyToText key = T.pack (show (unMongoKey key))
-- | Convert a Text to a Key
-- ('Nothing' when the text is not a valid ObjectId rendering).
readMayMongoKey :: Text -> Maybe (BackendKey DB.MongoContext)
readMayMongoKey = fmap MongoKey . readMayObjectId
-- | Parse an 'DB.ObjectId' from its textual 'show' representation.
-- Succeeds only when the entire input parses unambiguously.
readMayObjectId :: Text -> Maybe DB.ObjectId
readMayObjectId str =
    case [ oid | (oid, rest) <- reads (T.unpack str) :: [(DB.ObjectId, String)], null rest ] of
        [oid] -> Just oid
        _     -> Nothing
instance PersistField DB.ObjectId where
    toPersistValue = oidToPersistValue
    fromPersistValue oid@(PersistObjectId _) = Right $ persistObjectIdToDbOid oid
    -- a raw ByteString is re-wrapped and treated as ObjectId bytes
    fromPersistValue (PersistByteString bs) = fromPersistValue (PersistObjectId bs)
    fromPersistValue _ = Left $ T.pack "expected PersistObjectId"

-- | SQL types are meaningless for MongoDB; these instances exist only to
-- satisfy code that demands a 'Sql.PersistFieldSql' constraint.
instance Sql.PersistFieldSql DB.ObjectId where
    sqlType _ = Sql.SqlOther "doesn't make much sense for MongoDB"

instance Sql.PersistFieldSql (BackendKey DB.MongoContext) where
    sqlType _ = Sql.SqlOther "doesn't make much sense for MongoDB"
-- | Run an action with a connection pool configured from a 'MongoConf'.
withConnection :: (Trans.MonadIO m)
               => MongoConf
               -> (ConnectionPool -> m b) -> m b
withConnection mc =
    withMongoDBPool (mgDatabase mc) (T.unpack $ mgHost mc) (mgPort mc) (mgAuth mc) (mgPoolStripes mc) (mgStripeConnections mc) (mgConnectionIdleTime mc)

-- | Like 'withMongoDBPool' with a single stripe holding a single connection.
withMongoDBConn :: (Trans.MonadIO m)
                => Database -> HostName -> DB.PortID
                -> Maybe MongoAuth -> NominalDiffTime
                -> (ConnectionPool -> m b) -> m b
withMongoDBConn dbname hostname port mauth connectionIdleTime = withMongoDBPool dbname hostname port mauth 1 1 connectionIdleTime
-- | Open a raw network pipe to a single MongoDB host.
createPipe :: HostName -> DB.PortID -> IO DB.Pipe
createPipe hostname port = DB.connect (DB.Host hostname port)

-- | Connect to the primary of a replica set and verify access.
-- NOTE(review): name is misspelled ("Replicat"); it is internal, so it is
-- kept as-is for compatibility with other call sites in this module.
createReplicatSet :: (DB.ReplicaSetName, [DB.Host]) -> Database -> Maybe MongoAuth -> IO Connection
createReplicatSet rsSeed dbname mAuth = do
    pipe <- DB.openReplicaSet rsSeed >>= DB.primary
    testAccess pipe dbname mAuth
    return $ Connection pipe dbname
-- | Create a striped pool of replica-set connections; each connection is
-- closed when evicted from the pool.
createRsPool :: (Trans.MonadIO m) => Database -> ReplicaSetConfig
             -> Maybe MongoAuth
             -> Int -- ^ pool size (number of stripes)
             -> Int -- ^ stripe size (number of connections per stripe)
             -> NominalDiffTime -- ^ time a connection is left idle before closing
             -> m ConnectionPool
createRsPool dbname (ReplicaSetConfig rsName rsHosts) mAuth connectionPoolSize stripeSize connectionIdleTime = do
    Trans.liftIO $ Pool.createPool
        (createReplicatSet (rsName, rsHosts) dbname mAuth)
        (\(Connection pipe _) -> DB.close pipe)
        connectionPoolSize
        connectionIdleTime
        stripeSize
-- | Exercise the pipe against the database, authenticating when credentials
-- are supplied; a no-op otherwise.
--
-- NOTE(review): the 'Bool' result of 'DB.auth' is discarded, so a failed
-- authentication is not reported here — confirm whether callers rely on that.
testAccess :: DB.Pipe -> Database -> Maybe MongoAuth -> IO ()
testAccess pipe dbname mAuth = case mAuth of
    Just (MongoAuth user pass) -> do
        _ <- DB.access pipe DB.UnconfirmedWrites dbname (DB.auth user pass)
        return ()
    -- nothing to verify without credentials (previously `return undefined`,
    -- whose result was never forced)
    Nothing -> return ()
-- | Open a connection to a single host and verify access.
createConnection :: Database -> HostName -> DB.PortID -> Maybe MongoAuth -> IO Connection
createConnection dbname hostname port mAuth = do
    pipe <- createPipe hostname port
    testAccess pipe dbname mAuth
    return $ Connection pipe dbname

-- | Create a striped pool of single-host connections; each connection is
-- closed when evicted from the pool.
createMongoDBPool :: (Trans.MonadIO m) => Database -> HostName -> DB.PortID
                  -> Maybe MongoAuth
                  -> Int -- ^ pool size (number of stripes)
                  -> Int -- ^ stripe size (number of connections per stripe)
                  -> NominalDiffTime -- ^ time a connection is left idle before closing
                  -> m ConnectionPool
createMongoDBPool dbname hostname port mAuth connectionPoolSize stripeSize connectionIdleTime = do
    Trans.liftIO $ Pool.createPool
        (createConnection dbname hostname port mAuth)
        (\(Connection pipe _) -> DB.close pipe)
        connectionPoolSize
        connectionIdleTime
        stripeSize
-- | Build a connection pool from a 'MongoConf', dispatching on whether a
-- replica-set configuration is present.
createMongoPool :: (Trans.MonadIO m) => MongoConf -> m ConnectionPool
createMongoPool c@MongoConf{mgReplicaSetConfig = Just (ReplicaSetConfig rsName hosts)} =
    createRsPool
        (mgDatabase c)
        -- the configured host is prepended as an additional replica-set seed
        (ReplicaSetConfig rsName ((DB.Host (T.unpack $ mgHost c) (mgPort c)):hosts))
        (mgAuth c)
        (mgPoolStripes c) (mgStripeConnections c) (mgConnectionIdleTime c)
createMongoPool c@MongoConf{mgReplicaSetConfig = Nothing} =
    createMongoDBPool
        (mgDatabase c) (T.unpack (mgHost c)) (mgPort c)
        (mgAuth c)
        (mgPoolStripes c) (mgStripeConnections c) (mgConnectionIdleTime c)
type PipePool = Pool.Pool DB.Pipe

-- | A pool of plain MongoDB pipes.
-- The database parameter has not been applied yet.
-- This is useful for switching between databases (on the same host and port)
-- Unlike the normal pool, no authentication is available
createMongoDBPipePool :: (Trans.MonadIO m) => HostName -> DB.PortID
                      -> Int -- ^ pool size (number of stripes)
                      -> Int -- ^ stripe size (number of connections per stripe)
                      -> NominalDiffTime -- ^ time a connection is left idle before closing
                      -> m PipePool
createMongoDBPipePool hostname port connectionPoolSize stripeSize connectionIdleTime =
    Trans.liftIO $ Pool.createPool
        (createPipe hostname port)
        DB.close
        connectionPoolSize
        connectionIdleTime
        stripeSize
-- | Build a pool from a 'MongoConf' and hand it to the given action.
withMongoPool :: (Trans.MonadIO m) => MongoConf -> (ConnectionPool -> m b) -> m b
withMongoPool conf connectionReader = do
    pool <- createMongoPool conf
    connectionReader pool
-- | Build a striped connection pool and hand it to the given action.
withMongoDBPool :: (Trans.MonadIO m) =>
    Database -> HostName -> DB.PortID -> Maybe MongoAuth -> Int -> Int -> NominalDiffTime -> (ConnectionPool -> m b) -> m b
withMongoDBPool dbname hostname port mauth poolStripes stripeConnections connectionIdleTime connectionReader =
    createMongoDBPool dbname hostname port mauth poolStripes stripeConnections connectionIdleTime
        >>= connectionReader
-- | run a pool created with 'createMongoDBPipePool'
-- (the database name must be supplied here because pipes carry none).
runMongoDBPipePool :: MonadUnliftIO m => DB.AccessMode -> Database -> DB.Action m a -> PipePool -> m a
runMongoDBPipePool accessMode db action pool =
    withRunInIO $ \run ->
        Pool.withResource pool $ \pipe ->
            run $ DB.access pipe accessMode db action

-- | Run a database action against a pooled connection; the database name is
-- the one stored inside the 'Connection'.
runMongoDBPool :: MonadUnliftIO m => DB.AccessMode -> DB.Action m a -> ConnectionPool -> m a
runMongoDBPool accessMode action pool =
    withRunInIO $ \run ->
        Pool.withResource pool $ \(Connection pipe db) ->
            run $ DB.access pipe accessMode db action

-- | use default 'AccessMode'
runMongoDBPoolDef :: MonadUnliftIO m => DB.Action m a -> ConnectionPool -> m a
runMongoDBPoolDef = runMongoDBPool defaultAccessMode
-- | Query that selects a record by its key, projecting only the entity's
-- declared fields.
queryByKey :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
           => Key record -> DB.Query
queryByKey k = (DB.select (keyToMongoDoc k) (collectionNameFromKey k)) {DB.project = projectionFromKey k}

-- | Selection (no projection) matching a record by its key.
selectByKey :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
            => Key record -> DB.Selection
selectByKey k = DB.select (keyToMongoDoc k) (collectionNameFromKey k)
-- | Convert persistent updates into a MongoDB modifier document.
updatesToDoc :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
             => [Update record] -> DB.Document
updatesToDoc = map updateToMongoField
-- | Translate a single field update into a MongoDB update-operator field,
-- e.g. @{"$set": {fieldName: value}}@.
updateToBson :: Text                                      -- ^ (possibly nested) document field name
             -> PersistValue                              -- ^ value the operator applies
             -> Either PersistUpdate MongoUpdateOperation -- ^ standard or Mongo-specific update
             -> DB.Field
updateToBson fname v up =
#ifdef DEBUG
  debug (
#endif
    opName DB.:= DB.Doc [fname DB.:= opValue]
#ifdef DEBUG
  )
#endif
  where
    inc = "$inc"
    mul = "$mul"
    (opName, opValue) = case up of
      Left pup -> case (pup, v) of
        -- assigning NULL removes the field from the document
        (Assign, PersistNull) -> ("$unset", DB.Int64 1)
        (Assign,a) -> ("$set", DB.val a)
        (Add, a) -> (inc, DB.val a)
        -- MongoDB has no $sub: subtraction is $inc with a negated amount
        (Subtract, PersistInt64 i) -> (inc, DB.Int64 (-i))
        (Multiply, PersistInt64 i) -> (mul, DB.Int64 i)
        (Multiply, PersistDouble d) -> (mul, DB.Float d)
        (Subtract, _) -> error "expected PersistInt64 for a subtraction"
        -- fixed: this message previously said "subtraction"
        (Multiply, _) -> error "expected PersistInt64 or PersistDouble for a multiplication"
        -- Obviously this could be supported for floats by multiplying with 1/x
        (Divide, _) -> throw $ PersistMongoDBUnsupported "divide not supported"
        (BackendSpecificUpdate bsup, _) -> throw $ PersistMongoDBError $
            T.pack $ "did not expect BackendSpecificUpdate " ++ T.unpack bsup
      Right mup -> case mup of
        MongoEach op -> case op of
          -- $pullAll takes the list directly, no $each wrapper needed
          MongoPull -> ("$pullAll", DB.val v)
          _ -> (opToText op, DB.Doc ["$each" DB.:= DB.val v])
        MongoSimple x -> (opToText x, DB.val v)
-- | Convert one persistent update into a MongoDB update field.
updateToMongoField :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
                   => Update record -> DB.Field
updateToMongoField (Update field v up) = updateToBson (fieldName field) (toPersistValue v) (Left up)
updateToMongoField (BackendUpdate up) = mongoUpdateToDoc up

-- | convert a unique key into a MongoDB document
toUniquesDoc :: forall record. (PersistEntity record) => Unique record -> [DB.Field]
toUniquesDoc uniq = zipWith (DB.:=)
    (map (unDBName . snd) $ persistUniqueToFieldNames uniq)
    (map DB.val (persistUniqueToValues uniq))
-- | convert a PersistEntity into document fields.
-- for inserts only: nulls are ignored so they will be unset in the document.
-- 'recordToDocument' includes nulls
toInsertDoc :: forall record. (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
            => record -> DB.Document
toInsertDoc record = zipFilter (embeddedFields $ toEmbedEntityDef entDef)
                               (map toPersistValue $ toPersistFields record)
  where
    entDef = entityDef $ Just record
    -- pair field definitions with values, dropping null/empty values entirely
    zipFilter :: [EmbedFieldDef] -> [PersistValue] -> DB.Document
    zipFilter [] _ = []
    zipFilter _ [] = []
    zipFilter (fd:efields) (pv:pvs) =
        if isNull pv then recur else
          (fieldToLabel fd DB.:= embeddedVal (emFieldEmbed fd) pv):recur
      where
        recur = zipFilter efields pvs
        -- empty maps/lists count as null, so they are elided too
        isNull PersistNull = True
        isNull (PersistMap m) = null m
        isNull (PersistList l) = null l
        isNull _ = False
    -- make sure to remove nulls from embedded entities also
    embeddedVal :: Maybe EmbedEntityDef -> PersistValue -> DB.Value
    embeddedVal (Just emDef) (PersistMap m) = DB.Doc $
        zipFilter (embeddedFields emDef) $ map snd m
    -- a list of embedded entities: recurse into each element
    embeddedVal je@(Just _) (PersistList l) = DB.Array $ map (embeddedVal je) l
    embeddedVal _ pv = DB.val pv
-- | Like 'toInsertDoc' but prepends the key field(s).
entityToInsertDoc :: forall record. (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
                  => Entity record -> DB.Document
entityToInsertDoc (Entity key record) = keyToMongoDoc key ++ toInsertDoc record

-- | The MongoDB collection an entity is stored in (its declared DB name).
collectionName :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
               => record -> Text
collectionName = unDBName . entityDB . entityDef . Just

-- | convert a PersistEntity into document fields.
-- unlike 'toInsertDoc', nulls are included.
recordToDocument :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
                 => record -> DB.Document
recordToDocument record = zipToDoc (map fieldDB $ entityFields entity) (toPersistFields record)
  where
    entity = entityDef $ Just record

-- | Like 'recordToDocument' but prepends the key field(s).
documentFromEntity :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
                   => Entity record -> DB.Document
documentFromEntity (Entity key record) =
    keyToMongoDoc key ++ recordToDocument record
-- | Pair database field names with values, rendering each pair as a BSON
-- field; the shorter of the two lists bounds the result.
zipToDoc :: PersistField a => [DBName] -> [a] -> [DB.Field]
zipToDoc names vals =
    [ unDBName name DB.:= DB.val (toPersistValue val)
    | (name, val) <- zip names vals
    ]
-- | The document label for an embedded field (its declared DB name).
fieldToLabel :: EmbedFieldDef -> Text
fieldToLabel = unDBName . emFieldDB
-- | Like 'keyFrom_id', but throws a 'PersistMongoDBError' in IO on failure.
keyFrom_idEx :: (Trans.MonadIO m, PersistEntity record) => DB.Value -> m (Key record)
keyFrom_idEx idVal =
    case keyFrom_id idVal of
        Right key -> return key
        Left err ->
            let msg = "could not convert key: " `mappend` T.pack (show idVal) `mappend` err
            in liftIO $ throwIO $ PersistMongoDBError msg
-- | Convert a raw @_id@ value into a persistent key.
-- A composite key arrives as a sub-document ('PersistMap').
keyFrom_id :: (PersistEntity record) => DB.Value -> Either Text (Key record)
keyFrom_id idVal = case cast idVal of
    (PersistMap m) -> keyFromValues $ map snd m
    pv -> keyFromValues [pv]
-- | It would make sense to define the instance for ObjectId
-- and then use newtype deriving
-- however, that would create an orphan instance
instance ToJSON (BackendKey DB.MongoContext) where
    -- render the ObjectId as its 24-character hex representation
    toJSON (MongoKey (Oid x y)) = toJSON $ DB.showHexLen 8 x $ DB.showHexLen 16 y ""
instance FromJSON (BackendKey DB.MongoContext) where
    -- The key is serialized as a 24-character hex string (see the ToJSON
    -- instance), so it is parsed back with 'readHex'. The previous failure
    -- message incorrectly said "Invalid base64".
    parseJSON = withText "MongoKey" $ \t ->
        maybe
          (fail "Invalid ObjectId hex string")
          (return . MongoKey . persistObjectIdToDbOid . PersistObjectId)
          $ fmap (i2bs (8 * 12) . fst) $ headMay $ readHex $ T.unpack t
      where
        -- should these be exported from Types/Base.hs ?
        headMay [] = Nothing
        headMay (x:_) = Just x
-- taken from crypto-api
-- |@i2bs bitLen i@ converts @i@ to a 'ByteString' of @bitLen@ bits (must be a multiple of 8).
-- Bytes are emitted big-endian, from bit position @bitLen-8@ down to 0.
i2bs :: Int -> Integer -> BS.ByteString
i2bs l i = BS.pack [ fromIntegral (i `shiftR` p) | p <- [l - 8, l - 16 .. 0] ]
{-# INLINE i2bs #-}
-- | older versions of haddock (like that on hackage) do not show that this defines
-- @BackendKey DB.MongoContext = MongoKey { unMongoKey :: DB.ObjectId }@
instance PersistCore DB.MongoContext where
    newtype BackendKey DB.MongoContext = MongoKey { unMongoKey :: DB.ObjectId }
        deriving (Show, Read, Eq, Ord, PersistField)
instance PersistStoreWrite DB.MongoContext where
    -- the generated _id is converted back into a persistent key
    insert record = DB.insert (collectionName record) (toInsertDoc record)
        >>= keyFrom_idEx
    insertMany [] = return []
    insertMany records@(r:_) = mapM keyFrom_idEx =<<
        DB.insertMany (collectionName r) (map toInsertDoc records)
    insertEntityMany [] = return ()
    insertEntityMany ents@(Entity _ r : _) =
        DB.insertMany_ (collectionName r) (map entityToInsertDoc ents)
    -- insert with a caller-supplied key: the key becomes the _id field
    insertKey k record = DB.insert_ (collectionName record) $
        entityToInsertDoc (Entity k record)
    -- DB.save inserts or replaces by _id
    repsert k record = DB.save (collectionName record) $
        documentFromEntity (Entity k record)
    replace k record = do
        DB.replace (selectByKey k) (recordToDocument record)
        return ()
    delete k =
        DB.deleteOne DB.Select {
          DB.coll = collectionNameFromKey k
        , DB.selector = keyToMongoDoc k
        }
    update _ [] = return ()
    update key upds =
        DB.modify
            (DB.Select (keyToMongoDoc key) (collectionNameFromKey key))
            $ updatesToDoc upds
    updateGet key upds = do
        context <- ask
        -- findAndModify returns the updated document or an error message
        result <- liftIO $ runReaderT (DB.findAndModify (queryByKey key) (updatesToDoc upds)) context
        either err instantiate result
      where
        instantiate doc = do
            Entity _ rec <- fromPersistValuesThrow t doc
            return rec
        err msg = Trans.liftIO $ throwIO $ KeyNotFound $ show key ++ msg
        t = entityDefFromKey key
instance PersistStoreRead DB.MongoContext where
    -- look up one record by key; Nothing when the document is absent
    get k = do
        d <- DB.findOne (queryByKey k)
        case d of
          Nothing -> return Nothing
          Just doc -> do
            Entity _ ent <- fromPersistValuesThrow t doc
            return $ Just ent
      where
        t = entityDefFromKey k
instance PersistUniqueRead DB.MongoContext where
    -- look up one record by its unique constraint fields
    getBy uniq = do
        mdoc <- DB.findOne $
            (DB.select (toUniquesDoc uniq) (collectionName rec)) {DB.project = projectionFromRecord rec}
        case mdoc of
          Nothing -> return Nothing
          Just doc -> liftM Just $ fromPersistValuesThrow t doc
      where
        t = entityDef $ Just rec
        -- phantom record used only to name the collection / entity definition
        rec = dummyFromUnique uniq
instance PersistUniqueWrite DB.MongoContext where
    deleteBy uniq =
        DB.delete DB.Select {
          DB.coll = collectionName $ dummyFromUnique uniq
        , DB.selector = toUniquesDoc uniq
        }
    upsert newRecord upds = do
        uniq <- onlyUnique newRecord
        upsertBy uniq newRecord upds
    -- - let uniqKeys = map DB.label uniqueDoc
    -- - let insDoc = DB.exclude uniqKeys $ toInsertDoc newRecord
    -- let selection = DB.select uniqueDoc $ collectionName newRecord
    -- - if null upds
    -- -   then DB.upsert selection ["$set" DB.=: insDoc]
    -- -   else do
    -- -     DB.upsert selection ["$setOnInsert" DB.=: insDoc]
    -- -     DB.modify selection $ updatesToDoc upds
    -- - -- because findAndModify $setOnInsert is broken we do a separate get now
    upsertBy uniq newRecord upds = do
        let uniqueDoc = toUniquesDoc uniq :: [DB.Field]
        let uniqKeys = map DB.label uniqueDoc :: [DB.Label]
        -- the unique fields are excluded from the insert doc: they already
        -- come from the selector
        let insDoc = DB.exclude uniqKeys $ toInsertDoc newRecord :: DB.Document
        let selection = DB.select uniqueDoc $ collectionName newRecord :: DB.Selection
        mdoc <- getBy uniq
        -- NOTE(review): when the record is absent AND upds is empty, nothing
        -- is written here and the re-read below throws UpsertError — confirm
        -- this is the intended behavior for an empty update list.
        case mdoc of
          Nothing -> unless (null upds) (DB.upsert selection ["$setOnInsert" DB.=: insDoc])
          Just _ -> unless (null upds) (DB.modify selection $ DB.exclude uniqKeys $ updatesToDoc upds)
        -- re-read because the write above does not return the document
        newMdoc <- getBy uniq
        case newMdoc of
          Nothing -> err "possible race condition: getBy found Nothing"
          Just doc -> return doc
      where
        err = Trans.liftIO . throwIO . UpsertError
        {-
        -- cannot use findAndModify
        -- because $setOnInsert is crippled
        -- https://jira.mongodb.org/browse/SERVER-2643
        result <- DB.findAndModifyOpts
            selection
            (DB.defFamUpdateOpts ("$setOnInsert" DB.=: insDoc : ["$set" DB.=: insDoc]))
              { DB.famUpsert = True }
        either err instantiate result
        where
        -- this is only possible when new is False
        instantiate Nothing = error "upsert: impossible null"
        instantiate (Just doc) =
            fromPersistValuesThrow (entityDef $ Just newRecord) doc
        -}
-- | It would make more sense to call this _id, but GHC treats leading underscore in special ways
-- (this is the name of MongoDB's primary-key field).
id_ :: T.Text
id_ = "_id"
-- _id is always the primary key in MongoDB
-- but _id can contain any unique value
keyToMongoDoc :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
              => Key record -> DB.Document
keyToMongoDoc k = case entityPrimary $ entityDefFromKey k of
    -- default key: a single _id field
    Nothing -> zipToDoc [DBName id_] values
    -- composite primary key: _id becomes a sub-document of the primary fields
    Just pdef -> [id_ DB.=: zipToDoc (primaryNames pdef) values]
  where
    primaryNames = map fieldDB . compositeFields
    values = keyToValues k
-- | Entity definition for a key's record type (via the phantom record).
entityDefFromKey :: PersistEntity record => Key record -> EntityDef
entityDefFromKey = entityDef . Just . recordTypeFromKey

-- | Collection name for a key's record type.
collectionNameFromKey :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext)
                      => Key record -> Text
collectionNameFromKey = collectionName . recordTypeFromKey
-- | Build an inclusion projector covering every declared field of the entity.
projectionFromEntityDef :: EntityDef -> DB.Projector
projectionFromEntityDef eDef =
    [ unDBName (fieldDB fDef) DB.=: (1 :: Int) | fDef <- entityFields eDef ]
-- | Projection of all declared fields, derived from a key.
projectionFromKey :: PersistEntity record => Key record -> DB.Projector
projectionFromKey = projectionFromEntityDef . entityDefFromKey

-- | Projection of all declared fields, derived from a record.
projectionFromRecord :: PersistEntity record => record -> DB.Projector
projectionFromRecord = projectionFromEntityDef . entityDef . Just
instance PersistQueryWrite DB.MongoContext where
    updateWhere _ [] = return ()
    updateWhere filts upds =
        DB.modify DB.Select {
          DB.coll = collectionName $ dummyFromFilts filts
        , DB.selector = filtersToDoc filts
        } $ updatesToDoc upds
    -- deletes every document matching the filters
    deleteWhere filts = do
        DB.delete DB.Select {
          DB.coll = collectionName $ dummyFromFilts filts
        , DB.selector = filtersToDoc filts
        }
instance PersistQueryRead DB.MongoContext where
    count filts = do
        i <- DB.count query
        return $ fromIntegral i
      where
        query = DB.select (filtersToDoc filts) $
            collectionName $ dummyFromFilts filts
    -- | uses cursor option NoCursorTimeout
    -- If there is no sorting, it will turn the $snapshot option on
    -- and explicitly closes the cursor when done
    selectSourceRes filts opts = do
        context <- ask
        return (pullCursor context `fmap` mkAcquire (open context) (close context))
      where
        close :: DB.MongoContext -> DB.Cursor -> IO ()
        close context cursor = runReaderT (DB.closeCursor cursor) context
        open :: DB.MongoContext -> IO DB.Cursor
        open = runReaderT (DB.find (makeQuery filts opts)
                  -- it is an error to apply $snapshot when sorting
                  { DB.snapshot = noSort
                  , DB.options = [DB.NoCursorTimeout]
                  })
        -- stream batches until the cursor is exhausted
        pullCursor context cursor = do
            mdoc <- liftIO $ runReaderT (DB.nextBatch cursor) context
            case mdoc of
              [] -> return ()
              docs -> do
                  forM_ docs $ fromPersistValuesThrow t >=> yield
                  pullCursor context cursor
        t = entityDef $ Just $ dummyFromFilts filts
        (_, _, orders) = limitOffsetOrder opts
        noSort = null orders
    selectFirst filts opts = DB.findOne (makeQuery filts opts)
        >>= Traversable.mapM (fromPersistValuesThrow t)
      where
        t = entityDef $ Just $ dummyFromFilts filts
    selectKeysRes filts opts = do
        context <- ask
        let make = do
              -- project only _id: the key is all we need from each document
              cursor <- liftIO $ flip runReaderT context $ DB.find $ (makeQuery filts opts) {
                  DB.project = [id_ DB.=: (1 :: Int)]
                }
              pullCursor context cursor
        return $ return make
      where
        -- walk the cursor one document at a time, yielding the key of each
        pullCursor context cursor = do
            mdoc <- liftIO $ runReaderT (DB.next cursor) context
            case mdoc of
              Nothing -> return ()
              Just [_id DB.:= idVal] -> do
                  k <- liftIO $ keyFrom_idEx idVal
                  yield k
                  pullCursor context cursor
              Just y -> liftIO $ throwIO $ PersistMarshalError $ T.pack $ "Unexpected in selectKeys: " ++ show y
-- | Render one sort option as a MongoDB sort field.
-- Partial: callers are expected to pass only ordering options
-- (presumably filtered out by 'limitOffsetOrder' — TODO confirm).
orderClause :: PersistEntity val => SelectOpt val -> DB.Field
orderClause o = case o of
    Asc f -> fieldName f DB.=: ( 1 :: Int)
    Desc f -> fieldName f DB.=: (-1 :: Int)
    _ -> error "orderClause: expected Asc or Desc"
-- | Assemble a full MongoDB query from persistent filters and select options,
-- applying limit, skip, sort, and the entity's field projection.
makeQuery :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext) => [Filter record] -> [SelectOpt record] -> DB.Query
makeQuery filts opts =
    (DB.select (filtersToDoc filts) (collectionName $ dummyFromFilts filts)) {
      DB.limit = fromIntegral limit
    , DB.skip = fromIntegral offset
    , DB.sort = orders
    , DB.project = projectionFromRecord (dummyFromFilts filts)
    }
  where
    (limit, offset, orders') = limitOffsetOrder opts
    orders = map orderClause orders'
-- | Convert persistent filters to a MongoDB selector document.
-- An empty filter list produces an empty selector (matches everything).
filtersToDoc :: (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext) => [Filter record] -> DB.Document
filtersToDoc filts =
#ifdef DEBUG
  debug $
#endif
  if null filts then [] else multiFilter AndDollar filts
-- | Turn a single persistent 'Filter' into a BSON document.
filterToDocument :: (PersistEntity val, PersistEntityBackend val ~ DB.MongoContext) => Filter val -> DB.Document
filterToDocument (Filter field v filt) = [filterToBSON (fieldName field) v filt]
filterToDocument (BackendFilter mf)    = mongoFilterToDoc mf
-- An empty FilterOr should never occur when the user builds with ||.;
-- 'multiFilter' throws on an empty list. The alternative — a query
-- that always matches — is not something an end user would want.
filterToDocument (FilterOr fs)         = multiFilter OrDollar fs
-- An empty $and list is silently dropped instead of raising.
-- $and is only strictly necessary in a few cases, but it makes
-- query construction uniform.
filterToDocument (FilterAnd [])        = []
filterToDocument (FilterAnd fs)        = multiFilter AndDollar fs
-- | How multiple filter documents are to be combined: MongoDB $or vs $and.
data MultiFilter = OrDollar | AndDollar deriving Show
-- | The MongoDB operator keyword corresponding to a combinator.
toMultiOp :: MultiFilter -> Text
toMultiOp multi = case multi of
    OrDollar  -> orDollar
    AndDollar -> andDollar
-- | Combine several filters into a single $or/$and field.
-- Throws 'PersistMongoDBError' on an empty input list, and on an $or
-- whose non-empty member documents number fewer than two.
multiFilter :: forall record. (PersistEntity record, PersistEntityBackend record ~ DB.MongoContext) => MultiFilter -> [Filter record] -> [DB.Field]
multiFilter _ [] = throw $ PersistMongoDBError "An empty list of filters was given"
multiFilter multi filters =
    case (multi, nonNullDocs) of
      -- a $or must have at least 2 items
      (OrDollar,  [])    -> orError
      (OrDollar,  [_])   -> orError
      (AndDollar, [])    -> []
      (AndDollar, [doc]) -> doc
      (_,         docs)  -> [toMultiOp multi DB.:= DB.Array (map DB.Doc docs)]
  where
    -- drop filters that rendered to the empty document (e.g. FilterAnd [])
    nonNullDocs = filter (not . null) (map filterToDocument filters)
    orError = throw $ PersistMongoDBError $
        "An empty list of filters was given to one side of ||."
-- | MongoDB query operator keywords used throughout this module.
existsDollar :: Text
existsDollar = "$exists"

orDollar :: Text
orDollar = "$or"

andDollar :: Text
andDollar = "$and"
-- | Render one comparison on a named field as a BSON field.
-- Eq and Ne get special treatment when the compared value is null,
-- because in MongoDB a missing field and an explicit null are distinct:
--
--   * @field ==. Nothing@ becomes @$or [{field: null}, {field: {$exists: false}}]@
--   * @field !=. Nothing@ becomes @{field: {$ne: null, $exists: true}}@
--
-- Every other operator maps directly to its @$@-prefixed MongoDB form.
filterToBSON :: forall a. ( PersistField a)
             => Text
             -> FilterValue a
             -> PersistFilter
             -> DB.Field
filterToBSON fname v filt = case filt of
    Eq -> nullEq
    Ne -> nullNeq
    _  -> notEquality
  where
    dbv = toValue v
    -- the ordinary, non-null-aware encoding: {fname: {$op: value}}
    notEquality = fname DB.=: [showFilter filt DB.:= dbv]
    -- Eq: a plain {fname: value}, unless the value is null (see above)
    nullEq = case dbv of
      DB.Null -> orDollar DB.=:
        [ [fname DB.:= DB.Null]
        , [fname DB.:= DB.Doc [existsDollar DB.:= DB.Bool False]]
        ]
      _ -> fname DB.:= dbv
    -- Ne: require the field to both differ from null and exist
    nullNeq = case dbv of
      DB.Null ->
        fname DB.:= DB.Doc
          [ showFilter Ne DB.:= DB.Null
          , existsDollar DB.:= DB.Bool True
          ]
      _ -> notEquality
    -- persistent operator -> MongoDB operator keyword.
    -- Eq is unreachable here: it is handled by nullEq before showFilter
    -- is ever consulted.
    showFilter Ne = "$ne"
    showFilter Gt = "$gt"
    showFilter Lt = "$lt"
    showFilter Ge = "$gte"
    showFilter Le = "$lte"
    showFilter In = "$in"
    showFilter NotIn = "$nin"
    showFilter Eq = error "EQ filter not expected"
    showFilter (BackendSpecificFilter bsf) = throw $ PersistMongoDBError $ T.pack $ "did not expect BackendSpecificFilter " ++ T.unpack bsf
-- | Render a mongo-specific filter operator on a named field as a
-- single-field BSON document.
mongoFilterToBSON :: forall typ. PersistField typ
                  => Text
                  -> MongoFilterOperator typ
                  -> DB.Document
mongoFilterToBSON fname (PersistFilterOperator v op) = [filterToBSON fname v op]
mongoFilterToBSON fname (MongoFilterOperator bval)   = [fname DB.:= bval]
-- | Render an update operator on a named field, marshalling the payload
-- to a 'PersistValue' first ('PersistList' for array payloads).
mongoUpdateToBson :: forall typ. PersistField typ
                  => Text
                  -> UpdateValueOp typ
                  -> DB.Field
mongoUpdateToBson fname (UpdateValueOp eVal op) =
    updateToBson fname payload op
  where
    payload = case eVal of
        Left v   -> toPersistValue v
        Right vs -> PersistList (map toPersistValue vs)
-- | Render a backend-specific update, using the dotted path for nested
-- fields and the plain field name for array updates.
mongoUpdateToDoc :: PersistEntity record => MongoUpdate record -> DB.Field
mongoUpdateToDoc upd = case upd of
    NestedUpdate field op -> mongoUpdateToBson (nestedFieldName field) op
    ArrayUpdate  field op -> mongoUpdateToBson (fieldName field) op
-- | Render a backend-specific filter as a BSON document.
mongoFilterToDoc :: PersistEntity record => MongoFilter record -> DB.Document
mongoFilterToDoc mf = case mf of
    NestedFilter field op       -> mongoFilterToBSON (nestedFieldName field) op
    ArrayFilter field op        -> mongoFilterToBSON (fieldName field) op
    NestedArrayFilter field op  -> mongoFilterToBSON (nestedFieldName field) op
    RegExpFilter fn (reg, opts) -> [fieldName fn DB.:= DB.RegEx (DB.Regex reg opts)]
-- | The dotted MongoDB path of a nested field reference,
-- e.g. @"address.city"@.
nestedFieldName :: forall record typ. PersistEntity record => NestedField record typ -> Text
nestedFieldName = T.intercalate "." . collect
  where
    -- Walk the nesting chain, gathering one label per level.
    collect :: forall r1 r2. (PersistEntity r1) => NestedField r1 r2 -> [DB.Label]
    collect ( f1 `MidEmbFld` rest)           = fieldName f1 : collect rest
    collect ( f1 `MidNestFlds` rest)         = fieldName f1 : collect rest
    collect ( f1 `MidNestFldsNullable` rest) = fieldName f1 : collect rest
    collect (nf1 `LastEmbFld` nf2)           = [fieldName nf1, fieldName nf2]
    collect (nf1 `LastNestFld` nf2)          = [fieldName nf1, fieldName nf2]
    collect (nf1 `LastNestFldNullable` nf2)  = [fieldName nf1, fieldName nf2]
-- | Marshal a filter payload to a BSON value; 'FilterValues' becomes
-- a BSON array of the marshalled elements.
toValue :: forall a. PersistField a => FilterValue a -> DB.Value
toValue (FilterValue v)   = DB.val $ toPersistValue v
toValue (UnsafeValue v)   = DB.val $ toPersistValue v
toValue (FilterValues vs) = DB.val $ map toPersistValue vs
-- | The MongoDB label for an entity field. The persistent "Id" field
-- maps to the driver's @_id@ label; everything else uses its database
-- column name.
fieldName :: forall record typ. (PersistEntity record) => EntityField record typ -> DB.Label
fieldName f =
    if fieldHaskell fdef == HaskellName "Id"
        then id_
        else unDBName (fieldDB fdef)
  where
    fdef = persistFieldDef f
-- | Decode a raw BSON document into an 'Entity', reporting failures as
-- 'Left'.
docToEntityEither :: forall record. (PersistEntity record) => DB.Document -> Either T.Text (Entity record)
docToEntityEither doc = entity
  where
    -- getType is applied to `entity` purely so the type checker can
    -- recover the record type for entityDef; because entityDef is lazy
    -- in its argument's value, getType is never evaluated at runtime.
    entDef = entityDef $ Just (getType entity)
    entity = eitherFromPersistValues entDef doc
    getType :: Either err (Entity ent) -> ent
    getType = error "docToEntityEither/getType: never here"
-- | Like 'docToEntityEither', but raises 'PersistMarshalError' in IO
-- on a decode failure.
docToEntityThrow :: forall m record. (Trans.MonadIO m, PersistEntity record, PersistEntityBackend record ~ DB.MongoContext) => DB.Document -> m (Entity record)
docToEntityThrow doc =
    either (Trans.liftIO . throwIO . PersistMarshalError) return (docToEntityEither doc)
-- | Decode a BSON document against a known 'EntityDef', raising
-- 'PersistMarshalError' (prefixed with the entity name) on failure.
fromPersistValuesThrow :: (Trans.MonadIO m, PersistEntity record, PersistEntityBackend record ~ DB.MongoContext) => EntityDef -> [DB.Field] -> m (Entity record)
fromPersistValuesThrow entDef doc =
    either onError return (eitherFromPersistValues entDef doc)
  where
    onError msg = Trans.liftIO $ throwIO $ PersistMarshalError $
        unHaskellName (entityHaskell entDef) `mappend` ": " `mappend` msg
-- | Apply a function to the 'Left' payload, leaving 'Right' untouched.
mapLeft :: (a -> c) -> Either a b -> Either c b
mapLeft f = either (Left . f) Right
-- | Decode an association-list BSON document into an 'Entity'.
-- Fails if the @_id@ field is missing, if the body fields cannot be
-- unmarshalled, or if the key value cannot be converted; every error
-- message is suffixed with the offending document for debugging.
eitherFromPersistValues :: (PersistEntity record) => EntityDef -> [DB.Field] -> Either T.Text (Entity record)
eitherFromPersistValues entDef doc = case mKey of
    Nothing -> addDetail $ Left $ "could not find _id field: "
    Just kpv -> do
        -- reorder the returned fields to match the entity's column order
        -- before unmarshalling (MongoDB does not guarantee field order)
        body <- addDetail (fromPersistValues (map snd $ orderPersistValues (toEmbedEntityDef entDef) castDoc))
        key <- keyFromValues [kpv]
        return $ Entity key body
  where
    -- append the raw document to any error message
    addDetail :: Either Text a -> Either Text a
    addDetail = mapLeft (\msg -> msg `mappend` " for doc: " `mappend` T.pack (show doc))
    castDoc = assocListFromDoc doc
    -- normally _id is the first field
    mKey = lookup id_ castDoc
-- | Unlike many SQL databases, MongoDB makes no guarantee about the
-- order of the fields returned in a document.
-- Ordering might be maintained if persistent were the only user of the db,
-- but other tools may be using MongoDB.
--
-- Persistent creates a Haskell record from a list of PersistValue, and
-- most importantly this function puts all PersistValues in the proper order.
orderPersistValues :: EmbedEntityDef -> [(Text, PersistValue)] -> [(Text, PersistValue)]
orderPersistValues entDef castDoc = reorder
where
castColumns = map nameAndEmbed (embeddedFields entDef)
nameAndEmbed fdef = (fieldToLabel fdef, emFieldEmbed fdef)
-- TODO: the below reasoning should be re-thought now that we are no longer inserting null: searching for a null column will look at every returned field before giving up
-- Also, we are now doing the _id lookup at the start.
--
-- we have an alist of fields that need to be the same order as entityColumns
--
-- this naive lookup is O(n^2)
-- reorder = map (fromJust . (flip Prelude.lookup $ castDoc)) castColumns
--
-- this is O(n * log(n))
-- reorder = map (\c -> (M.fromList castDoc) M.! c) castColumns
--
-- and finally, this is O(n * log(n))
-- * do an alist lookup for each column
-- * but once we found an item in the alist use a new alist without that item for future lookups
-- * so for the last query there is only one item left
--
reorder :: [(Text, PersistValue)]
reorder = match castColumns castDoc []
where
match :: [(Text, Maybe EmbedEntityDef)]
-> [(Text, PersistValue)]
-> [(Text, PersistValue)]
-> [(Text, PersistValue)]
-- when there are no more Persistent castColumns we are done
--
-- allow extra mongoDB fields that persistent does not know about
-- another application may use fields we don't care about
-- our own application may set extra fields with the raw driver
match [] _ values = values
match (column:columns) fields values =
let (found, unused) = matchOne fields []
in match columns unused $ values ++
[(fst column, nestedOrder (snd column) (snd found))]
where
nestedOrder (Just em) (PersistMap m) =
PersistMap $ orderPersistValues em m
nestedOrder (Just em) (PersistList l) =
PersistList $ map (nestedOrder (Just em)) l
-- implied: nestedOrder Nothing found = found
nestedOrder _ found = found
matchOne (field:fs) tried =
if fst column == fst field
-- snd drops the name now that it has been used to make the match
-- persistent will add the field name later
then (field, tried ++ fs)
else matchOne fs (field:tried)
-- if field is not found, assume it was a Nothing
--