Switch to unliftio and lts-11.1 (#83)

- Also adds a .stylish-haskell.yaml formatting configuration
- Re-implements `limitedMapConcurrently` using STM
- Cleans up dependencies in the cabal file
- Fixes shadow warnings and other build warnings; a sketch of the overall migration pattern follows below
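As an illustration of the switch described above (not part of the commit itself): throughout the library, lifted-base/lifted-async combinators are replaced by their unliftio equivalents, and the conduit 1.3 API that ships with lts-11.1 replaces the removed `ResumableSource` type and the `$$`/`$$+-` operators. A minimal before/after sketch, with hypothetical helper names:

-- Illustrative sketch only; these helpers are not part of the library.
module MigrationSketch where

import           Control.Exception    (IOException)
import qualified Data.ByteString.Lazy as LB
import qualified Data.Conduit         as C
import qualified Data.Conduit.Binary  as CB
import qualified UnliftIO.Exception   as UEx

-- conduit >= 1.3: pipelines are composed with (.|) and run with
-- runConduit/runConduitRes; ($$) and ($$+-) are gone.
-- Previously: bs <- CB.sourceFile fp C.$$ CB.sinkLbs
slurpFile :: FilePath -> IO LB.ByteString
slurpFile fp = C.runConduitRes $ CB.sourceFile fp C..| CB.sinkLbs

-- unliftio: UEx.try works in any MonadUnliftIO monad, replacing
-- Control.Exception.Lifted.try and its MonadBaseControl machinery.
safeSlurp :: FilePath -> IO (Either IOException LB.ByteString)
safeSlurp = UEx.try . slurpFile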
Aditya Manthramurthy 2018-03-26 14:04:25 -07:00 committed by GitHub
parent 38b67b4dab
commit 51b3e51d46
12 changed files with 427 additions and 165 deletions

.stylish-haskell.yaml (new file, 233 lines)
View File

@ -0,0 +1,233 @@
# stylish-haskell configuration file
# ==================================
# The stylish-haskell tool is mainly configured by specifying steps. These steps
# are a list, so they have an order, and one specific step may appear more than
# once (if needed). Each file is processed by these steps in the given order.
steps:
# Convert some ASCII sequences to their Unicode equivalents. This is disabled
# by default.
# - unicode_syntax:
# # In order to make this work, we also need to insert the UnicodeSyntax
# # language pragma. If this flag is set to true, we insert it when it's
# # not already present. You may want to disable it if you configure
# # language extensions using some other method than pragmas. Default:
# # true.
# add_language_pragma: true
# Align the right hand side of some elements. This is quite conservative
# and only applies to statements where each element occupies a single
# line.
- simple_align:
cases: true
top_level_patterns: true
records: true
# Import cleanup
- imports:
# There are different ways we can align names and lists.
#
# - global: Align the import names and import list throughout the entire
# file.
#
# - file: Like global, but don't add padding when there are no qualified
# imports in the file.
#
# - group: Only align the imports per group (a group is formed by adjacent
# import lines).
#
# - none: Do not perform any alignment.
#
# Default: global.
align: global
# The following options affect only import list alignment.
#
# List align has the following options:
#
# - after_alias: Import list is aligned with end of import including
# 'as' and 'hiding' keywords.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - with_alias: Import list is aligned with start of alias or hiding.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - new_line: Import list starts always on new line.
#
# > import qualified Data.List as List
# > (concat, foldl, foldr, head, init, last, length)
#
# Default: after_alias
list_align: after_alias
# Right-pad the module names to align imports in a group:
#
# - true: a little more readable
#
# > import qualified Data.List as List (concat, foldl, foldr,
# > init, last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# - false: diff-safe
#
# > import qualified Data.List as List (concat, foldl, foldr, init,
# > last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# Default: true
pad_module_names: true
# Long list align style takes effect when the import is too long. This is
# determined by the 'columns' setting.
#
# - inline: This option will put as many specs on the same line as possible.
#
# - new_line: Import list will start on new line.
#
# - new_line_multiline: Import list will start on a new line when it's
# short enough to fit on a single line. Otherwise it'll be multiline.
#
# - multiline: One line per import list entry.
# Type with constructor list acts like single import.
#
# > import qualified Data.Map as M
# > ( empty
# > , singleton
# > , ...
# > , delete
# > )
#
# Default: inline
long_list_align: inline
# Align empty list (importing instances)
#
# Empty list align has the following options:
#
# - inherit: inherit list_align setting
#
# - right_after: () is right after the module name:
#
# > import Vector.Instances ()
#
# Default: inherit
empty_list_align: inherit
# List padding determines indentation of import list on lines after import.
# This option affects 'long_list_align'.
#
# - <integer>: constant value
#
# - module_name: align under start of module name.
# Useful for 'file' and 'group' align settings.
list_padding: 4
# The separate lists option affects formatting of the import list for a type
# or class. The only difference is a single space between the type and the
# list of constructors, selectors and class functions.
#
# - true: There is a single space between the Foldable type and the list of its
# functions.
#
# > import Data.Foldable (Foldable (fold, foldl, foldMap))
#
# - false: There is no space between the Foldable type and the list of its
# functions.
#
# > import Data.Foldable (Foldable(fold, foldl, foldMap))
#
# Default: true
separate_lists: true
# The space surround option affects formatting of import lists on a single
# line. The only difference is a single space after the initial
# parenthesis and a single space before the terminal parenthesis.
#
# - true: There is a single space associated with the enclosing
# parenthesis.
#
# > import Data.Foo ( foo )
#
# - false: There is no space associated with the enclosing parenthesis
#
# > import Data.Foo (foo)
#
# Default: false
space_surround: false
# Language pragmas
- language_pragmas:
# We can generate different styles of language pragma lists.
#
# - vertical: Vertical-spaced language pragmas, one per line.
#
# - compact: A more compact style.
#
# - compact_line: Similar to compact, but wrap each line with
# `{-#LANGUAGE #-}'.
#
# Default: vertical.
style: vertical
# Align affects alignment of closing pragma brackets.
#
# - true: Brackets are aligned in same column.
#
# - false: Brackets are not aligned together. There is only one space
# between the pragma text and the closing bracket.
#
# Default: true
align: true
# stylish-haskell can detect redundancy of some language pragmas. If this
# is set to true, it will remove those redundant pragmas. Default: true.
remove_redundant: true
# Replace tabs by spaces. This is disabled by default.
# - tabs:
# # Number of spaces to use for each tab. Default: 8, as specified by the
# # Haskell report.
# spaces: 8
# Remove trailing whitespace
- trailing_whitespace: {}
# A common setting is the number of columns (parts of) code will be wrapped
# to. Different steps take this into account. Default: 80.
columns: 80
# By default, line endings are converted according to the OS. You can override
# preferred format here.
#
# - native: Native newline format. CRLF on Windows, LF on other OSes.
#
# - lf: Convert to LF ("\n").
#
# - crlf: Convert to CRLF ("\r\n").
#
# Default: native.
newline: native
# Sometimes, language extensions are specified in a cabal file or from the
# command line instead of using language pragmas in the file. stylish-haskell
# needs to be aware of these, so it can parse the file correctly.
#
# No language extensions are enabled by default.
language_extensions:
- BangPatterns
- FlexibleContexts
- FlexibleInstances
- MultiParamTypeClasses
- MultiWayIf
- NoImplicitPrelude
- OverloadedStrings
- RankNTypes
- ScopedTypeVariables
- TupleSections
- TypeFamilies
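For illustration only (not part of the commit): with the settings above, stylish-haskell emits vertical language pragmas with aligned closing brackets, padded and aligned imports, and aligned record fields, roughly like the made-up module below.

{-# LANGUAGE BangPatterns      #-}
{-# LANGUAGE OverloadedStrings #-}

module Example where

import qualified Data.ByteString as B
import           Data.Text       (Text)

-- simple_align (records: true) lines up the `::` of record fields.
data Example = Example
    { exampleName    :: Text
    , examplePayload :: B.ByteString
    }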

View File

@ -46,12 +46,10 @@ library
build-depends: base >= 4.7 && < 5
, protolude >= 0.1.6
, aeson
, async
, base64-bytestring
, bytestring
, case-insensitive
, conduit
, conduit-combinators
, conduit-extra
, containers
, cryptonite
@ -62,22 +60,19 @@ library
, http-client
, http-conduit
, http-types
, lifted-async
, lifted-base
, memory
, monad-control
, resourcet
, text
, text-format
, time
, transformers
, transformers-base
, vector
, unliftio
, unliftio-core
, xml-conduit
default-language: Haskell2010
default-extensions: FlexibleContexts
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, BangPatterns
, MultiParamTypeClasses
, MultiWayIf
, NoImplicitPrelude
@ -100,12 +95,12 @@ test-suite minio-hs-live-server-test
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, OverloadedStrings
, NoImplicitPrelude
, MultiParamTypeClasses
, MultiWayIf
, ScopedTypeVariables
, NoImplicitPrelude
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TupleSections
, TypeFamilies
other-modules: Lib.Prelude
@ -133,12 +128,10 @@ test-suite minio-hs-live-server-test
, minio-hs
, protolude >= 0.1.6
, aeson
, async
, base64-bytestring
, bytestring
, case-insensitive
, conduit
, conduit-combinators
, conduit-extra
, containers
, cryptonite
@ -150,10 +143,7 @@ test-suite minio-hs-live-server-test
, http-client
, http-conduit
, http-types
, lifted-async
, lifted-base
, memory
, monad-control
, QuickCheck
, resourcet
, tasty
@ -165,8 +155,8 @@ test-suite minio-hs-live-server-test
, text-format
, time
, transformers
, transformers-base
, vector
, unliftio
, unliftio-core
, xml-conduit
if !flag(live-test)
buildable: False
@ -179,12 +169,10 @@ test-suite minio-hs-test
, minio-hs
, protolude >= 0.1.6
, aeson
, async
, base64-bytestring
, bytestring
, case-insensitive
, conduit
, conduit-combinators
, conduit-extra
, containers
, cryptonite
@ -192,14 +180,10 @@ test-suite minio-hs-test
, data-default
, directory
, exceptions
, filepath
, http-client
, http-conduit
, http-types
, lifted-async
, lifted-base
, memory
, monad-control
, QuickCheck
, resourcet
, tasty
@ -211,20 +195,20 @@ test-suite minio-hs-test
, text-format
, time
, transformers
, transformers-base
, vector
, unliftio
, unliftio-core
, xml-conduit
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N
default-language: Haskell2010
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, OverloadedStrings
, NoImplicitPrelude
, MultiParamTypeClasses
, MultiWayIf
, ScopedTypeVariables
, NoImplicitPrelude
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TupleSections
, TypeFamilies
other-modules: Lib.Prelude

View File

@ -179,7 +179,6 @@ import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC
import Data.Default (def)
import qualified Data.Map as Map
import Lib.Prelude
@ -189,6 +188,7 @@ import Network.Minio.Errors
import Network.Minio.ListOps
import Network.Minio.PutObject
import Network.Minio.S3API
import Network.Minio.Utils
-- | Lists buckets.
listBuckets :: Minio [BucketInfo]
@ -200,7 +200,7 @@ listBuckets = getService
fGetObject :: Bucket -> Object -> FilePath -> GetObjectOptions -> Minio ()
fGetObject bucket object fp opts = do
src <- getObject bucket object opts
src C.$$+- CB.sinkFileCautious fp
C.connect src $ CB.sinkFileCautious fp
-- | Upload the given file to the given object.
fPutObject :: Bucket -> Object -> FilePath
@ -212,7 +212,7 @@ fPutObject bucket object f opts =
-- known; this helps the library select optimal part sizes to perform
-- a multipart upload. If not specified, it is assumed that the object
-- can be potentially 5TiB and selects multipart sizes appropriately.
putObject :: Bucket -> Object -> C.Producer Minio ByteString
putObject :: Bucket -> Object -> C.ConduitM () ByteString Minio ()
-> Maybe Int64 -> PutObjectOptions -> Minio ()
putObject bucket object src sizeMay opts =
void $ putObjectInternal bucket object opts $ ODStream src sizeMay
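A hedged usage sketch of the new conduit-based signature (the bucket, object and file path are illustrative; `def` is the default PutObjectOptions defined elsewhere in this commit):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Conduit.Combinators as CC
import           Data.Default             (def)
import           Network.Minio

uploadStream :: Minio ()
uploadStream =
  -- No size hint (Nothing), so the library assumes the stream may be
  -- large and picks multipart part sizes conservatively.
  putObject "my-bucket" "my-object" (CC.sourceFile "/tmp/payload.bin") Nothing def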
@ -223,15 +223,18 @@ putObject bucket object src sizeMay opts =
-- copy operation if the new object is to be greater than 5GiB in
-- size.
copyObject :: DestinationInfo -> SourceInfo -> Minio ()
copyObject dstInfo srcInfo = void $ copyObjectInternal (dstBucket dstInfo) (dstObject dstInfo) srcInfo
copyObject dstInfo srcInfo = void $ copyObjectInternal (dstBucket dstInfo)
(dstObject dstInfo) srcInfo
-- | Remove an object from the object store.
removeObject :: Bucket -> Object -> Minio ()
removeObject = deleteObject
-- | Get an object from the object store as a resumable source (conduit).
getObject :: Bucket -> Object -> GetObjectOptions -> Minio (C.ResumableSource Minio ByteString)
getObject bucket object opts = snd <$> getObject' bucket object [] (gooToHeaders opts)
getObject :: Bucket -> Object -> GetObjectOptions
-> Minio (C.ConduitM () ByteString Minio ())
getObject bucket object opts = snd <$> getObject' bucket object []
(gooToHeaders opts)
-- | Get an object's metadata from the object store.
statObject :: Bucket -> Object -> Minio ObjectInfo
@ -245,21 +248,21 @@ makeBucket :: Bucket -> Maybe Region -> Minio ()
makeBucket bucket regionMay = do
region <- maybe (asks $ connectRegion . mcConnInfo) return regionMay
putBucket bucket region
modify (Map.insert bucket region)
addToRegionCache bucket region
-- | Removes a bucket from the object store.
removeBucket :: Bucket -> Minio ()
removeBucket bucket = do
deleteBucket bucket
modify (Map.delete bucket)
deleteFromRegionCache bucket
-- | Query the object store if a given bucket is present.
bucketExists :: Bucket -> Minio Bool
bucketExists = headBucket
-- | Removes an ongoing multipart upload of an object.
removeIncompleteUpload :: Bucket -> Object -> Minio ()
removeIncompleteUpload bucket object = do
uploads <- listIncompleteUploads bucket (Just object) False C.$$ CC.sinkList
uploads <- C.runConduit $ listIncompleteUploads bucket (Just object) False
C..| CC.sinkList
mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads)

View File

@ -15,8 +15,7 @@
--
module Network.Minio.API
(
connect
( connect
, RequestInfo(..)
, runMinio
, executeRequest
@ -86,10 +85,10 @@ getLocation bucket = do
discoverRegion :: RequestInfo -> Minio (Maybe Region)
discoverRegion ri = runMaybeT $ do
bucket <- MaybeT $ return $ riBucket ri
regionMay <- gets (Map.lookup bucket)
regionMay <- lift $ lookupRegionCache bucket
maybe (do
l <- lift $ getLocation bucket
modify $ Map.insert bucket l
lift $ addToRegionCache bucket l
return l
) return regionMay
@ -161,7 +160,7 @@ executeRequest ri = do
mkStreamRequest :: RequestInfo
-> Minio (Response (C.ResumableSource Minio ByteString))
-> Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ri = do
req <- buildRequest ri
mgr <- asks mcConnManager

View File

@ -18,9 +18,11 @@
{-# LANGUAGE TypeFamilies #-}
module Network.Minio.Data where
import Control.Monad.Base
import Control.Concurrent.MVar (MVar)
import qualified Control.Concurrent.MVar as M
import qualified Control.Monad.Catch as MC
import Control.Monad.Trans.Control
import Control.Monad.IO.Unlift (MonadUnliftIO, UnliftIO (..),
askUnliftIO, withUnliftIO)
import Control.Monad.Trans.Resource
import qualified Data.ByteString as B
@ -195,7 +197,7 @@ data PutObjectOptions = PutObjectOptions {
, pooStorageClass :: Maybe Text
, pooUserMetadata :: [(Text, Text)]
, pooNumThreads :: Maybe Word
} deriving (Show, Eq)
} deriving (Show, Eq)
-- Provide a default instance
instance Default PutObjectOptions where
@ -498,7 +500,7 @@ type UrlExpiry = Int
type RegionMap = Map.Map Bucket Region
newtype Minio a = Minio {
unMinio :: ReaderT MinioConn (StateT RegionMap (ResourceT IO)) a
unMinio :: ReaderT MinioConn (ResourceT IO) a
}
deriving (
Functor
@ -506,38 +508,38 @@ newtype Minio a = Minio {
, Monad
, MonadIO
, MonadReader MinioConn
, MonadState RegionMap
, MonadThrow
, MonadCatch
, MonadBase IO
, MonadResource
)
instance MonadBaseControl IO Minio where
type StM Minio a = (a, RegionMap)
liftBaseWith f = Minio $ liftBaseWith $ \q -> f (q . unMinio)
restoreM = Minio . restoreM
instance MonadUnliftIO Minio where
askUnliftIO = Minio $ ReaderT $ \r ->
withUnliftIO $ \u ->
return (UnliftIO (unliftIO u . flip runReaderT r . unMinio))
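With this instance in place, any MonadUnliftIO combinator can run Minio actions directly; a minimal sketch (the helper is hypothetical, statObject is the existing high-level call):

import Network.Minio
import UnliftIO.Async (concurrently)

-- Both requests share the same MinioConn and ResourceT state; the
-- MonadUnliftIO instance above is what lets `concurrently` unlift them.
statBoth :: Bucket -> Object -> Object -> Minio (ObjectInfo, ObjectInfo)
statBoth b o1 o2 = concurrently (statObject b o1) (statObject b o2)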
-- | MinioConn holds connection info and a connection pool
data MinioConn = MinioConn {
mcConnInfo :: ConnectInfo
data MinioConn = MinioConn
{ mcConnInfo :: ConnectInfo
, mcConnManager :: NC.Manager
, mcRegionMap :: MVar RegionMap
}
-- | Takes connection information and returns a connection object to
-- be passed to 'runMinio'
connect :: ConnectInfo -> IO MinioConn
connect ci = do
let settings = bool defaultManagerSettings NC.tlsManagerSettings $
connectIsSecure ci
let settings | connectIsSecure ci = NC.tlsManagerSettings
| otherwise = defaultManagerSettings
mgr <- NC.newManager settings
return $ MinioConn ci mgr
rMapMVar <- M.newMVar Map.empty
return $ MinioConn ci mgr rMapMVar
-- | Run the Minio action and return the result or an error.
runMinio :: ConnectInfo -> Minio a -> IO (Either MinioErr a)
runMinio ci m = do
conn <- liftIO $ connect ci
runResourceT . flip evalStateT Map.empty . flip runReaderT conn . unMinio $
runResourceT . flip runReaderT conn . unMinio $
fmap Right m `MC.catches`
[ MC.Handler handlerServiceErr
, MC.Handler handlerHE
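A hedged end-to-end sketch of the new flow (the bucket name is illustrative, and `def` assumes the library's default ConnectInfo):

{-# LANGUAGE OverloadedStrings #-}
import Data.Default  (def)
import Network.Minio

main :: IO ()
main = do
  -- connect runs inside runMinio, creating a fresh region-cache MVar for
  -- this connection. `def` is assumed to provide a default ConnectInfo
  -- (a public test server); use real credentials in practice.
  res <- runMinio def $ do
    makeBucket "scratch-bucket" Nothing
    bucketExists "scratch-bucket"
  case res of
    Left err     -> putStrLn $ "minio error: " ++ show err
    Right exists -> print exists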

View File

@ -28,37 +28,38 @@ module Network.Minio.Data.Crypto
, digestToBase16
) where
import Crypto.Hash (SHA256(..), MD5(..), hashWith, Digest)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (hmac, HMAC)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (convertToBase, Base(Base16))
import qualified Data.Conduit as C
import Crypto.Hash (Digest, MD5 (..), SHA256 (..),
hashWith)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256
hashSHA256FromSource :: Monad m => C.Producer m ByteString -> m ByteString
hashSHA256FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource src = do
digest <- src C.$$ sinkSHA256Hash
digest <- C.connect src sinkSHA256Hash
return $ digestToBase16 digest
where
-- To help with type inference
sinkSHA256Hash :: Monad m => C.Consumer ByteString m (Digest SHA256)
sinkSHA256Hash :: Monad m => C.ConduitM ByteString Void m (Digest SHA256)
sinkSHA256Hash = sinkHash
hashMD5 :: ByteString -> ByteString
hashMD5 = digestToBase16 . hashWith MD5
hashMD5FromSource :: Monad m => C.Producer m ByteString -> m ByteString
hashMD5FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashMD5FromSource src = do
digest <- src C.$$ sinkMD5Hash
digest <- C.connect src sinkMD5Hash
return $ digestToBase16 digest
where
-- To help with type inference
sinkMD5Hash :: Monad m => C.Consumer ByteString m (Digest MD5)
sinkMD5Hash :: Monad m => C.ConduitM ByteString Void m (Digest MD5)
sinkMD5Hash = sinkHash
hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256

View File

@ -16,9 +16,9 @@
module Network.Minio.ListOps where
import qualified Data.Conduit as C
import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL
import qualified Data.Conduit.List as CL
import Lib.Prelude
@ -27,10 +27,10 @@ import Network.Minio.S3API
-- | List objects in a bucket matching the given prefix. If recurse is
-- set to True objects matching prefix are recursively listed.
listObjects :: Bucket -> Maybe Text -> Bool -> C.Producer Minio ObjectInfo
listObjects :: Bucket -> Maybe Text -> Bool -> C.ConduitM () ObjectInfo Minio ()
listObjects bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.Producer Minio ObjectInfo
loop :: Maybe Text -> C.ConduitM () ObjectInfo Minio ()
loop nextToken = do
let
delimiter = bool (Just "/") Nothing recurse
@ -42,10 +42,11 @@ listObjects bucket prefix recurse = loop Nothing
-- | List objects in a bucket matching the given prefix. If recurse is
-- set to True objects matching prefix are recursively listed.
listObjectsV1 :: Bucket -> Maybe Text -> Bool -> C.Producer Minio ObjectInfo
listObjectsV1 :: Bucket -> Maybe Text -> Bool
-> C.ConduitM () ObjectInfo Minio ()
listObjectsV1 bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.Producer Minio ObjectInfo
loop :: Maybe Text -> C.ConduitM () ObjectInfo Minio ()
loop nextMarker = do
let
delimiter = bool (Just "/") Nothing recurse
@ -59,10 +60,10 @@ listObjectsV1 bucket prefix recurse = loop Nothing
-- recurse is set to True incomplete uploads for the given prefix are
-- recursively listed.
listIncompleteUploads :: Bucket -> Maybe Text -> Bool
-> C.Producer Minio UploadInfo
-> C.ConduitM () UploadInfo Minio ()
listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
where
loop :: Maybe Text -> Maybe Text -> C.Producer Minio UploadInfo
loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio ()
loop nextKeyMarker nextUploadIdMarker = do
let
delimiter = bool (Just "/") Nothing recurse
@ -71,7 +72,8 @@ listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
nextKeyMarker nextUploadIdMarker Nothing
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
partInfos <- listIncompleteParts bucket uKey uId C.$$ CC.sinkList
partInfos <- C.runConduit $ listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList $
@ -86,10 +88,10 @@ listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
-- | List object parts of an ongoing multipart upload for given
-- bucket, object and uploadId.
listIncompleteParts :: Bucket -> Object -> UploadId
-> C.Producer Minio ObjectPartInfo
-> C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts bucket object uploadId = loop Nothing
where
loop :: Maybe Text -> C.Producer Minio ObjectPartInfo
loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio ()
loop nextPartMarker = do
res <- lift $ listIncompleteParts' bucket object uploadId Nothing
nextPartMarker

View File

@ -46,9 +46,14 @@ import Network.Minio.Utils
-- For streams also, a size may be provided. This is useful to limit
-- the input - if it is not provided, upload will continue until the
-- stream ends or the object reaches `maxObjectsize` size.
data ObjectData m =
ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional size.
| ODStream (C.Producer m ByteString) (Maybe Int64) -- ^ Pass size in bytes as maybe if known.
data ObjectData m
= ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional
-- size.
| ODStream (C.ConduitM () ByteString m ()) (Maybe Int64) -- ^ Pass
-- size
-- (bytes)
-- if
-- known.
-- | Put an object from ObjectData. This high-level API handles
-- objects of all sizes, and even if the object size is unknown.
@ -108,7 +113,7 @@ parallelMultipartUpload b o opts filePath size = do
-- | Upload multipart object from conduit source sequentially
sequentialMultipartUpload :: Bucket -> Object -> PutObjectOptions
-> Maybe Int64
-> C.Producer Minio ByteString
-> C.ConduitM () ByteString Minio ()
-> Minio ETag
sequentialMultipartUpload b o opts sizeMay src = do
-- get a new upload id.
@ -117,11 +122,12 @@ sequentialMultipartUpload b o opts sizeMay src = do
-- upload parts in loop
let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay
(pnums, _, sizes) = List.unzip3 partSizes
uploadedParts <- src
uploadedParts <- C.runConduit
$ src
C..| chunkBSConduit sizes
C..| CL.map PayloadBS
C..| uploadPart' uploadId pnums
C.$$ CC.sinkList
C..| CC.sinkList
-- complete multipart upload
completeMultipartUpload b o uploadId uploadedParts

View File

@ -114,7 +114,7 @@ getService = do
-- | GET an object from the service and return the response headers
-- and a conduit source for the object content
getObject' :: Bucket -> Object -> HT.Query -> [HT.Header]
-> Minio ([HT.Header], C.ResumableSource Minio ByteString)
-> Minio ([HT.Header], C.ConduitM () ByteString Minio ())
getObject' bucket object queryParams headers = do
resp <- mkStreamRequest reqInfo
return (NC.responseHeaders resp, NC.responseBody resp)

View File

@ -16,71 +16,71 @@
module Network.Minio.Utils where
import qualified Control.Concurrent.Async.Lifted as A
import qualified Control.Concurrent.QSem.Lifted as Q
import qualified Control.Exception.Lifted as ExL
import qualified Control.Monad.Catch as MC
import qualified Control.Monad.Trans.Resource as R
import qualified Data.Map as Map
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk)
import Data.CaseInsensitive (original)
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.List as List
import qualified Data.Text as T
import Data.Text.Encoding.Error (lenientDecode)
import Data.Text.Read (decimal)
import Data.Time
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import qualified System.IO as IO
import qualified Control.Monad.Catch as MC
import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.List as List
import qualified Data.Map as Map
import qualified Data.Text as T
import Data.Text.Encoding.Error (lenientDecode)
import Data.Text.Read (decimal)
import Data.Time (defaultTimeLocale, parseTimeM,
rfc822DateFormat)
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import qualified System.IO as IO
import qualified UnliftIO.Async as A
import qualified UnliftIO.Exception as UEx
import qualified UnliftIO.MVar as UM
import qualified UnliftIO.STM as U
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.ByteString
import Network.Minio.XmlParser (parseErrResponse)
import Network.Minio.XmlParser (parseErrResponse)
allocateReadFile :: (R.MonadResource m, R.MonadResourceBase m, MonadCatch m)
allocateReadFile :: (MonadUnliftIO m, R.MonadResource m, MonadCatch m)
=> FilePath -> m (R.ReleaseKey, Handle)
allocateReadFile fp = do
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup
either (\(e :: IOException) -> throwM e) (return . (rk,)) hdlE
where
openReadFile f = ExL.try $ IO.openBinaryFile f IO.ReadMode
openReadFile f = UEx.try $ IO.openBinaryFile f IO.ReadMode
cleanup = either (const $ return ()) IO.hClose
-- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead.
getFileSize :: (R.MonadResourceBase m, R.MonadResource m)
getFileSize :: (MonadUnliftIO m, R.MonadResource m)
=> Handle -> m (Maybe Int64)
getFileSize h = do
resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
case resE of
Left (_ :: IOException) -> return Nothing
Right s -> return $ Just s
Right s -> return $ Just s
-- | Queries if handle is seekable. Catches any file operation
-- exceptions and return False instead.
isHandleSeekable :: (R.MonadResource m, R.MonadResourceBase m)
isHandleSeekable :: (R.MonadResource m, MonadUnliftIO m)
=> Handle -> m Bool
isHandleSeekable h = do
resE <- liftIO $ try $ IO.hIsSeekable h
case resE of
Left (_ :: IOException) -> return False
Right v -> return v
Right v -> return v
-- | Helper function that opens a handle to the filepath and performs
-- the given action on it. Exceptions of type MError are caught and
-- returned - both during file handle allocation and when the action
-- is run.
withNewHandle :: (R.MonadResourceBase m, R.MonadResource m, MonadCatch m)
withNewHandle :: (MonadUnliftIO m, R.MonadResource m, MonadCatch m)
=> FilePath -> (Handle -> m a) -> m (Either IOException a)
withNewHandle fp fileAction = do
-- opening a handle can throw MError exception.
@ -150,16 +150,17 @@ httpLbs req mgr = do
contentTypeMay resp = lookupHeader Hdr.hContentType $
NC.responseHeaders resp
http :: (R.MonadResourceBase m, R.MonadResource m)
http :: (MonadUnliftIO m, MonadThrow m, R.MonadResource m)
=> NC.Request -> NC.Manager
-> m (Response (C.ResumableSource m ByteString))
-> m (Response (C.ConduitT () ByteString m ()))
http req mgr = do
respE <- tryHttpEx $ NC.http req mgr
resp <- either throwM return respE
unless (isSuccessStatus $ NC.responseStatus resp) $
case contentTypeMay resp of
Just "application/xml" -> do
respBody <- NC.responseBody resp C.$$+- CB.sinkLbs
respBody <- C.connect (NC.responseBody resp) CB.sinkLbs
--respBody <- C.unsealConduitT (NC.responseBody resp) C.$$+- CB.sinkLbs
sErr <- parseErrResponse respBody
throwM sErr
@ -171,23 +172,34 @@ http req mgr = do
return resp
where
tryHttpEx :: (R.MonadResourceBase m) => m a
tryHttpEx :: (MonadUnliftIO m) => m a
-> m (Either NC.HttpException a)
tryHttpEx = ExL.try
tryHttpEx = UEx.try
contentTypeMay resp = lookupHeader Hdr.hContentType $ NC.responseHeaders resp
-- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore.
limitedMapConcurrently :: (MonadIO m, R.MonadBaseControl IO m)
limitedMapConcurrently :: MonadUnliftIO m
=> Int -> (t -> m a) -> [t] -> m [a]
limitedMapConcurrently 0 _ _ = return []
limitedMapConcurrently count act args = do
qSem <- liftIO $ Q.newQSem count
threads <- mapM (A.async . wThread qSem) args
t' <- U.newTVarIO count
threads <- mapM (A.async . wThread t') args
mapM A.wait threads
where
-- grab 1 unit from semaphore, run action and release it
wThread qs arg =
ExL.bracket_ (Q.waitQSem qs) (Q.signalQSem qs) $ act arg
wThread t arg =
UEx.bracket_ (waitSem t) (signalSem t) $ act arg
-- quantity semaphore implementation using TVar
waitSem t = U.atomically $ do
v <- U.readTVar t
if v > 0
then U.writeTVar t (v-1)
else U.retrySTM
signalSem t = U.atomically $ do
v <- U.readTVar t
U.writeTVar t (v+1)
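A hedged usage sketch (the wrapper is hypothetical; Object and ObjectInfo come from Network.Minio.Data, already imported above): at most ten workers run at once, the rest block on the TVar counter.

-- Illustrative only: run a per-object action over many objects with
-- bounded concurrency.
fetchInfos :: MonadUnliftIO m
           => (Object -> m ObjectInfo) -> [Object] -> m [ObjectInfo]
fetchInfos fetchOne = limitedMapConcurrently 10 fetchOne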
-- helper function to 'drop' empty optional parameter.
mkQuery :: Text -> Maybe Text -> Maybe (Text, Text)
@ -199,7 +211,7 @@ mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query
mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
chunkBSConduit :: (Monad m, Integral a)
=> [a] -> C.Conduit ByteString m ByteString
=> [a] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit s = loop 0 [] s
where
loop _ _ [] = return ()
@ -231,3 +243,19 @@ selectPartSizes size = uncurry (List.zip3 [1..]) $
| st > sz = []
| st + m >= sz = [(st, sz - st)]
| otherwise = (st, m) : loop (st + m) sz
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar
return $ Map.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . Map.insert b region
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . Map.delete b

View File

@ -15,7 +15,7 @@
# resolver:
# name: custom-snapshot
# location: "./custom-snapshot.yaml"
resolver: lts-9.1
resolver: lts-11.1
# User packages to be built.
# Various formats can be used as shown in the example below.

View File

@ -19,15 +19,10 @@ import Test.Tasty
import Test.Tasty.HUnit
import Test.Tasty.QuickCheck as QC
import Lib.Prelude
import System.Directory (getTemporaryDirectory)
import qualified System.IO as SIO
import qualified Control.Monad.Catch as MC
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as BS
import Data.Conduit (yield, ($$))
import Data.Conduit (yield)
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import Data.Conduit.Combinators (sinkList)
@ -39,7 +34,11 @@ import qualified Data.Time as Time
import qualified Network.HTTP.Client.MultipartFormData as Form
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import System.Directory (getTemporaryDirectory)
import System.Environment (lookupEnv)
import qualified System.IO as SIO
import Lib.Prelude
import Network.Minio
import Network.Minio.Data
@ -54,7 +53,7 @@ tests :: TestTree
tests = testGroup "Tests" [liveServerUnitTests]
-- conduit that generates random binary stream of given length
randomDataSrc :: MonadIO m => Int64 -> C.Producer m ByteString
randomDataSrc :: MonadIO m => Int64 -> C.ConduitM () ByteString m ()
randomDataSrc s' = genBS s'
where
concatIt bs n = BS.concat $ replicate (fromIntegral q) bs ++
@ -72,7 +71,7 @@ randomDataSrc s' = genBS s'
mkRandFile :: R.MonadResource m => Int64 -> m FilePath
mkRandFile size = do
dir <- liftIO $ getTemporaryDirectory
randomDataSrc size C.$$ CB.sinkTempFile dir "miniohstest.random"
C.runConduit $ randomDataSrc size C..| CB.sinkTempFile dir "miniohstest.random"
funTestBucketPrefix :: Text
funTestBucketPrefix = "miniohstest-"
@ -158,13 +157,14 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
\obj -> fPutObject bucket obj "/etc/lsb-release" def
step "High-level listing of objects"
objects <- listObjects bucket Nothing True $$ sinkList
objects <- C.runConduit $ listObjects bucket Nothing True C..| sinkList
liftIO $ assertEqual "Objects match failed!" (sort expectedObjects)
(map oiObject objects)
step "High-level listing of objects (version 1)"
objectsV1 <- listObjectsV1 bucket Nothing True $$ sinkList
objectsV1 <- C.runConduit $ listObjectsV1 bucket Nothing True C..|
sinkList
liftIO $ assertEqual "Objects match failed!" (sort expectedObjects)
(map oiObject objectsV1)
@ -181,7 +181,9 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
step "High-level listing of incomplete multipart uploads"
uploads <- listIncompleteUploads bucket (Just "newmpupload") True $$ sinkList
uploads <- C.runConduit $
listIncompleteUploads bucket (Just "newmpupload") True C..|
sinkList
liftIO $ length uploads @?= 10
step "cleanup"
@ -202,7 +204,8 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
putObjectPart bucket object uid pnum [] $ PayloadH h 0 mb5
step "fetch list parts"
incompleteParts <- listIncompleteParts bucket object uid $$ sinkList
incompleteParts <- C.runConduit $ listIncompleteParts bucket object uid
C..| sinkList
liftIO $ length incompleteParts @?= 10
step "cleanup"
@ -318,7 +321,8 @@ liveServerUnitTests = testGroup "Unit tests against a live server"
step "remove ongoing upload"
removeIncompleteUpload bucket object
uploads <- listIncompleteUploads bucket (Just object) False C.$$ sinkList
uploads <- C.runConduit $ listIncompleteUploads bucket (Just object) False
C..| sinkList
liftIO $ (null uploads) @? "removeIncompleteUploads didn't complete successfully"
, funTestWithBucket "putObject contentType tests" $ \step bucket -> do
@ -345,11 +349,11 @@ liveServerUnitTests = testGroup "Unit tests against a live server"
}
oiCE <- headObject bucket object
let m = oiMetadata oiCE
let m' = oiMetadata oiCE
step "Validate content-encoding"
liftIO $ assertEqual "Content-Encoding did not match" (Just "identity")
(Map.lookup "Content-Encoding" m)
(Map.lookup "Content-Encoding" m')
step "Cleanup actions"
@ -475,8 +479,8 @@ liveServerUnitTests = testGroup "Unit tests against a live server"
void $ completeMultipartUpload bucket copyObj uid parts
step "verify copied object size"
oi <- headObject bucket copyObj
let s' = oiSize oi
oi' <- headObject bucket copyObj
let s' = oiSize oi'
liftIO $ (s' == mb15) @? "Size failed to match"
@ -491,8 +495,8 @@ liveServerUnitTests = testGroup "Unit tests against a live server"
step "Prepare"
forM_ (zip srcs sizes) $ \(src, size) -> do
inputFile <- mkRandFile size
fPutObject bucket src inputFile def
inputFile' <- mkRandFile size
fPutObject bucket src inputFile' def
step "make small and large object copy"
forM_ (zip copyObjs srcs) $ \(cp, src) ->
@ -510,8 +514,8 @@ liveServerUnitTests = testGroup "Unit tests against a live server"
size = 15 * 1024 * 1024
step "Prepare"
inputFile <- mkRandFile size
fPutObject bucket src inputFile def
inputFile' <- mkRandFile size
fPutObject bucket src inputFile' def
step "copy last 10MiB of object"
copyObject def { dstBucket = bucket, dstObject = copyObj } def{
@ -579,18 +583,18 @@ basicTests = funTestWithBucket "Basic tests" $ \step bucket -> do
_ -> return ()
step "fGetObject an object with no matching etag, check for exception"
resE <- MC.try $ fGetObject bucket "lsb-release" outFile def{
resE1 <- MC.try $ fGetObject bucket "lsb-release" outFile def{
gooIfMatch = (Just "invalid-etag")
}
case resE of
case resE1 of
Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold"
_ -> return ()
step "fGetObject an object with no valid range, check for exception"
resE <- MC.try $ fGetObject bucket "lsb-release" outFile def{
resE2 <- MC.try $ fGetObject bucket "lsb-release" outFile def{
gooRange = (Just $ HT.ByteRangeFromTo 100 200)
}
case resE of
case resE2 of
Left exn -> liftIO $ exn @?= ServiceErr "InvalidRange" "The requested range is not satisfiable"
_ -> return ()
@ -600,8 +604,8 @@ basicTests = funTestWithBucket "Basic tests" $ \step bucket -> do
}
step "fGetObject a non-existent object and check for NoSuchKey exception"
resE <- MC.try $ fGetObject bucket "noSuchKey" outFile def
case resE of
resE3 <- MC.try $ fGetObject bucket "noSuchKey" outFile def
case resE3 of
Left exn -> liftIO $ exn @?= NoSuchKey
_ -> return ()
@ -658,7 +662,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
"presigned GET failed"
-- read content from file to compare with response above
bs <- CB.sourceFile inputFile $$ CB.sinkLbs
bs <- C.runConduit $ CB.sourceFile inputFile C..| CB.sinkLbs
liftIO $ (bs == NC.responseBody getResp) @?
"presigned put and get got mismatched data"
@ -693,7 +697,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
"presigned GET failed (presignedGetObjectUrl)"
-- read content from file to compare with response above
bs2 <- CB.sourceFile testFile $$ CB.sinkLbs
bs2 <- C.runConduit $ CB.sourceFile testFile C..| CB.sinkLbs
liftIO $ (bs2 == NC.responseBody getResp2) @?
"presigned put and get got mismatched data (presigned*Url)"