Update formatting with latest ormolu 1.4 (#163)
This commit is contained in: parent 73bc5b64a0, commit b8cc1e57ee
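Every hunk below is a mechanical restyling from the ormolu update named in the title: operators such as ($) and (:) now trail the preceding line instead of leading the continuation line, long ($) chains are split into one application per line, operator names are sorted after plain identifiers in import lists, and a blank line appears after "-- *" section headings in export lists. As a quick orientation, here is the checkBucketNameValidity change from the hunk at @@ -266,9 +266,9 @@ below, shown with approximate indentation (the exact indentation is not preserved in this view):

  -- before: leading ($)
  checkBucketNameValidity bucket =
    when (not $ isValidBucketName bucket)
      $ throwIO
      $ MErrVInvalidBucketName bucket

  -- after: trailing ($)
  checkBucketNameValidity bucket =
    when (not $ isValidBucketName bucket) $
      throwIO $
        MErrVInvalidBucketName bucket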
@@ -34,9 +34,9 @@ main = do
 -- Performs a recursive listing of incomplete uploads under bucket "test"
 -- on a local minio server.
 res <-
-runMinio minioPlayCI
-$ runConduit
-$ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
+runMinio minioPlayCI $
+runConduit $
+listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
 print res

 {-
@@ -34,9 +34,9 @@ main = do
 -- Performs a recursive listing of all objects under bucket "test"
 -- on play.min.io.
 res <-
-runMinio minioPlayCI
-$ runConduit
-$ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
+runMinio minioPlayCI $
+runConduit $
+listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
 print res

 {-
@@ -73,8 +73,9 @@ main = do
 ]
 formOptions = B.intercalate " " $ map formFn $ H.toList formData

-return $ B.intercalate " " $
-["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
+return $
+B.intercalate " " $
+["curl", formOptions, "-F file=@/tmp/photo.jpg", url]

 case res of
 Left e -> putStrLn $ "post-policy error: " ++ (show e)
@@ -55,6 +55,7 @@ module Network.Minio
 gcsCI,

 -- * Minio Monad
+
 ----------------

 -- | The Minio Monad provides connection-reuse, bucket-location
@@ -186,9 +186,9 @@ buildRequest ri = do
 retryAPIRequest :: Minio a -> Minio a
 retryAPIRequest apiCall = do
 resE <-
-retrying retryPolicy (const shouldRetry)
-$ const
-$ try apiCall
+retrying retryPolicy (const shouldRetry) $
+const $
+try apiCall
 either throwIO return resE
 where
 -- Retry using the full-jitter backoff method for up to 10 mins
@@ -266,9 +266,9 @@ isValidBucketName bucket =
 -- Throws exception iff bucket name is invalid according to AWS rules.
 checkBucketNameValidity :: MonadIO m => Bucket -> m ()
 checkBucketNameValidity bucket =
-when (not $ isValidBucketName bucket)
-$ throwIO
-$ MErrVInvalidBucketName bucket
+when (not $ isValidBucketName bucket) $
+throwIO $
+MErrVInvalidBucketName bucket

 isValidObjectName :: Object -> Bool
 isValidObjectName object =
@@ -276,6 +276,6 @@ isValidObjectName object =

 checkObjectNameValidity :: MonadIO m => Object -> m ()
 checkObjectNameValidity object =
-when (not $ isValidObjectName object)
-$ throwIO
-$ MErrVInvalidObjectName object
+when (not $ isValidObjectName object) $
+throwIO $
+MErrVInvalidObjectName object
@@ -16,7 +16,8 @@

 module Network.Minio.AdminAPI
 ( -- * MinIO Admin API
+
 --------------------

 -- | Provides MinIO admin API and related types. It is in
 -- experimental state.
@@ -52,10 +53,7 @@ module Network.Minio.AdminAPI
 where

 import Data.Aeson
-( (.:),
-(.:?),
-(.=),
-FromJSON,
+( FromJSON,
 ToJSON,
 Value (Object),
 eitherDecode,
@@ -66,6 +64,9 @@ import Data.Aeson
 toJSON,
 withObject,
 withText,
+(.:),
+(.:?),
+(.=),
 )
 import qualified Data.Aeson as A
 import Data.Aeson.Types (typeMismatch)
@@ -610,9 +611,9 @@ buildAdminRequest areq = do
 areq
 { ariPayloadHash = Just sha256Hash,
 ariHeaders =
-hostHeader
-: sha256Header sha256Hash
-: ariHeaders areq
+hostHeader :
+sha256Header sha256Hash :
+ariHeaders areq
 }
 signReq = toRequest ci newAreq
 sp =
@@ -51,8 +51,8 @@ copyObjectInternal b' o srcInfo = do
 endOffset >= fromIntegral srcSize
 ]
 )
-$ throwIO
-$ MErrVInvalidSrcObjByteRange range
+$ throwIO $
+MErrVInvalidSrcObjByteRange range

 -- 1. If sz > 64MiB (minPartSize) use multipart copy, OR
 -- 2. If startOffset /= 0 use multipart copy
@@ -69,9 +69,9 @@ copyObjectInternal b' o srcInfo = do
 -- used is minPartSize.
 selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
 selectCopyRanges (st, end) =
-zip pns
-$ map (\(x, y) -> (st + x, st + x + y - 1))
-$ zip startOffsets partSizes
+zip pns $
+map (\(x, y) -> (st + x, st + x + y - 1)) $
+zip startOffsets partSizes
 where
 size = end - st + 1
 (pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
@@ -38,8 +38,10 @@ class UriEncodable s where

 instance UriEncodable [Char] where
 uriEncode encodeSlash payload =
-LB.toStrict $ BB.toLazyByteString $ mconcat $
-map (`uriEncodeChar` encodeSlash) payload
+LB.toStrict $
+BB.toLazyByteString $
+mconcat $
+map (`uriEncodeChar` encodeSlash) payload

 instance UriEncodable ByteString where
 -- assumes that uriEncode is passed ASCII encoded strings.
@@ -20,11 +20,11 @@ module Network.Minio.JsonParser
 where

 import Data.Aeson
-( (.:),
-FromJSON,
+( FromJSON,
 eitherDecode,
 parseJSON,
 withObject,
+(.:),
 )
 import qualified Data.Text as T
 import Lib.Prelude
@@ -51,10 +51,10 @@ listObjects bucket prefix recurse = loop Nothing

 res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
 CL.sourceList $ map ListItemObject $ lorObjects res
-unless recurse
-$ CL.sourceList
-$ map ListItemPrefix
-$ lorCPrefixes res
+unless recurse $
+CL.sourceList $
+map ListItemPrefix $
+lorCPrefixes res
 when (lorHasMore res) $
 loop (lorNextToken res)

@@ -73,10 +73,10 @@ listObjectsV1 bucket prefix recurse = loop Nothing

 res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
 CL.sourceList $ map ListItemObject $ lorObjects' res
-unless recurse
-$ CL.sourceList
-$ map ListItemPrefix
-$ lorCPrefixes' res
+unless recurse $
+CL.sourceList $
+map ListItemPrefix $
+lorCPrefixes' res
 when (lorHasMore' res) $
 loop (lorNextMarker res)

@@ -104,19 +104,20 @@ listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
 nextUploadIdMarker
 Nothing

-aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
-partInfos <-
-C.runConduit $
-listIncompleteParts bucket uKey uId
-C..| CC.sinkList
-return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
+aggrSizes <- lift $
+forM (lurUploads res) $ \(uKey, uId, _) -> do
+partInfos <-
+C.runConduit $
+listIncompleteParts bucket uKey uId
+C..| CC.sinkList
+return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos

-CL.sourceList
-$ map
+CL.sourceList $
+map
 ( \((uKey, uId, uInitTime), size) ->
 UploadInfo uKey uId uInitTime size
 )
 $ zip (lurUploads res) aggrSizes

 when (lurHasMore res) $
 loop (lurNextKey res) (lurNextUpload res)
@@ -68,9 +68,9 @@ makePresignedUrl ::
 HT.RequestHeaders ->
 Minio ByteString
 makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
-when (expiry > 7 * 24 * 3600 || expiry < 0)
-$ throwIO
-$ MErrVInvalidUrlExpiry expiry
+when (expiry > 7 * 24 * 3600 || expiry < 0) $
+throwIO $
+MErrVInvalidUrlExpiry expiry

 ci <- asks mcConnInfo

@@ -103,11 +103,13 @@ makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
 ((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
 scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci

-return $ toStrictBS $ toLazyByteString $
-scheme
-<> byteString (getHostAddr ci)
-<> byteString (getS3Path bucket object)
-<> queryStr
+return $
+toStrictBS $
+toLazyByteString $
+scheme
+<> byteString (getHostAddr ci)
+<> byteString (getS3Path bucket object)
+<> queryStr

 -- | Generate a URL with authentication signature to PUT (upload) an
 -- object. Any extra headers if passed, are signed, and so they are
@@ -331,18 +333,21 @@ presignedPostPolicy p = do
 mkPair (PPCEquals k v) = Just (k, v)
 mkPair _ = Nothing
 formFromPolicy =
-H.map toUtf8 $ H.fromList $ catMaybes $
-mkPair <$> conditions ppWithCreds
+H.map toUtf8 $
+H.fromList $
+catMaybes $
+mkPair <$> conditions ppWithCreds
 formData = formFromPolicy `H.union` signData
 -- compute POST upload URL
 bucket = H.lookupDefault "" "bucket" formData
 scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
 region = connectRegion ci
 url =
-toStrictBS $ toLazyByteString $
-scheme <> byteString (getHostAddr ci)
-<> byteString "/"
-<> byteString bucket
-<> byteString "/"
+toStrictBS $
+toLazyByteString $
+scheme <> byteString (getHostAddr ci)
+<> byteString "/"
+<> byteString bucket
+<> byteString "/"

 return (url, formData)
@@ -19,10 +19,12 @@ module Network.Minio.S3API
 getLocation,

 -- * Listing buckets
+
 --------------------
 getService,

 -- * Listing objects
+
 --------------------
 ListObjectsResult (..),
 ListObjectsV1Result (..),
@@ -33,11 +35,13 @@ module Network.Minio.S3API
 headBucket,

 -- * Retrieving objects
+
 -----------------------
 getObject',
 headObject,

 -- * Creating buckets and objects
+
 ---------------------------------
 putBucket,
 ETag,
@@ -47,6 +51,7 @@ module Network.Minio.S3API
 copyObjectSingle,

 -- * Multipart Upload APIs
+
 --------------------------
 UploadId,
 PartTuple,
@@ -63,11 +68,13 @@ module Network.Minio.S3API
 listIncompleteParts',

 -- * Deletion APIs
+
 --------------------------
 deleteBucket,
 deleteObject,

 -- * Presigned Operations
+
 -----------------------------
 module Network.Minio.PresignedOperations,

@@ -76,6 +83,7 @@ module Network.Minio.S3API
 setBucketPolicy,

 -- * Bucket Notifications
+
 -------------------------
 Notification (..),
 NotificationConfig (..),
@@ -157,24 +165,26 @@ getObject' bucket object queryParams headers = do
 { riBucket = Just bucket,
 riObject = Just object,
 riQueryParams = queryParams,
-riHeaders = headers
--- This header is required for safety as otherwise http-client,
--- sends Accept-Encoding: gzip, and the server may actually gzip
--- body. In that case Content-Length header will be missing.
-<> [("Accept-Encoding", "identity")]
+riHeaders =
+headers
+-- This header is required for safety as otherwise http-client,
+-- sends Accept-Encoding: gzip, and the server may actually gzip
+-- body. In that case Content-Length header will be missing.
+<> [("Accept-Encoding", "identity")]
 }

 -- | Creates a bucket via a PUT bucket call.
 putBucket :: Bucket -> Region -> Minio ()
 putBucket bucket location = do
 ns <- asks getSvcNamespace
-void $ executeRequest $
-defaultS3ReqInfo
-{ riMethod = HT.methodPut,
-riBucket = Just bucket,
-riPayload = PayloadBS $ mkCreateBucketConfig ns location,
-riNeedsLocation = False
-}
+void $
+executeRequest $
+defaultS3ReqInfo
+{ riMethod = HT.methodPut,
+riBucket = Just bucket,
+riPayload = PayloadBS $ mkCreateBucketConfig ns location,
+riNeedsLocation = False
+}

 -- | Single PUT object size.
 maxSinglePutObjectSizeBytes :: Int64
@@ -188,9 +198,9 @@ putObjectSingle' :: Bucket -> Object -> [HT.Header] -> ByteString -> Minio ETag
 putObjectSingle' bucket object headers bs = do
 let size = fromIntegral (BS.length bs)
 -- check length is within single PUT object size.
-when (size > maxSinglePutObjectSizeBytes)
-$ throwIO
-$ MErrVSinglePUTSizeExceeded size
+when (size > maxSinglePutObjectSizeBytes) $
+throwIO $
+MErrVSinglePUTSizeExceeded size

 let payload = mkStreamingPayload $ PayloadBS bs
 resp <-
@@ -222,9 +232,9 @@ putObjectSingle ::
 Minio ETag
 putObjectSingle bucket object headers h offset size = do
 -- check length is within single PUT object size.
-when (size > maxSinglePutObjectSizeBytes)
-$ throwIO
-$ MErrVSinglePUTSizeExceeded size
+when (size > maxSinglePutObjectSizeBytes) $
+throwIO $
+MErrVSinglePUTSizeExceeded size

 -- content-length header is automatically set by library.
 let payload = mkStreamingPayload $ PayloadH h offset size
@@ -301,23 +311,23 @@ listObjects' bucket prefix nextToken delimiter maxKeys = do
 -- | DELETE a bucket from the service.
 deleteBucket :: Bucket -> Minio ()
 deleteBucket bucket =
-void
-$ executeRequest
-$ defaultS3ReqInfo
+void $
+executeRequest $
+defaultS3ReqInfo
 { riMethod = HT.methodDelete,
 riBucket = Just bucket
 }

 -- | DELETE an object from the service.
 deleteObject :: Bucket -> Object -> Minio ()
 deleteObject bucket object =
-void
-$ executeRequest
-$ defaultS3ReqInfo
+void $
+executeRequest $
+defaultS3ReqInfo
 { riMethod = HT.methodDelete,
 riBucket = Just bucket,
 riObject = Just object
 }

 -- | Create a new multipart upload.
 newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
@@ -377,8 +387,8 @@ srcInfoToHeaders srcInfo =
 "/",
 srcObject srcInfo
 ]
-)
-: rangeHdr
+) :
+rangeHdr
 ++ zip names values
 where
 names =
@@ -477,14 +487,14 @@ completeMultipartUpload bucket object uploadId partTuple = do
 -- | Abort a multipart upload.
 abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
 abortMultipartUpload bucket object uploadId =
-void
-$ executeRequest
-$ defaultS3ReqInfo
+void $
+executeRequest $
+defaultS3ReqInfo
 { riMethod = HT.methodDelete,
 riBucket = Just bucket,
 riObject = Just object,
 riQueryParams = mkOptionalParams params
 }
 where
 params = [("uploadId", Just uploadId)]

@@ -509,14 +519,14 @@ listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys
 where
 -- build query params
 params =
-("uploads", Nothing)
-: mkOptionalParams
+("uploads", Nothing) :
+mkOptionalParams
 [ ("prefix", prefix),
 ("delimiter", delimiter),
 ("key-marker", keyMarker),
 ("upload-id-marker", uploadIdMarker),
 ("max-uploads", show <$> maxKeys)
 ]

 -- | List parts of an ongoing multipart upload.
 listIncompleteParts' ::
@@ -553,15 +563,16 @@ headObject bucket object reqHeaders = do
 { riMethod = HT.methodHead,
 riBucket = Just bucket,
 riObject = Just object,
-riHeaders = reqHeaders
--- This header is required for safety as otherwise http-client,
--- sends Accept-Encoding: gzip, and the server may actually gzip
--- body. In that case Content-Length header will be missing.
-<> [("Accept-Encoding", "identity")]
+riHeaders =
+reqHeaders
+-- This header is required for safety as otherwise http-client,
+-- sends Accept-Encoding: gzip, and the server may actually gzip
+-- body. In that case Content-Length header will be missing.
+<> [("Accept-Encoding", "identity")]
 }
-maybe (throwIO MErrVInvalidObjectInfoResponse) return
-$ parseGetObjectHeaders object
-$ NC.responseHeaders resp
+maybe (throwIO MErrVInvalidObjectInfoResponse) return $
+parseGetObjectHeaders object $
+NC.responseHeaders resp

 -- | Query the object store if a given bucket exists.
 headBucket :: Bucket -> Minio Bool
@@ -594,15 +605,16 @@ headBucket bucket =
 putBucketNotification :: Bucket -> Notification -> Minio ()
 putBucketNotification bucket ncfg = do
 ns <- asks getSvcNamespace
-void $ executeRequest $
-defaultS3ReqInfo
-{ riMethod = HT.methodPut,
-riBucket = Just bucket,
-riQueryParams = [("notification", Nothing)],
-riPayload =
-PayloadBS $
-mkPutNotificationRequest ns ncfg
-}
+void $
+executeRequest $
+defaultS3ReqInfo
+{ riMethod = HT.methodPut,
+riBucket = Just bucket,
+riQueryParams = [("notification", Nothing)],
+riPayload =
+PayloadBS $
+mkPutNotificationRequest ns ncfg
+}

 -- | Retrieve the notification configuration on a bucket.
 getBucketNotification :: Bucket -> Minio Notification
@@ -644,20 +656,22 @@ setBucketPolicy bucket policy = do
 -- | Save a new policy on a bucket.
 putBucketPolicy :: Bucket -> Text -> Minio ()
 putBucketPolicy bucket policy = do
-void $ executeRequest $
-defaultS3ReqInfo
-{ riMethod = HT.methodPut,
-riBucket = Just bucket,
-riQueryParams = [("policy", Nothing)],
-riPayload = PayloadBS $ encodeUtf8 policy
-}
+void $
+executeRequest $
+defaultS3ReqInfo
+{ riMethod = HT.methodPut,
+riBucket = Just bucket,
+riQueryParams = [("policy", Nothing)],
+riPayload = PayloadBS $ encodeUtf8 policy
+}

 -- | Delete any policy set on a bucket.
 deleteBucketPolicy :: Bucket -> Minio ()
 deleteBucketPolicy bucket = do
-void $ executeRequest $
-defaultS3ReqInfo
-{ riMethod = HT.methodDelete,
-riBucket = Just bucket,
-riQueryParams = [("policy", Nothing)]
-}
+void $
+executeRequest $
+defaultS3ReqInfo
+{ riMethod = HT.methodDelete,
+riBucket = Just bucket,
+riQueryParams = [("policy", Nothing)]
+}
@@ -198,14 +198,14 @@ mkCanonicalRequest ::
 ByteString
 mkCanonicalRequest !isStreaming !sp !req !headersForSign =
 let canonicalQueryString =
-B.intercalate "&"
-$ map (\(x, y) -> B.concat [x, "=", y])
-$ sort
-$ map
+B.intercalate "&" $
+map (\(x, y) -> B.concat [x, "=", y]) $
+sort $
+map
 ( \(x, y) ->
 (uriEncode True x, maybe "" (uriEncode True) y)
 )
 $ (parseQuery $ NC.queryString req)
 sortedHeaders = sort headersForSign
 canonicalHeaders =
 B.concat $
@@ -298,8 +298,8 @@ signV4Stream !payloadLength !sp !req =
 in case ceMay of
 Nothing -> ("content-encoding", "aws-chunked") : hs
 Just (_, ce) ->
-("content-encoding", ce <> ",aws-chunked")
-: filter (\(x, _) -> x /= "content-encoding") hs
+("content-encoding", ce <> ",aws-chunked") :
+filter (\(x, _) -> x /= "content-encoding") hs
 -- headers to be added to the request
 datePair = ("X-Amz-Date", awsTimeFormatBS ts)
 computedHeaders =
@@ -170,8 +170,9 @@ httpLbs req mgr = do
 sErr <- parseErrResponseJSON $ NC.responseBody resp
 throwIO sErr
 _ ->
-throwIO $ NC.HttpExceptionRequest req $
-NC.StatusCodeException (void resp) (showBS resp)
+throwIO $
+NC.HttpExceptionRequest req $
+NC.StatusCodeException (void resp) (showBS resp)

 return resp
 where
@@ -199,8 +200,9 @@ http req mgr = do
 throwIO sErr
 _ -> do
 content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
-throwIO $ NC.HttpExceptionRequest req $
-NC.StatusCodeException (void resp) content
+throwIO $
+NC.HttpExceptionRequest req $
+NC.StatusCodeException (void resp) content

 return resp
 where
@@ -265,9 +267,9 @@ chunkBSConduit (s : ss) = do
 -- be 64MiB.
 selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
 selectPartSizes size =
-uncurry (List.zip3 [1 ..])
-$ List.unzip
-$ loop 0 size
+uncurry (List.zip3 [1 ..]) $
+List.unzip $
+loop 0 size
 where
 ceil :: Double -> Int64
 ceil = ceiling
@@ -56,9 +56,9 @@ uncurry6 f (a, b, c, d, e, g) = f a b c d e g
 -- | Parse time strings from XML
 parseS3XMLTime :: MonadIO m => Text -> m UTCTime
 parseS3XMLTime t =
-maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return
-$ parseTimeM True defaultTimeLocale s3TimeFormat
-$ T.unpack t
+maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
+parseTimeM True defaultTimeLocale s3TimeFormat $
+T.unpack t

 parseDecimal :: (MonadIO m, Integral a) => Text -> m a
 parseDecimal numStr =
@@ -134,12 +134,12 @@ basicTests = funTestWithBucket "Basic tests" $
 \step bucket -> do
 step "getService works and contains the test bucket."
 buckets <- getService
-unless (length (filter (== bucket) $ map biName buckets) == 1)
-$ liftIO
-$ assertFailure
+unless (length (filter (== bucket) $ map biName buckets) == 1) $
+liftIO $
+assertFailure
 ( "The bucket " ++ show bucket
 ++ " was expected to exist."
 )

 step "makeBucket again to check if BucketAlreadyOwnedByYou exception is raised."
 mbE <- try $ makeBucket bucket Nothing
@@ -361,22 +361,24 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $

 step "High-level listing of objects"
 items <- C.runConduit $ listObjects bucket Nothing False C..| sinkList
-liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $
-extractObjectsAndDirsFromList items
+liftIO $
+assertEqual "Objects/Dirs match failed!" expectedNonRecList $
+extractObjectsAndDirsFromList items

 step "High-level recursive listing of objects"
 objects <- C.runConduit $ listObjects bucket Nothing True C..| sinkList

-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects match failed!"
 (Just $ sort expectedObjects)
 $ extractObjectsFromList objects

 step "High-level listing of objects (version 1)"
 itemsV1 <- C.runConduit $ listObjectsV1 bucket Nothing False C..| sinkList
-liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $
-extractObjectsAndDirsFromList itemsV1
+liftIO $
+assertEqual "Objects/Dirs match failed!" expectedNonRecList $
+extractObjectsAndDirsFromList itemsV1

 step "High-level recursive listing of objects (version 1)"
 objectsV1 <-
@@ -384,45 +386,45 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
 listObjectsV1 bucket Nothing True
 C..| sinkList

-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects match failed!"
 (Just $ sort expectedObjects)
 $ extractObjectsFromList objectsV1

 let expectedPrefListing = ["dir/o1", "dir/dir1/", "dir/dir2/"]
 expectedPrefListingRec = Just ["dir/dir1/o2", "dir/dir2/o3", "dir/o1"]
 step "High-level listing with prefix"
 prefItems <- C.runConduit $ listObjects bucket (Just "dir/") False C..| sinkList
-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects/Dirs under prefix match failed!"
 expectedPrefListing
 $ extractObjectsAndDirsFromList prefItems

 step "High-level listing with prefix recursive"
 prefItemsRec <- C.runConduit $ listObjects bucket (Just "dir/") True C..| sinkList
-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects/Dirs under prefix match recursive failed!"
 expectedPrefListingRec
 $ extractObjectsFromList prefItemsRec

 step "High-level listing with prefix (version 1)"
 prefItemsV1 <- C.runConduit $ listObjectsV1 bucket (Just "dir/") False C..| sinkList
-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects/Dirs under prefix match failed!"
 expectedPrefListing
 $ extractObjectsAndDirsFromList prefItemsV1

 step "High-level listing with prefix recursive (version 1)"
 prefItemsRecV1 <- C.runConduit $ listObjectsV1 bucket (Just "dir/") True C..| sinkList
-liftIO
-$ assertEqual
+liftIO $
+assertEqual
 "Objects/Dirs under prefix match recursive failed!"
 expectedPrefListingRec
 $ extractObjectsFromList prefItemsRecV1

 step "Cleanup actions"
 forM_ expectedObjects $
@@ -910,8 +912,9 @@ putObjectUserMetadataTest = funTestWithBucket "putObject user-metadata test" $
 let m = oiUserMetadata oi
 -- need to do a case-insensitive comparison
 sortedMeta =
-sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $
-H.toList m
+sort $
+map (\(k, v) -> (T.toLower k, T.toLower v)) $
+H.toList m
 ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]

 liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
@@ -944,8 +947,9 @@ getObjectTest = funTestWithBucket "getObject test" $
 let m = oiUserMetadata $ gorObjectInfo gor
 -- need to do a case-insensitive comparison
 sortedMeta =
-sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $
-H.toList m
+sort $
+map (\(k, v) -> (T.toLower k, T.toLower v)) $
+H.toList m
 ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]

 liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
@@ -63,8 +63,9 @@ parseServerInfoJSONTest =
 testGroup "Parse MinIO Admin API ServerInfo JSON test" $
 map
 ( \(tName, tDesc, tfn, tVal) ->
-testCase tName $ assertBool tDesc $
-tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
+testCase tName $
+assertBool tDesc $
+tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
 )
 testCases
 where
@@ -82,8 +83,9 @@ parseHealStatusTest =
 testGroup "Parse MinIO Admin API HealStatus JSON test" $
 map
 ( \(tName, tDesc, tfn, tVal) ->
-testCase tName $ assertBool tDesc $
-tfn (eitherDecode tVal :: Either [Char] HealStatus)
+testCase tName $
+assertBool tDesc $
+tfn (eitherDecode tVal :: Either [Char] HealStatus)
 )
 testCases
 where
@@ -101,8 +103,9 @@ parseHealStartRespTest =
 testGroup "Parse MinIO Admin API HealStartResp JSON test" $
 map
 ( \(tName, tDesc, tfn, tVal) ->
-testCase tName $ assertBool tDesc $
-tfn (eitherDecode tVal :: Either [Char] HealStartResp)
+testCase tName $
+assertBool tDesc $
+tfn (eitherDecode tVal :: Either [Char] HealStartResp)
 )
 testCases
 where
@@ -43,9 +43,9 @@ testParseErrResponseJSON :: Assertion
 testParseErrResponseJSON = do
 -- 1. Test parsing of an invalid error json.
 parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
-when (isRight parseResE)
-$ assertFailure
-$ "Parsing should have failed => " ++ show parseResE
+when (isRight parseResE) $
+assertFailure $
+"Parsing should have failed => " ++ show parseResE

 forM_ cases $ \(jsondata, sErr) -> do
 parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
@@ -62,9 +62,9 @@ testParseLocation :: Assertion
 testParseLocation = do
 -- 1. Test parsing of an invalid location constraint xml.
 parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
-when (isRight parseResE)
-$ assertFailure
-$ "Parsing should have failed => " ++ show parseResE
+when (isRight parseResE) $
+assertFailure $
+"Parsing should have failed => " ++ show parseResE

 forM_ cases $ \(xmldata, expectedLocation) -> do
 parseLocE <- tryValidationErr $ parseLocation xmldata
@@ -344,11 +344,12 @@ testParseNotification = do
 "1"
 "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
 [ObjectCreatedPut]
-( Filter $ FilterKey $
-FilterRules
-[ FilterRule "prefix" "images/",
-FilterRule "suffix" ".jpg"
-]
+( Filter $
+FilterKey $
+FilterRules
+[ FilterRule "prefix" "images/",
+FilterRule "suffix" ".jpg"
+]
 ),
 NotificationConfig
 ""