Mirror of https://github.com/commercialhaskell/stackage-server.git
Synced 2026-01-12 04:08:29 +01:00

Merge pull request #324 from chreekat/b/handover-patches: Handover patches

Commit 6ff1ee7d15
LICENSE
@@ -1,6 +1,7 @@
 The MIT License (MIT)
 
 Copyright (c) 2014-2017 FP Complete
+Copyright (c) 2024 Haskell Foundation
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -38,17 +38,24 @@ optsParser =
          \their yaml files from stackage-snapshots repo have been updated or not.") <*>
     option
         readText
-        (long "download-bucket" <> value haddockBucketName <> metavar "DOWNLOAD_BUCKET" <>
+        (long "download-bucket" <> value defHaddockBucketName <> metavar "DOWNLOAD_BUCKET" <>
          help
              ("S3 Bucket name where things like haddock and current hoogle files should \
-              \be downloaded from. Default is: " <>
-              T.unpack haddockBucketName)) <*>
+              \be downloaded from. Used in S3 API read operations. Default is: " <>
+              T.unpack defHaddockBucketName)) <*>
+    option
+        readText
+        (long "download-bucket-url" <> value defHaddockBucketUrl <> metavar "DOWNLOAD_BUCKET_URL" <>
+         help
+             ("Publicly accessible URL where the download bucket can be accessed. Used for \
+              \serving the Haddocks on the website. Default is: " <>
+              T.unpack defHaddockBucketUrl)) <*>
     option
         readText
-        (long "upload-bucket" <> value haddockBucketName <> metavar "UPLOAD_BUCKET" <>
+        (long "upload-bucket" <> value defHaddockBucketName <> metavar "UPLOAD_BUCKET" <>
          help
              ("S3 Bucket where hoogle db and snapshots.json file will be uploaded to. Default is: " <>
-              T.unpack haddockBucketName)) <*>
+              T.unpack defHaddockBucketName)) <*>
     switch
         (long "do-not-upload" <>
         help "Stop hoogle db and snapshots.json from being generated and uploaded") <*>

@@ -24,3 +24,6 @@ force-ssl: false
 
 postgres-string: "_env:PGSTRING:host=localhost port=5432 user=stackage dbname=stackage password=stackage"
 postgres-poolsize: "_env:PGPOOLSIZE:8"
+
+# Publicly-accessible URL for the bucket holding Haddock contents.
+download-bucket-url: "_env:DOWNLOAD_BUCKET_URL:https://s3.amazonaws.com/haddock.stackage.org"

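Note on the `_env:NAME:default` form above: it is the yesod-style settings convention, where the value is read from the named environment variable when set, falling back to the inline default. Conceptually (a simplified sketch, not yesod's actual loader):

```haskell
import           Data.Maybe (fromMaybe)
import qualified Data.Text as T
import           System.Environment (lookupEnv)

-- Simplified model of what "_env:DOWNLOAD_BUCKET_URL:<default>" resolves to.
resolveEnvSetting :: String -> String -> IO T.Text
resolveEnvSetting name def = T.pack . fromMaybe def <$> lookupEnv name

-- e.g. resolveEnvSetting "DOWNLOAD_BUCKET_URL"
--        "https://s3.amazonaws.com/haddock.stackage.org"
```
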
@@ -157,7 +157,7 @@ withFoundation appLogFunc appSettings inner = do
                     runRIO appLogFunc $ RIO.logError $ "Couldn't get Stack matcher: " <> displayShow e
                     pure oldMatcher
     appMirrorStatus <- mkUpdateMirrorStatus
-    hoogleLocker <- newHoogleLocker appLogFunc appHttpManager
+    hoogleLocker <- newHoogleLocker appLogFunc appHttpManager (appDownloadBucketUrl appSettings)
     let appGetHoogleDB = singleRun hoogleLocker
     let appGitRev = $$tGitRev
     runConcurrently $ runContentUpdates *> Concurrently (inner App {..})

@@ -84,6 +84,12 @@ singleRun sr@(SingleRun var f) k =
 
                 -- OK, we're done running, so let other
                 -- threads run this again.
+
+                -- NB: as soon as we've modified the MVar, the next
+                -- call to singleRun will think no thread is working and
+                -- start over. Anything waiting on us will get our
+                -- result, but nobody else will. That's ok: singleRun
+                -- just provides a little caching on top of a mutex.
                 modifyMVar_ var $ return . filter (\(k', _) -> k /= k')
 
                 case eres of

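Note: the semantics those comments describe, in miniature (mkSingleRun/singleRun as used elsewhere in this PR; the exact signatures are assumptions):

```haskell
import Control.SingleRun (mkSingleRun, singleRun)

demo :: IO ()
demo = do
  -- One SingleRun guards one keyed, expensive action.
  sr <- mkSingleRun $ \name -> do
          putStrLn ("computing " ++ name)  -- runs once per in-flight key
          pure (length name)
  r1 <- singleRun sr "lts-22.6"  -- concurrent calls with the same key share
  r2 <- singleRun sr "lts-22.6"  -- one run; after completion the next call reruns
  print (r1, r2)
```
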
@@ -2,6 +2,7 @@
 {-# LANGUAGE NoImplicitPrelude #-}
 {-# LANGUAGE RecordWildCards #-}
 {-# LANGUAGE TypeFamilies #-}
+{-# LANGUAGE TypeOperators #-}
 module Data.WebsiteContent
     ( WebsiteContent (..)
     , StackRelease (..)

@@ -11,6 +11,7 @@ import Data.Conduit.Attoparsec (sinkParser)
 import Data.WebsiteContent
 import Import
 import Yesod.GitRepo
+import qualified Data.Aeson.KeyMap as Aeson
 
 getDownloadStackListR :: Handler Html
 getDownloadStackListR = track "Handler.DownloadStack.getDownloadStackListR" $ do

@@ -35,14 +36,14 @@ getLatestMatcher man = do
     return $ \pattern' -> do
         let pattern'' = pattern' ++ "."
         Object top <- return val
-        Array assets <- lookup "assets" top
+        Array assets <- Aeson.lookup "assets" top
        headMay $ preferZip $ catMaybes $ map (findMatch pattern'') assets
   where
     findMatch pattern' (Object o) = do
-        String name <- lookup "name" o
+        String name <- Aeson.lookup "name" o
         guard $ not $ ".asc" `isSuffixOf` name
         guard $ pattern' `isInfixOf` name
-        String url <- lookup "browser_download_url" o
+        String url <- Aeson.lookup "browser_download_url" o
         Just url
     findMatch _ _ = Nothing
 

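Note: the `lookup` to `Aeson.lookup` changes in this file (and in MirrorStatus below) are the aeson >= 2.0 migration: `Object` now wraps a `KeyMap` keyed by `Key` rather than a `HashMap Text`, so the generic `lookup` no longer applies. The recurring pattern, in isolation:

```haskell
import           Data.Aeson (Value (..))
import qualified Data.Aeson.Key as Key
import qualified Data.Aeson.KeyMap as KeyMap
import           Data.Text (Text)

-- Look up a Text key inside an aeson-2 Object. With OverloadedStrings a
-- literal key works directly, since Key has an IsString instance.
lookupText :: Text -> Value -> Maybe Value
lookupText k (Object o) = KeyMap.lookup (Key.fromText k) o
lookupText _ _          = Nothing
```
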
@@ -8,13 +8,14 @@ import Import
 import qualified Data.Text as T (takeEnd)
 import Stackage.Database
 
-makeURL :: SnapName -> [Text] -> Text
-makeURL snapName rest = concat
-    $ "https://s3.amazonaws.com/"
-    : haddockBucketName
-    : "/"
-    : toPathPiece snapName
-    : map (cons '/') rest
+makeURL :: SnapName -> [Text] -> Handler Text
+makeURL snapName rest = do
+    bucketUrl <- getsYesod (appDownloadBucketUrl . appSettings)
+    pure . concat
+        $ bucketUrl
+        : "/"
+        : toPathPiece snapName
+        : map (cons '/') rest
 
 shouldRedirect :: Bool
 shouldRedirect = False

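Note: makeURL becomes effectful only to read the configured bucket URL; the concatenation itself is unchanged. The pure core, with a worked example (snapshot and path invented):

```haskell
import           Data.Text (Text, cons)
import qualified Data.Text as T

-- Pure core of the new makeURL, with the bucket URL passed explicitly.
makeURL' :: Text -> Text -> [Text] -> Text
makeURL' bucketUrl snapName rest =
    T.concat $ bucketUrl : "/" : snapName : map (cons '/') rest

-- >>> makeURL' "https://s3.amazonaws.com/haddock.stackage.org" "lts-22.6" ["mypkg-1.0", "index.html"]
-- "https://s3.amazonaws.com/haddock.stackage.org/lts-22.6/mypkg-1.0/index.html"
```
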
@@ -27,7 +28,7 @@ getHaddockR snapName rest
         result <- redirectWithVersion snapName rest
         case result of
             Just route -> redirect route
-            Nothing -> redirect $ makeURL snapName rest
+            Nothing -> redirect =<< makeURL snapName rest
     | Just docType <- mdocType = do
         cacheSeconds $ 60 * 60 * 24 * 7
         result <- redirectWithVersion snapName rest

@@ -41,7 +42,7 @@ getHaddockR snapName rest
                         return ("text/html; charset=utf-8", mstyle /= Just "stackage")
                     DocJson ->
                         return ("application/json; charset=utf-8", True)
-        req <- parseRequest $ unpack $ makeURL snapName rest
+        req <- parseRequest =<< unpack <$> makeURL snapName rest
         man <- getHttpManager <$> getYesod
         (_, res) <- runReaderT (acquireResponse req >>= allocateAcquire) man
         if plain

@@ -54,7 +55,7 @@ getHaddockR snapName rest
                      peekC >>= maybe (return ()) (const $ yield $ encodeUtf8 extra)
                      mapC id) .|
                 mapC (Chunk . toBuilder)
-    | otherwise = redirect $ makeURL snapName rest
+    | otherwise = redirect =<< makeURL snapName rest
   where
     mdocType =
         case T.takeEnd 5 <$> headMay (reverse rest) of

@@ -141,6 +142,9 @@ getHaddockBackupR (snap':rest)
     | Just branch <- fromPathPiece snap' = track "Handler.Haddock.getHaddockBackupR" $ do
         snapName <- newestSnapshot branch >>= maybe notFound pure
         redirect $ HaddockR snapName rest
-getHaddockBackupR rest = track "Handler.Haddock.getHaddockBackupR" $ redirect $ concat
-    $ "https://s3.amazonaws.com/haddock.stackage.org"
-    : map (cons '/') rest
+getHaddockBackupR rest = track "Handler.Haddock.getHaddockBackupR" $ do
+    bucketUrl <- getsYesod (appDownloadBucketUrl . appSettings)
+    redirect
+        $ concat
+        $ bucketUrl
+        : map (cons '/') rest

@@ -12,6 +12,8 @@ import RIO.Time (diffUTCTime, addUTCTime, getCurrentTime)
 import Text.XML.Stream.Parse
 import Data.XML.Types (Event (EventContent), Content (ContentText))
 import qualified Prelude
+import qualified Data.Aeson.Key as Aeson
+import qualified Data.Aeson.KeyMap as Aeson
 
 getMirrorStatusR :: Handler Html
 getMirrorStatusR = do

@@ -148,7 +150,7 @@ getLastModifiedGit org repo ref = do
 
 lookupJ :: MonadThrow m => Text -> Value -> m Value
 lookupJ key (Object o) =
-    case lookup key o of
+    case Aeson.lookup (Aeson.fromText key) o of
         Nothing -> error $ "Key not found: " ++ show key
         Just x -> return x
 lookupJ key val = error $ concat

@@ -2,13 +2,12 @@
 module Handler.StackageIndex where
 
 import Import
-import Stackage.Database.Types (haddockBucketName)
 
 getStackageIndexR :: SnapName -> Handler TypedContent
-getStackageIndexR slug =
+getStackageIndexR slug = do
+    bucketUrl <- getsYesod (appDownloadBucketUrl . appSettings)
     redirect $ concat
-        [ "https://s3.amazonaws.com/"
-        , haddockBucketName
+        [ bucketUrl
         , "/package-index/"
         , toPathPiece slug
         , ".tar.gz"

@@ -56,6 +56,8 @@ data AppSettings = AppSettings
     -- ^ Force redirect to SSL
     , appDevDownload :: Bool
     -- ^ Controls how Git and database resources are downloaded (True means less downloading)
+    , appDownloadBucketUrl :: Text
+    -- ^ Publicly-accessible URL for the bucket holding Haddock contents.
     }
 
 data DatabaseSettings

@@ -109,6 +111,7 @@ instance FromJSON AppSettings where
         appSkipCombining <- o .:? "skip-combining" .!= dev
         appForceSsl <- o .:? "force-ssl" .!= not dev
         appDevDownload <- o .:? "dev-download" .!= dev
+        appDownloadBucketUrl <- o .:? "download-bucket-url" .!= "https://s3.amazonaws.com/haddock.stackage.org"
 
         return AppSettings {..}
 

@@ -5,18 +5,20 @@
 {-# LANGUAGE RecordWildCards #-}
 {-# LANGUAGE ScopedTypeVariables #-}
 {-# LANGUAGE TupleSections #-}
+{-# LANGUAGE FlexibleContexts #-}
 module Stackage.Database.Cron
     ( stackageServerCron
     , newHoogleLocker
     , singleRun
     , StackageCronOptions(..)
-    , haddockBucketName
+    , defHaddockBucketName
+    , defHaddockBucketUrl
     ) where
 
 import Conduit
 import Control.DeepSeq
-import qualified Control.Monad.Trans.AWS as AWS (paginate)
 import Control.SingleRun
+import Control.Lens ((?~))
 import qualified Data.ByteString.Char8 as BS8
 import qualified Data.Conduit.Binary as CB
 import Data.Conduit.Tar (FileInfo(..), FileType(..), untar)

@@ -28,9 +30,12 @@ import Data.Yaml (decodeFileEither)
 import Database.Persist hiding (exists)
 import Database.Persist.Postgresql hiding (exists)
 import qualified Hoogle
-import Network.AWS hiding (Request, Response)
-import Network.AWS.Data.Text (toText)
-import Network.AWS.S3
+import Amazonka hiding (Request, length, error)
+import Amazonka.Data.Text (toText)
+import Amazonka.S3
+import Amazonka.S3.ListObjectsV2
+import Amazonka.S3.Lens
+import Amazonka.Lens
 import Network.HTTP.Client
 import Network.HTTP.Client.Conduit (bodyReaderSource)
 import Network.HTTP.Simple (getResponseBody, httpJSONEither)

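Note: these import renames (and the call-site changes below) are the amazonka 1.6 to 2.x migration: request constructors gain a `new` prefix, generated lenses become qualified snake_case names, and `paginate`/`send` take the `Env` explicitly instead of running inside a dedicated `AWS` monad. A condensed sketch of the new shape (bucket and prefix invented):

```haskell
import           Amazonka (discover, newEnv, paginate)
import           Amazonka.S3 (BucketName (..))
import           Amazonka.S3.ListObjectsV2 (newListObjectsV2)
import qualified Amazonka.S3.Lens as S3
import           Conduit (lengthC, runConduit, runResourceT, (.|))
import           Control.Lens ((&), (?~))

countPages :: IO Int
countPages = do
    env <- newEnv discover                      -- 2.x: value-level 'discover'
    let req = newListObjectsV2 (BucketName "haddock.stackage.org")
                & S3.listObjectsV2_prefix ?~ "lts-22.6/"
    runResourceT $ runConduit $
        paginate env req .| lengthC             -- 2.x: Env passed explicitly
```
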
@@ -59,6 +64,7 @@ import Stackage.Database.PackageInfo
 import Stackage.Database.Query
 import Stackage.Database.Schema
 import Stackage.Database.Types
+import System.Environment (getEnvironment)
 import UnliftIO.Concurrent (getNumCapabilities)
 import Web.PathPieces (fromPathPiece, toPathPiece)
 import qualified Control.Retry as Retry

@@ -73,10 +79,9 @@ hoogleKey name = T.concat
     , ".hoo"
     ]
 
-hoogleUrl :: SnapName -> Text
-hoogleUrl n = T.concat
-    [ "https://s3.amazonaws.com/"
-    , haddockBucketName
+hoogleUrl :: SnapName -> Text -> Text
+hoogleUrl n haddockBucketUrl = T.concat
+    [ haddockBucketUrl
     , "/"
     , hoogleKey n
     ]

@@ -99,9 +104,14 @@ getStackageSnapshotsDir = do
 withResponseUnliftIO :: MonadUnliftIO m => Request -> Manager -> (Response BodyReader -> m b) -> m b
 withResponseUnliftIO req man f = withRunInIO $ \ runInIO -> withResponse req man (runInIO . f)
 
+-- | Under the SingleRun wrapper that ensures only one thing at a time is
+-- writing the file in question, ensure that a Hoogle database exists on the
+-- filesystem for the given SnapName. But only going so far as downloading it
+-- from the haddock bucket. See 'createHoogleDB' for the function that puts it
+-- there in the first place.
 newHoogleLocker ::
-       (HasLogFunc env, MonadIO m) => env -> Manager -> m (SingleRun SnapName (Maybe FilePath))
-newHoogleLocker env man = mkSingleRun hoogleLocker
+       (HasLogFunc env, MonadIO m) => env -> Manager -> Text -> m (SingleRun SnapName (Maybe FilePath))
+newHoogleLocker env man bucketUrl = mkSingleRun hoogleLocker
   where
     hoogleLocker :: MonadIO n => SnapName -> n (Maybe FilePath)
     hoogleLocker name =

@@ -111,7 +121,7 @@ newHoogleLocker env man = mkSingleRun hoogleLocker
             if exists
                 then return $ Just fp
                 else do
-                    req' <- parseRequest $ T.unpack $ hoogleUrl name
+                    req' <- parseRequest $ T.unpack $ hoogleUrl name bucketUrl
                     let req = req' {decompress = const False}
                     withResponseUnliftIO req man $ \res ->
                         case responseStatus res of

@@ -124,11 +134,12 @@ newHoogleLocker env man = mkSingleRun hoogleLocker
                                     sinkHandle h
                                 return $ Just fp
                             | status == status404 -> do
-                                logDebug $ "NotFound: " <> display (hoogleUrl name)
+                                logWarn $ "NotFound: " <> display (hoogleUrl name bucketUrl)
                                 return Nothing
                             | otherwise -> do
                                 body <- liftIO $ brConsume $ responseBody res
-                                mapM_ (logDebug . displayBytesUtf8) body
+                                logWarn $ "Unexpected status: " <> displayShow status
+                                mapM_ (logWarn . displayBytesUtf8) body
                                 return Nothing
 
 getHackageDeprecations ::

@@ -163,7 +174,12 @@ stackageServerCron StackageCronOptions {..} = do
     cabalMutable <- newIORef Map.empty
     gpdCache <- newIORef IntMap.empty
     defaultProcessContext <- mkDefaultProcessContext
-    aws <- newEnv Discover
+    aws <- do
+        aws' <- newEnv discover
+        endpoint <- lookup "AWS_S3_ENDPOINT" <$> getEnvironment
+        pure $ case endpoint of
+            Nothing -> aws'
+            Just ep -> configureService (setEndpoint True (BS8.pack ep) 443 Amazonka.S3.defaultService) aws'
     withLogFunc (setLogMinLevel scoLogLevel lo) $ \logFunc -> do
         let pantryConfig =
                 PantryConfig

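Note: this AWS_S3_ENDPOINT hook is what lets the cron job point S3 traffic at a non-AWS host (the Cloudflare R2 motivation appears in the stack.yaml hunk below). The override in isolation (hostname invented):

```haskell
import           Amazonka (Env, configureService, discover, newEnv, setEndpoint)
import qualified Amazonka.S3 as S3
import qualified Data.ByteString.Char8 as BS8

-- Build an Env whose S3 service talks TLS to a custom endpoint on port 443.
mkCustomS3Env :: String -> IO Env
mkCustomS3Env host = do
    env <- newEnv discover
    pure $ configureService
        (setEndpoint True (BS8.pack host) 443 S3.defaultService)
        env
```
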
@@ -179,8 +195,9 @@ stackageServerCron StackageCronOptions {..} = do
                     , pcCasaMaxPerRequest = defaultCasaMaxPerRequest
                     , pcSnapshotLocation = defaultSnapshotLocation
                     }
-        currentHoogleVersionId <-
-            runRIO logFunc $ getCurrentHoogleVersionIdWithPantryConfig pantryConfig
+        currentHoogleVersionId <- runRIO logFunc $ do
+            runStackageMigrations' pantryConfig
+            getCurrentHoogleVersionIdWithPantryConfig pantryConfig
         let stackage =
                 StackageCron
                     { scPantryConfig = pantryConfig

@@ -191,6 +208,7 @@ stackageServerCron StackageCronOptions {..} = do
                     , scCachedGPD = gpdCache
                     , scEnvAWS = aws
                     , scDownloadBucketName = scoDownloadBucketName
+                    , scDownloadBucketUrl = scoDownloadBucketUrl
                     , scUploadBucketName = scoUploadBucketName
                     , scSnapshotsRepo = scoSnapshotsRepo
                     , scReportProgress = scoReportProgress

@@ -218,7 +236,6 @@ runStackageUpdate doNotUpload = do
     unless doNotUpload uploadSnapshotsJSON
     buildAndUploadHoogleDB doNotUpload
     logInfo "Finished building and uploading Hoogle DBs"
-    run $ rawExecute "TRUNCATE TABLE latest_version" []
 
 
 -- | This will look at 'global-hints.yaml' and will create core package getters that are reused

@@ -386,10 +403,11 @@ addPantryPackage sid compiler isHidden flags (PantryPackage pc treeKey) = do
 checkForDocs :: SnapshotId -> SnapName -> ResourceT (RIO StackageCron) ()
 checkForDocs snapshotId snapName = do
     bucketName <- lift (scDownloadBucketName <$> ask)
+    env <- asks scEnvAWS
     mods <-
         runConduit $
-        AWS.paginate (req bucketName) .| concatMapC (^. lovrsContents) .|
-        mapC (\obj -> toText (obj ^. oKey)) .|
+        paginate env (req bucketName) .| concatMapC (fromMaybe [] . (^. listObjectsV2Response_contents)) .|
+        mapC (\obj -> toText (obj ^. object_key)) .|
         concatMapC (T.stripSuffix ".html" >=> T.stripPrefix prefix >=> pathToPackageModule) .|
         sinkList
     -- it is faster to download all modules in this snapshot, than process them with a conduit all

@@ -398,16 +416,16 @@ checkForDocs snapshotId snapName = do
     -- Cache is for SnapshotPackageId, there will be many modules per package, no need to look into
     -- the database for each one of them.
     n <- max 1 . (`div` 2) <$> getNumCapabilities
-    notFoundList <- lift $ pooledMapConcurrentlyN n (markModules sidsCacheRef) mods
-    forM_ (Set.fromList $ catMaybes notFoundList) $ \pid ->
+    unexpectedPackages <- lift $ pooledMapConcurrentlyN n (markModules sidsCacheRef) mods
+    forM_ (Set.fromList $ catMaybes unexpectedPackages) $ \pid ->
         lift $
         logWarn $
-        "Documentation available for package '" <> display pid <>
-        "' but was not found in this snapshot: " <>
+        "Documentation found for package '" <> display pid <>
+        "', which does not exist in this snapshot: " <>
         display snapName
   where
     prefix = textDisplay snapName <> "/"
-    req bucketName = listObjectsV2 (BucketName bucketName) & lovPrefix .~ Just prefix
+    req bucketName = newListObjectsV2 (BucketName bucketName) & listObjectsV2_prefix ?~ prefix
 -- | This function records all package modules that have documentation available, the ones
 -- that are not found in the snapshot reported back as an error. Besides being run
 -- concurrently this function optimizes the SnapshotPackageId lookup as well, since that can

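Note: the conduit above lists every object under `<snapName>/` in the download bucket and recovers (package, module) pairs from the key names. One key walked through the stripSuffix/stripPrefix stages (key invented; pathToPackageModule simplified to a split):

```haskell
import           Control.Monad ((>=>))
import           Data.Text (Text)
import qualified Data.Text as T

-- Standalone sketch of the key-to-module extraction; the real
-- pathToPackageModule returns Stackage's own types.
keyToPackageModule :: Text -> Text -> Maybe (Text, Text)
keyToPackageModule prefix =
    T.stripSuffix ".html" >=> T.stripPrefix prefix >=> split
  where
    split t = case T.splitOn "/" t of
        [pkg, modPath] -> Just (pkg, T.replace "-" "." modPath)
        _              -> Nothing

-- >>> keyToPackageModule "lts-22.6/" "lts-22.6/mypkg-1.0/Data-Foo.html"
-- Just ("mypkg-1.0","Data.Foo")
```
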
@@ -417,7 +435,7 @@ checkForDocs snapshotId snapName = do
         let mSnapshotPackageId = Map.lookup pid sidsCache
         mFound <- run $ markModuleHasDocs snapshotId pid mSnapshotPackageId modName
         case mFound of
-            Nothing -> pure $ Just pid
+            Nothing -> pure $ Just pid -- This package doesn't exist in the snapshot!
             Just snapshotPackageId
                 | Nothing <- mSnapshotPackageId -> do
                     atomicModifyIORef'

@@ -663,9 +681,9 @@ uploadSnapshotsJSON = do
     uploadBucket <- scUploadBucketName <$> ask
     let key = ObjectKey "snapshots.json"
     uploadFromRIO key $
-        set poACL (Just OPublicRead) $
-        set poContentType (Just "application/json") $
-        putObject (BucketName uploadBucket) key (toBody snapshots)
+        set putObject_acl (Just ObjectCannedACL_Public_read) $
+        set putObject_contentType (Just "application/json") $
+        newPutObject (BucketName uploadBucket) key (toBody snapshots)
 
 -- | Writes a gzipped version of hoogle db into temporary file onto the file system and then uploads
 -- it to S3. Temporary file is removed upon completion

@@ -677,14 +695,14 @@ uploadHoogleDB fp key =
         body <- toBody <$> readFileBinary fpgz
         uploadBucket <- scUploadBucketName <$> ask
         uploadFromRIO key $
-            set poACL (Just OPublicRead) $ putObject (BucketName uploadBucket) key body
+            set putObject_acl (Just ObjectCannedACL_Public_read) $ newPutObject (BucketName uploadBucket) key body
 
 
-uploadFromRIO :: AWSRequest a => ObjectKey -> a -> RIO StackageCron ()
+uploadFromRIO :: (AWSRequest a, Typeable a, Typeable (AWSResponse a)) => ObjectKey -> a -> RIO StackageCron ()
 uploadFromRIO key po = do
     logInfo $ "Uploading " <> displayShow key <> " to S3 bucket."
-    env <- ask
-    eres <- runResourceT $ runAWS env $ trying _Error $ send po
+    env <- asks scEnvAWS
+    eres <- runResourceT $ trying _Error $ send env po
     case eres of
         Left e ->
             logError $ "Couldn't upload " <> displayShow key <> " to S3 because " <> displayShow e

@@ -693,18 +711,30 @@ uploadFromRIO key po = do
 buildAndUploadHoogleDB :: Bool -> RIO StackageCron ()
 buildAndUploadHoogleDB doNotUpload = do
     snapshots <- lastLtsNightlyWithoutHoogleDb 5 5
+    -- currentHoogleVersionId <- scHoogleVersionId <$> ask
     env <- ask
-    locker <- newHoogleLocker (env ^. logFuncL) (env ^. envManager)
+    awsEnv <- asks scEnvAWS
+    bucketUrl <- asks scDownloadBucketUrl
+    -- locker is an action that returns the path to a hoogle db, if one exists
+    -- in the haddock bucket already.
+    locker <- newHoogleLocker (env ^. logFuncL) (awsEnv ^. env_manager) bucketUrl
+    let insertH = checkInsertSnapshotHoogleDb True
+        checkH = checkInsertSnapshotHoogleDb False
     for_ snapshots $ \(snapshotId, snapName) ->
-        unlessM (checkInsertSnapshotHoogleDb False snapshotId) $ do
+        -- Even though we just got a list of snapshots that don't have hoogle
+        -- databases, we check again. For some reason. I don't see how this can
+        -- actually be useful. both lastLtsNightlyWithoutHoogleDb and
+        -- checkInsertSnapshotHoogleDb just check against SnapshotHoogleDb.
+        -- Perhaps the check can be removed.
+        unlessM (checkH snapshotId) $ do
             logInfo $ "Starting Hoogle database download: " <> display (hoogleKey snapName)
             mfp <- singleRun locker snapName
             case mfp of
                 Just _ -> do
                     logInfo $ "Current hoogle database exists for: " <> display snapName
-                    void $ checkInsertSnapshotHoogleDb True snapshotId
+                    void $ insertH snapshotId
                 Nothing -> do
-                    logInfo $ "Current hoogle database does not yet exist for: " <> display snapName
+                    logInfo $ "Current hoogle database does not yet exist in the bucket for: " <> display snapName
                     mfp' <- createHoogleDB snapshotId snapName
                     forM_ mfp' $ \fp -> do
                         let key = hoogleKey snapName

@@ -713,24 +743,27 @@ buildAndUploadHoogleDB doNotUpload = do
                         renamePath fp dest
                         unless doNotUpload $ do
                             uploadHoogleDB dest (ObjectKey key)
-                            void $ checkInsertSnapshotHoogleDb True snapshotId
+                            void $ insertH snapshotId
 
+-- | Create a hoogle db from haddocks for the given snapshot, and upload it to
+-- the haddock bucket.
 createHoogleDB :: SnapshotId -> SnapName -> RIO StackageCron (Maybe FilePath)
 createHoogleDB snapshotId snapName =
     handleAny logException $ do
         logInfo $ "Creating Hoogle DB for " <> display snapName
-        downloadBucket <- scDownloadBucketName <$> ask
+        downloadBucketUrl <- scDownloadBucketUrl <$> ask
         let root = "hoogle-gen"
             bindir = root </> "bindir"
            outname = root </> "output.hoo"
            tarKey = toPathPiece snapName <> "/hoogle/orig.tar"
-           tarUrl = "https://s3.amazonaws.com/" <> downloadBucket <> "/" <> tarKey
+           tarUrl = downloadBucketUrl <> "/" <> tarKey
            tarFP = root </> T.unpack tarKey
         -- When tarball is downloaded it is saved with durability and atomicity, so if it
         -- is present it is not in a corrupted state
         unlessM (doesFileExist tarFP) $ do
             req <- parseRequest $ T.unpack tarUrl
-            man <- view envManager
+            env <- asks scEnvAWS
+            let man = env ^. env_manager
             withResponseUnliftIO req {decompress = const True} man $ \res -> do
                 throwErrorStatusCodes req res
                 createDirectoryIfMissing True $ takeDirectory tarFP

@@ -23,7 +23,7 @@ import Distribution.Compiler (CompilerFlavor(GHC))
 import Distribution.Package (Dependency(..))
 import Distribution.PackageDescription (CondTree(..), Condition(..),
                                         ConfVar(..),
-                                        Flag(flagDefault, flagName), FlagName,
+                                        PackageFlag(..), flagDefault, flagName, FlagName,
                                         GenericPackageDescription, author,
                                         condExecutables, condLibrary,
                                         description, genPackageFlags, homepage,

@@ -152,7 +152,7 @@ getCheckCond compiler overrideFlags gpd = go
   where
     go (Var (OS os)) = os == Linux -- arbitrary
     go (Var (Arch arch)) = arch == X86_64 -- arbitrary
-    go (Var (Flag flag)) = fromMaybe False $ Map.lookup flag flags
+    go (Var (PackageFlag flag)) = fromMaybe False $ Map.lookup flag flags
     go (Var (Impl flavor range)) = flavor == compilerFlavor && compilerVersion `withinRange` range
     go (Lit b) = b
     go (CNot c) = not $ go c

@@ -167,25 +167,48 @@ ltsBefore x y = do
     go (Entity _ lts) = (ltsSnap lts, SNLts (ltsMajor lts) (ltsMinor lts))
 
 
+-- | Queries the database for the latest LTS and nightly snapshots that do not
+-- have corresponding entries in the SnapshotHoogleDb table with the current
+-- Hoogle version.
 lastLtsNightlyWithoutHoogleDb :: Int -> Int -> RIO StackageCron [(SnapshotId, SnapName)]
 lastLtsNightlyWithoutHoogleDb ltsCount nightlyCount = do
     currentHoogleVersionId <- scHoogleVersionId <$> ask
     let getSnapshotsWithoutHoogeDb snapId snapCount =
             map (unValue *** unValue) <$>
             select
+                -- "snap" is either Lts or Nightly, while "snapshot" is indeed
+                -- "snapshot"
                 (from $ \(snap `InnerJoin` snapshot) -> do
                     on $ snap ^. snapId ==. snapshot ^. SnapshotId
                     where_ $
                         notExists $
                         from $ \snapshotHoogleDb ->
                             where_ $
-                                (snapshotHoogleDb ^. SnapshotHoogleDbSnapshot ==. snapshot ^.
-                                 SnapshotId) &&.
-                                (snapshotHoogleDb ^. SnapshotHoogleDbVersion ==.
-                                 val currentHoogleVersionId)
+                                (snapshotHoogleDb ^. SnapshotHoogleDbSnapshot
+                                 ==. snapshot ^. SnapshotId)
+                                &&. (snapshotHoogleDb ^. SnapshotHoogleDbVersion
+                                 ==. val currentHoogleVersionId)
                     orderBy [desc (snapshot ^. SnapshotCreated)]
                     limit $ fromIntegral snapCount
                     pure (snapshot ^. SnapshotId, snapshot ^. SnapshotName))
+        -- In sql, this query would be
+        --
+        --   select snapshot.id, snapshot.name
+        --   from snapshot
+        --   join $foo as snap -- either Lts or Nightly
+        --     on snap.snap = snapshot.id
+        --   where not exists (
+        --     select 1
+        --     from snapshot_hoogle_db
+        --     where snapshot_hoogle_db.snapshot = snapshot.id
+        --       and snapshot_hoogle_db.version = $currentHoogleVersionId
+        --   )
+        --   order by snapshot.created desc
+        --   limit $snapCount
+        --
+        -- So it returns a list of snapshots where there is no
+        -- corresponding entry in the snapshot_hoogle_db table for the
+        -- current hoogle version.
     run $ do
         lts <- getSnapshotsWithoutHoogeDb LtsSnap ltsCount
         nightly <- getSnapshotsWithoutHoogeDb NightlySnap nightlyCount

@@ -206,7 +229,7 @@ snapshotsJSON = do
             Just n -> (("nightly" A..= printNightly n) :)
     return $ A.object $ nightly lts
   where
-    toObj lts@(major, _) = T.pack ("lts-" <> show major) A..= printLts lts
+    toObj lts@(major, _) = fromString ("lts-" <> show major) A..= printLts lts
     printLts (major, minor) = "lts-" <> show major <> "." <> show minor
     printNightly day = "nightly-" <> T.pack (show day)

@@ -1159,10 +1182,27 @@ checkInsertSnapshotHoogleDb shouldInsert snapshotId = do
                      (from
                           (\v -> do
                                where_ $ v ^. VersionId ==. val hoogleVersionId
+                               -- This is reaching into the *pantry*
+                               -- database!
                                pure (v ^. VersionVersion)))
+            -- in sql, this query would be
+            --
+            --   select version.version
+            --   from version
+            --   where version.id = $hoogleVersionId
+            --
+            -- So it returns the "version"s that correspond to the
+            -- current hoogle version id.
+            -- mhver is now Maybe Version, and corresponds to the current
+            -- hoogle version, assuming it exists in the Version table
             forM_ mhver $ \hver ->
                 lift $
                 logInfo $
                 "Marking hoogle database for version " <> display hver <> " as available."
+            -- whether or not the version exists, we still put it into snapshot_hoogle_db
+            -- So literally the only use of the above query is to log the
+            -- action we're taking.
             isJust <$> P.insertUniqueEntity sh
+        -- if we're not inserting, we're just checking if it already exists
+        -- in snapshot_hoogle_db.
         else isJust <$> P.checkUnique sh

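Note for readers of the `insertH`/`checkH` pairing above: the two persistent calls have opposite polarity, which is easy to misread:

```haskell
-- P.insertUniqueEntity sh  -- Just entity => the row did NOT exist and was inserted
-- P.checkUnique sh         -- Just unique => a conflicting row DOES already exist
--
-- So checkInsertSnapshotHoogleDb True returns True when the row was newly
-- inserted, while checkInsertSnapshotHoogleDb False returns True when the
-- row already exists; that is why buildAndUploadHoogleDB wraps the check
-- in unlessM.
```
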
@@ -15,6 +15,7 @@
 {-# LANGUAGE TemplateHaskell #-}
 {-# LANGUAGE TypeFamilies #-}
 {-# LANGUAGE UndecidableInstances #-}
+{-# LANGUAGE TypeOperators #-}
 module Stackage.Database.Schema
     ( -- * Database
       run

@@ -23,6 +24,7 @@ module Stackage.Database.Schema
     , GetStackageDatabase(..)
     , withStackageDatabase
     , runStackageMigrations
+    , runStackageMigrations'
     , getCurrentHoogleVersionId
     , getCurrentHoogleVersionIdWithPantryConfig
     -- * Tables

@@ -217,25 +219,33 @@ withStackageDatabase shouldLog dbs inner = do
     bracket (liftIO getPoolIO) (liftIO . destroyAllResources) $ \pool -> do
         inner (StackageDatabase (`runSqlPool` pool))
 
-getSchema :: (HasLogFunc env, GetStackageDatabase env (RIO env)) => RIO env (Maybe Int)
+getSchema :: ReaderT SqlBackend (RIO RIO.LogFunc) (Maybe Int)
 getSchema =
-    run $ do
+    do
         eres <- tryAny (selectList [] [])
         lift $ logInfo $ "getSchema result: " <> displayShow eres
         case eres of
             Right [Entity _ (Schema v)] -> return $ Just v
             _ -> return Nothing
 
+runStackageMigrations' :: PantryConfig -> RIO RIO.LogFunc () -- HasLogFunc env => PantryConfig -> RIO env ()
+runStackageMigrations' pantryConfig = do
+    stackageDb <- getStackageDatabaseFromPantry pantryConfig
+    runDatabase stackageDb stackageMigrations
+
+
 runStackageMigrations :: (HasLogFunc env, GetStackageDatabase env (RIO env)) => RIO env ()
-runStackageMigrations = do
-    actualSchema <- getSchema
-    run $ do
-        runMigration Pantry.migrateAll
-        runMigration migrateAll
-        unless (actualSchema == Just currentSchema) $ do
-            lift $
-                logWarn $
-                "Current schema does not match actual schema: " <>
-                displayShow (actualSchema, currentSchema)
-            deleteWhere ([] :: [Filter Schema])
-            insert_ $ Schema currentSchema
+runStackageMigrations = run stackageMigrations
+
+stackageMigrations :: ReaderT SqlBackend (RIO RIO.LogFunc) ()
+stackageMigrations = do
+    runMigration Pantry.migrateAll
+    runMigration migrateAll
+    actualSchema <- getSchema
+    unless (actualSchema == Just currentSchema) $ do
+        lift $
+            logWarn $
+            "Current schema does not match actual schema: " <>
+            displayShow (actualSchema, currentSchema)
+        deleteWhere ([] :: [Filter Schema])
+        insert_ $ Schema currentSchema

|||||||
@ -40,7 +40,8 @@ module Stackage.Database.Types
|
|||||||
, Origin(..)
|
, Origin(..)
|
||||||
, LatestInfo(..)
|
, LatestInfo(..)
|
||||||
, Deprecation(..)
|
, Deprecation(..)
|
||||||
, haddockBucketName
|
, defHaddockBucketName
|
||||||
|
, defHaddockBucketUrl
|
||||||
, Changelog(..)
|
, Changelog(..)
|
||||||
, Readme(..)
|
, Readme(..)
|
||||||
, StackageCronOptions(..)
|
, StackageCronOptions(..)
|
||||||
@@ -49,7 +50,7 @@ module Stackage.Database.Types
 import Data.Aeson
 import qualified Data.Text as T
 import Data.Text.Read (decimal)
-import Network.AWS (Env, HasEnv(..))
+import Amazonka (Env)
 import Pantry (BlobKey(..), CabalFileInfo(..), FileSize(..),
                HasPantryConfig(..), PantryConfig, PackageIdentifierRevision(..), TreeKey(..))
 import Pantry.SHA256 (fromHexText)

@@ -61,12 +62,16 @@ import Stackage.Database.Schema
 import Text.Blaze (ToMarkup(..))
 import Types
 
-haddockBucketName :: Text
-haddockBucketName = "haddock.stackage.org"
+defHaddockBucketName :: Text
+defHaddockBucketName = "haddock.stackage.org"
+
+defHaddockBucketUrl :: Text
+defHaddockBucketUrl = "https://s3.amazonaws.com/" <> defHaddockBucketName
 
 data StackageCronOptions = StackageCronOptions
     { scoForceUpdate :: !Bool
     , scoDownloadBucketName :: !Text
+    , scoDownloadBucketUrl :: !Text
     , scoUploadBucketName :: !Text
     , scoDoNotUpload :: !Bool
     , scoLogLevel :: !LogLevel

@@ -84,6 +89,7 @@ data StackageCron = StackageCron
     , scCachedGPD :: !(IORef (IntMap GenericPackageDescription))
     , scEnvAWS :: !Env
     , scDownloadBucketName :: !Text
+    , scDownloadBucketUrl :: !Text
     , scUploadBucketName :: !Text
     , scSnapshotsRepo :: !GithubRepo
     , scReportProgress :: !Bool

@@ -91,9 +97,6 @@ data StackageCron = StackageCron
     , scHoogleVersionId :: !VersionId
     }
 
-instance HasEnv StackageCron where
-    environment = lens scEnvAWS (\c f -> c {scEnvAWS = f})
-
 instance HasLogFunc StackageCron where
     logFuncL = lens scLogFunc (\c f -> c {scLogFunc = f})
 

@@ -15,6 +15,7 @@ module Stackage.Snapshot.Diff
 
 import ClassyPrelude (sortOn, toCaseFold)
 import Data.Aeson
+import Data.Aeson.Key
 import qualified Data.Text as T (commonPrefixes)
 import Data.These
 import RIO

@@ -61,7 +62,7 @@ newtype VersionChange = VersionChange { unVersionChange :: These VersionP VersionP }
     deriving (Show, Eq, Generic, Typeable)
 
 instance ToJSON (WithSnapshotNames VersionChange) where
-    toJSON (WithSnapshotNames (toPathPiece -> aKey) (toPathPiece -> bKey) change) =
+    toJSON (WithSnapshotNames (fromText . toPathPiece -> aKey) (fromText . toPathPiece -> bKey) change) =
         case change of
             VersionChange (This a) -> object [ aKey .= a ]
             VersionChange (That b) -> object [ bKey .= b ]

src/Types.hs
@@ -407,7 +407,25 @@ instance ToMarkup VersionRangeP where
 instance PersistField VersionRangeP where
     toPersistValue = PersistText . textDisplay
     fromPersistValue v =
-        fromPersistValue v >>= bimap (T.pack . displayException) VersionRangeP . dtParse
+        fromPersistValue v >>= bimap (T.pack . displayException) VersionRangeP . dtParse . hackwardCompat_3_4
+      where
+        -- We use parseSimple under the hood, which always parses using
+        -- the latest version of the Cabal spec. In practice, this hasn't
+        -- been a problem. Until now.
+        --
+        -- Cabal spec 3.4 dropped support for "-any" as a version range, and the
+        -- database is full of such values. Luckily, ">=0" is a
+        -- backward-compatible synonym for "-any". New versions of this app will
+        -- write ">=0" instead of "-any", which old versions of this app will
+        -- understand just fine. We just need to substitute on read.
+        --
+        -- FIXME: strictly speaking, VersionRange cannot be parsed without
+        -- knowing the Cabal spec version of the package that used it. There's
+        -- nothing *wrong* with "-any". That means we probably need to decode it
+        -- no further than Text and do further processing outside of the
+        -- PersistField instance.
+        hackwardCompat_3_4 "-any" = ">=0"
+        hackwardCompat_3_4 t = t
 instance PersistFieldSql VersionRangeP where
     sqlType _ = SqlString
 

stack.yaml
@@ -1,16 +1,32 @@
-resolver: lts-18.28
+resolver: lts-22.6
 extra-deps:
-  - amazonka-1.6.1
-  - barrier-0.1.1
-  - classy-prelude-yesod-1.5.0
-  - unliftio-core-0.1.2.0
-  - yesod-gitrepo-0.3.0
-  - static-bytes-0.1.0
-  - companion-0.1.0
-  - aeson-warning-parser-0.1.0
-  - hpack-0.35.0
+  # WARNING: Changing the hoogle version causes stackage-server-cron to regenerate
+  # Hoogle databases FOR EVERY SNAPSHOT, EVER. Usually, that's ok! But don't
+  # forget! The consequences are: (1) More disk usage. Hoogle databases are not
+  # cleaned up on the stackage-server-cron side, nor on the stackage-server side.
+  # (Yet. This will change.) (2) More bucket usage. While it's easy to say it's a
+  # drop in the literal bucket, such excessive misuse of storage makes
+  # administration, backups, disaster recovery, and many other DevOps concerns
+  # harder and harder. All but the latest LTS's database are literally never used
+  # anyway. (3) The Hoogle database schema is defined by the first three
+  # version components. Any more frequent regeneration is pure unadulterated
+  # waste. (4) Stackage's Hoogle search will be unavailable until the new
+  # databases have been generated.
+  - hoogle-5.0.18.4@sha256:1372458e97dff541fcda099236af7936bf93ee6b8c5d15695ee6d9426dff5eed,3171
+  - safe-0.3.20@sha256:7813ad56161f57d5162a924de5597d454162a2faed06be6e268b37bb5c19d48d,2312
+  - Cabal-3.8.1.0@sha256:77121d8e1aff14a0fd95684b751599db78a7dd26d55862d9fcef27c88b193e9d,12609
+  - Cabal-syntax-3.8.1.0@sha256:ed2d937ba6c6a20b75850349eedd41374885fc42369ef152d69e2ba70f44f593,7620
   - git: https://github.com/commercialhaskell/pantry.git
     commit: 5df643cc1deb561d9c52a9cb6f593aba2bc4c08e
+  # This amazonka is patched to support Cloudflare, which kinda has a bug. See
+  # https://github.com/brendanhay/amazonka/issues/975 for details.
+  - github: chreekat/amazonka
+    commit: b/r2-compat
+    subdirs: [lib/amazonka-core]
 
-drop-packages:
-  - Cabal
+nix:
+  packages:
+    - zlib
+    - postgresql
+    - pkg-config
+    - haskell-language-server

@@ -3,7 +3,6 @@
 <LongName>Hoogle Stackage.org</LongName>
 <Description>Search modules on Stackage.org using hoogle</Description>
 <Developer>FP Complete CORP.</Developer>
-<Attribution>Copyright FP Complete CORP.</Attribution>
 <AdultContent>false</AdultContent>
 <Language>en-us</Language>
 <InputEncoding>UTF-8</InputEncoding>

@@ -1,15 +1,14 @@
 <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
 <ShortName>Stackage Packages</ShortName>
 <LongName>Stackage.org package page</LongName>
-<Description>Just to a Stackage.org package page</Description>
+<Description>Jump to a Stackage.org package page</Description>
 <Developer>FP Complete CORP.</Developer>
-<Attribution>Copyright FP Complete CORP.</Attribution>
 <AdultContent>false</AdultContent>
 <Language>en-us</Language>
 <InputEncoding>UTF-8</InputEncoding>
 <OutputEncoding>UTF-8</OutputEncoding>
 <Image width="222" height="222" type="image/x-icon">https://www.stackage.org/static/img/stackage.png</Image>
 <Url type="text/html" method="GET" template="https://www.stackage.org/package/{searchTerms}"/>
 <Query role="example" searchTerms="bytestring"/>
 <moz:SearchForm>https://www.stackage.org</moz:SearchForm>
 </OpenSearchDescription>

@@ -30,6 +30,10 @@ $else
     <div .container>
       <div .row>
         <div .span12>
-          A service provided by
-          <a href="http://www.fpcomplete.com/">
+          A service created by
+          <a href="https://www.fpcomplete.com/">
             FP Complete
+          in 2014 | Donated to the
+          <a href="https://haskell.foundation">
+            Haskell Foundation
+          in 2024.

@@ -60,4 +60,7 @@
   <a href="https://github.com/fpco/stackage#frequently-asked-questions">FAQ section on Github</a>.
 
 <p>
-  Stackage's infrastructure, build machines, initial creation and ongoing maintenance, are proudly sponsored by <a href="https://www.fpcomplete.com">FP Complete</a>.
+  Stackage's infrastructure, build machines, initial creation and ongoing maintenance were proudly sponsored by <a href="https://www.fpcomplete.com">FP Complete</a> from 2014 to 2024.
+
+<p>
+  Today it is a service provided by the <a href="https://haskell.foundation">Haskell Foundation</a>.