Merge remote-tracking branch 'upstream/master' into auth_opt_out

Saatvik Shah 2021-07-18 18:46:18 +02:00
commit 3791b0ee7c
27 changed files with 1149 additions and 15 deletions

.gitignore

@@ -1 +1,3 @@
.dir-locals.el
# Local Netlify folder
.netlify

@@ -27,3 +27,5 @@ Here I write down how I felt while developing this app, so we can use this feedback
- Handling errors in React and fetching data -> boring, also not sure how to do it.
- I forgot to run `wasp db migrate-dev` after I switched db.system to PostgreSQL, and it took me some time to figure out why it is still using SQLite.
- I got a message from Prisma that I should remove my migrations directory. It can be confusing for newcomers as to what really needs to be done.
- When I came back after some time, I had forgotten how to run the database and what the name of the Heroku app was. Luckily, I still had the command for running the database in my terminal history, and for deploying I opened the Wasp docs and followed the instructions.
- When deploying after making changes to the schema that require a data migration (adding required fields to entities), I deleted the whole database on Heroku. If I actually had to do a data migration, I am not sure how I would have done it, so that is a very important question and something we need to figure out.

@@ -1,6 +1,8 @@
import HttpError from '@wasp/core/HttpError.js'

export const createThought = async (args, context) => {
  if (!context.user) { throw new HttpError(403) }

  args.tagNames?.map(tagName => {
    if (!/^[a-z](\.?[a-z0-9])*$/.test(tagName)) {
      throw new HttpError(400, "Tag must contain only lowercase letters and dots.")
@@ -11,10 +13,11 @@ export const createThought = async (args, context) => {
       textMarkdown: args.textMarkdown,
       tags: {
         connectOrCreate: args.tagNames?.map(tagName => ({
-          where: { name: tagName },
-          create: { name: tagName }
+          where: { name_userId: { name: tagName, userId: context.user.id } },
+          create: { name: tagName, user: { connect: { id: context.user.id } } }
         }))
-      }
+      },
+      user: { connect: { id: context.user.id } }
     }
   })
 }

@@ -1,15 +1,25 @@
import HttpError from '@wasp/core/HttpError.js'

export const getThoughts = async (args, context) => {
  if (!context.user) { throw new HttpError(403) }

  return context.entities.Thought.findMany({
    orderBy: [{ createdAt: 'desc' }],
    include: { tags: true },
    where: {
      user: { id: context.user.id },
      tags: { some: { name: args.tagName || undefined } }
    }
  })
}

export const getTags = async (args, context) => {
  if (!context.user) { throw new HttpError(403) }

  return context.entities.Tag.findMany({
    orderBy: [{ name: 'asc' }],
    where: {
      user: { id: context.user.id }
    }
  })
}

@@ -50,24 +50,35 @@ query getTags {
}
 entity Thought {=psl
     id           Int      @id @default(autoincrement())
     textMarkdown String
     createdAt    DateTime @default(now())
     updatedAt    DateTime @updatedAt
+    tags         Tag[]
+    user         User     @relation(fields: [userId], references: [id])
+    userId       Int
 psl=}
 entity Tag {=psl
     id        Int      @id @default(autoincrement())
-    name      String   @unique
+    name      String
     createdAt DateTime @default(now())
+    thoughts  Thought[]
+    user      User     @relation(fields: [userId], references: [id])
+    userId    Int
+    @@unique([name, userId])
 psl=}
 entity User {=psl
     id       Int    @id @default(autoincrement())
     email    String @unique
     password String
+    thoughts Thought[]
+    tags     Tag[]
 psl=}
dependencies {=json

@@ -0,0 +1,18 @@
/*
Warnings:
- Added the required column `userId` to the `Tag` table without a default value. This is not possible if the table is not empty.
- Added the required column `userId` to the `Thought` table without a default value. This is not possible if the table is not empty.
*/
-- AlterTable
ALTER TABLE "Tag" ADD COLUMN "userId" INTEGER NOT NULL;
-- AlterTable
ALTER TABLE "Thought" ADD COLUMN "userId" INTEGER NOT NULL;
-- AddForeignKey
ALTER TABLE "Tag" ADD FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Thought" ADD FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

@@ -0,0 +1,11 @@
/*
Warnings:
- A unique constraint covering the columns `[name,userId]` on the table `Tag` will be added. If there are existing duplicate values, this will fail.
*/
-- DropIndex
DROP INDEX "Tag.name_unique";
-- CreateIndex
CREATE UNIQUE INDEX "Tag.name_userId_unique" ON "Tag"("name", "userId");

@@ -120,6 +120,13 @@ NOTE: Reload page if blank.
Keep adding "fix" commits until PR is approved, then again squash them all into one commit.
9. Reviewer will merge the branch into `master`. Yay!
## Design docs (aka RFCs)
If the feature you are implementing is complex, be it regarding its design or technical implementation, we recommend creating a [design doc](https://www.industrialempathy.com/posts/design-docs-at-google/) (aka RFC).
It is a great way to share your idea with others while also getting help and feedback.
To create one, make a PR that adds a markdown document under `wasp/docs/design-docs`, and in that markdown document explain the thinking behind and the choices made when deciding how to implement the feature.
Others will comment on your design doc, and once it has gone through the needed iterations and is approved, you can start with the implementation of the feature (in a separate PR).
## Codebase overview
Wasp is implemented in Haskell.

@@ -0,0 +1,11 @@
# Design docs
A [design doc](https://www.industrialempathy.com/posts/design-docs-at-google/) (DD) is an informal document that the primary author of a software system/feature creates before they start implementing it.
It documents the high-level implementation strategy and key decisions, with emphasis on the trade-offs that were considered during those decisions.
Contributors are encouraged to submit DDs via pull request if a feature is complex and could benefit from documenting its design or from having a discussion before its implementation starts.
In this directory, we collect DDs for various Wasp features, one markdown document per DD.

@@ -0,0 +1,93 @@
# Support for executing custom code on server setup
Right now, the only code a dev can write that executes on the server are operations (actions and queries) and any code they import.
Operations are executed when called by the client.
If a dev wants to write a piece of code that executes on server start, in order to perform some kind of setup, e.g. connect to the database or schedule a cron job, they can't do it.
Additionally, a dev currently can't influence or modify the default server setup that Wasp performs in any way, which might prove to be too rigid in the future.
While this is a separate concern, it is closely related to executing code on server start and is worth considering at the same time.
NOTE: What a dev can currently do is implement a JS singleton, which can then be imported in JS operations and used there (see the sketch below).
Such a singleton could allocate resources the very first time it is called, e.g. it could establish a connection to the database in a lazy manner.
This somewhat solves the problem above, in cases where it is ok for resource allocation to happen lazily, on the first request.
It can't be used to customize the Wasp setup though, and it can't perform the setup upfront, before the very first request, which means it can't be used to e.g. define a scheduled job.
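For illustration, a minimal sketch of such a singleton. `connectToDb` here is a hypothetical helper standing in for whatever resource allocation the app needs, not a Wasp API:
```js
// ext/dbConnection.js -- sketch of the lazy singleton workaround.
import { connectToDb } from './db.js' // assumed helper, not a Wasp API

let dbConnection = null

// Allocates the resource lazily, only when first requested by an operation.
export const getDbConnection = async () => {
  if (!dbConnection) {
    dbConnection = await connectToDb()
  }
  return dbConnection
}
```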
## Feedback
I found that people are requesting the same feature from NextJS, in the following discussions:
- https://github.com/vercel/next.js/discussions/11686
- https://github.com/vercel/next.js/discussions/15341
They want to run some custom setup, like:
- Connecting to a database.
- Starting some jobs.
- Setting up a store or DB that uses the filesystem. For example, a leveldb instance or a sqlite instance.
- Reading data from a file.
- Setting up a local 'scratch' dir, relative to the build file.
- Init the DB, configure the logger, even add middlewares.
- Init crons and mongoose.
- Initialize a few application/site-level parameters on the server which can be reused (on the server) on each request and updated on a set interval.
NextJS doesn't have a solution for this; instead, their official answer is that you should create a standalone microservice for that,
or that you can use the [custom server](https://nextjs.org/docs/advanced-features/custom-server) feature, where you use Next programmatically in your own custom nodejs server, but then you lose a lot of the benefits that NextJS provides.
The solution people suggested was an async function that returns an object (with e.g. allocated resources) that is then included in the "context" that is passed around to the operations.
## Requirements
### Basic
- Dev can specify, through the wasp language, a JS function that will be executed on server start.
Such a function would be async, take no arguments, and return an object that would be available in operations (through `context`).
### Advanced
- Instead of returning an object that will be added to `context`, the function could return a function that modifies the `context`.
This gives more control to the dev, but it can also lead to them messing up the `context`.
- The function could take arguments which expose certain parts of the server, and therefore give the dev an opportunity to affect certain parts of the app.
For example, they could modify the expressJS router, or they would get access to Prisma.
This again can be problematic, as it gives the developer space to mess things up.
- The function could return not just an object to be added to `context`, but also other things that modify how Wasp works (see the sketch after this list).
For example, it could return an expressJS router that will be added to the expressJS router created by Wasp.
This way, the dev can extend different parts of Wasp while not being able to mess things up, since they don't modify the existing configuration directly.
Instead, they return pluggable parts and Wasp plugs them into the right places.
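A rough sketch of how such a pluggable setup function might look. The object passed in and the expectation that Wasp mounts the returned router are assumptions, none of this is an existing Wasp API:
```js
// ext/serverSetup.js -- hypothetical sketch of the "advanced" variant.
import express from 'express'

// `prisma` being passed in is an assumption, not an actual Wasp API.
export const myCustomSetup = async ({ prisma }) => {
  const router = express.Router()
  router.get('/health', async (_req, res) => {
    res.json({ ok: true })
  })
  // Wasp would plug the returned router into its own express app.
  return { router }
}
```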
## Implementation
### Lang design
- New `server` declaration (in .wasp) with `setup` field:
```css
server: {
setup: {
fn: import { myCustomSetup } from '@ext/serverSetup.js'
}
}
```
- Function could be defined as:
```js
// In '@ext/serverSetup.js'
export const myCustomSetup = async () => {
const someResource = await setupSomeResource()
return { someResource }
}
```
- Resources returned during setup could be used from operations as:
```js
export const myAction = async (args, context) => {
console.log(context.server.setup.someResource)
}
```
I considered making `server` not a standalone declaration but a field of the `app` declaration, but felt that would be too crowded; with stuff like `dependencies` and `auth` we already decided to go down the route of not putting everything under `app`, and this way we stay consistent with that.
I also considered multiple ideas on how to put the returned object in the `context`, and found that `server.setup` sounds specific enough that it will not clash with anything else nor will it need changing in the future.
### MVP
For the very first version, I will go with the basic requirements -> no args, and the returned object goes under `context.server.setup`. A rough sketch of the wiring is given below.
In the future we can consider implementing some of the advanced requirements.
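For illustration, roughly how the generated server could wire this up on startup. This is a hypothetical sketch, not actual generated code:
```js
// Hypothetical generated server startup (sketch only).
import { myCustomSetup } from '@ext/serverSetup.js'

const startServer = async () => {
  // Run the user's setup function once, before serving any requests.
  const setupResult = await myCustomSetup()

  // Make the result available to operations under `context.server.setup`.
  const contextBase = { server: { setup: setupResult } }

  // ... start express and pass `contextBase` into each operation's context ...
}
```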

@@ -25,18 +25,23 @@ and the standard library.
 <expr>      ::= <dict> | <list> | <dot>
               | <string> | <extimport> | <quoter>
-              | <ident> | <num>
+              | <ident> | <num> | <bool>
 <dict>      ::= '{' <dictentry> (',' <dictentry>)* '}'
               | '{' '}'
-<dictentry> ::= <ident> : <expr>
+<dictentry> ::= <ident> ':' <expr>
 <list>      ::= '[' <expr> (',' <expr>)* ']' | '[' ']'
 <extimport> ::= 'import' <name> 'from' <string>
 <name>      ::= '{' <ident> '}' | <ident>
 <dot>       ::= <expr> '.' <ident>
 <quoter>    ::= '{=' <ident> <anything> <ident> '=}'
+<bool>      ::= 'true' | 'false'
 \end{verbatim}
The \texttt{<anything>} inside \texttt{<quoter>} is ambiguous, so to correctly
parse wasp, a quoter starting with \texttt{\{=tag} must contain all text until
the first \texttt{tag=\}} or until the end of the input.
The nonterminals for strings, numbers, and identifiers are not shown.
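For example, with the \texttt{<bool>} extension, the following (illustrative)
declaration is derivable:
\begin{verbatim}
app Todo {
  title: "Todo app",
  debug: true,
  page: import Main from "@ext/Main.js"
}
\end{verbatim}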
\subsection{Type System}

@@ -41,6 +41,10 @@ ghc-options:
- -Wall
- -optP-Wno-nonportable-include-path # To avoid warning caused by .../autogen/cabal_macros.h. on OSX.
build-tools:
- alex
- happy
library:
source-dirs: src
dependencies:
@@ -64,6 +68,8 @@ library:
- utf8-string
- Glob
- unliftio
- array # Used by code generated by Alex for src/Analyzer/Parser/Lexer.x
- mtl
- strong-path
executables:

waspc/src/Analyzer.hs

@@ -0,0 +1,25 @@
module Analyzer
( analyze,
)
where
import Analyzer.Decl (Decl)
import qualified Analyzer.Evaluator as E
import qualified Analyzer.Parser as P
import Analyzer.StdTypeDefinitions (stdTypes)
import qualified Analyzer.TypeChecker as T
import Control.Arrow (left)
import Control.Monad ((>=>))
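-- | An error produced by one of the phases of the analysis: parsing, type checking, or evaluation.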
data AnalyzeError
= ParseError P.ParseError
| TypeError T.TypeError
| EvaluationError E.EvaluationError
-- | Takes a Wasp source file and produces a list of declarations or a
-- description of an error in the source file.
analyze :: String -> Either AnalyzeError [Decl]
analyze =
(left ParseError . P.parse)
>=> (left TypeError . T.typeCheck stdTypes)
>=> (left EvaluationError . E.evaluate stdTypes)

@@ -0,0 +1,36 @@
{-# LANGUAGE GADTs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE TupleSections #-}
module Analyzer.Decl
( Decl (..),
takeDecls,
)
where
import Analyzer.TypeDefinitions (IsDeclType)
import Data.Maybe (mapMaybe)
import Data.Typeable (Typeable, cast)
-- | Used to store a heterogeneous list of evaluated declarations during
-- evaluation.
data Decl where
-- | @Decl "Name" value@ results from a declaration statement "declType Name value".
Decl :: (Typeable a, IsDeclType a) => String -> a -> Decl
-- | Extracts all declarations of a certain type from a @[Decl]@.
--
-- Example:
--
-- @
-- data Person = Person { name :: String, age :: Integer } deriving Generic
-- data Building = Building { address :: String } deriving Generic
-- let decls = [ Decl "Bob" $ Person "Bob" 42
-- , Decl "Office" $ Building "1 Road St"
-- , Decl "Alice" $ Person "Alice" 32
-- ]
-- takeDecls @Person decls == [("Bob", Person "Bob" 42), ("Alice", Person "Alice" 32)]
-- @
takeDecls :: (Typeable a, IsDeclType a) => [Decl] -> [(String, a)]
takeDecls = mapMaybe $ \case
Decl name value -> (name,) <$> cast value

@@ -0,0 +1,15 @@
module Analyzer.Evaluator
( EvaluationError,
evaluate,
)
where
import Analyzer.Decl (Decl)
import Analyzer.TypeChecker (TypedAST)
import Analyzer.TypeDefinitions (TypeDefinitions)
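-- | An error that can occur during evaluation. A placeholder for now, with no variants yet.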
data EvaluationError
-- | Evaluate type-checked AST to produce a list of declarations.
evaluate :: TypeDefinitions -> TypedAST -> Either EvaluationError [Decl]
evaluate _ _ = Right []

@@ -0,0 +1,37 @@
module Analyzer.Parser
( -- * Overview
-- | The "Analyzer.Parser" module is built of two parts:
--
-- - The lexer, generated with Alex, which creates tokens from wasp source.
-- - The parser, generated with Happy, which builds an abstract syntax
-- tree from the tokens.
--
-- Lexing and parsing are not implemented as two separate phases that happen one after another.
-- Instead, the parser controls and uses the lexer internally to produce tokens as needed, on the go.
--
-- Both the lexer and the parser operate in the "Parser" monad, which manages state and exceptions for the parser,
-- and therefore also for the lexer, which functions as a part of the parser and is controlled by it.
parse,
AST (..),
Stmt (..),
Expr (..),
Ident,
ExtImportName (..),
ParseError (..),
SourcePosition (..),
Token (..),
TokenType (..),
)
where
import Analyzer.Parser.AST
import Analyzer.Parser.Monad (initialState)
import Analyzer.Parser.ParseError
import qualified Analyzer.Parser.Parser as P
import Analyzer.Parser.Token
import Control.Monad.Except (runExcept)
import Control.Monad.State (evalStateT)
parse :: String -> Either ParseError AST
parse = runExcept . evalStateT P.parse . initialState

@@ -0,0 +1,27 @@
module Analyzer.Parser.AST where
type Ident = String
newtype AST = AST {astStmts :: [Stmt]} deriving (Eq, Show)
-- Decl <declType> <name> <body>
data Stmt = Decl Ident Ident Expr deriving (Eq, Show)
data Expr
= Dict [(Ident, Expr)]
| List [Expr]
| StringLiteral String
| IntegerLiteral Integer
| DoubleLiteral Double
| BoolLiteral Bool
| ExtImport ExtImportName String
| Identifier Ident
| Quoter Ident String
deriving (Eq, Show)
data ExtImportName
= -- | Represents external imports like @import Ident from "file.js"@
ExtImportModule Ident
| -- | Represents external imports like @import { Ident } from "file.js"@
ExtImportField Ident
deriving (Eq, Show)

@@ -0,0 +1,157 @@
{
-- This file is processed by Alex (https://www.haskell.org/alex/) and generates
-- the module `Analyzer.Parser.Lexer`
{-# LANGUAGE NamedFieldPuns #-}
module Analyzer.Parser.Lexer
( lexer
) where
import Analyzer.Parser.Monad
import Analyzer.Parser.Token (Token (..), TokenType (..))
import Analyzer.Parser.ParseError (ParseError (..))
import Control.Monad.State.Lazy (gets)
import Control.Monad.Except (throwError)
import Data.Word (Word8)
import Codec.Binary.UTF8.String (encodeChar)
}
-- Character set aliases
$digit = 0-9
$alpha = [a-zA-Z]
$identstart = [_$alpha]
$ident = [_$alpha$digit]
$any = [.$white]
-- Regular expression aliases
@string = \"([^\\\"]|\\.)*\" -- matches string-literal on a single line, from https://stackoverflow.com/a/9260547/3902376
@double = "-"? $digit+ "." $digit+
@integer = "-"? $digit+
@ident = $identstart $ident* "'"*
-- Tokenization rules (regex -> token)
tokens :-
-- Skips whitespace
<0> $white+ ;
-- Quoter rules:
-- Uses Alex start codes to lex quoted characters with different rules:
-- - On "{=tag", enter <quoter> start code and make a TLQuote token
-- - While in <quoter>, if "tag=}" is seen
-- - If this closing tag matches the opening, enter <0> and make a TRQuote token
-- - Otherwise, stay in <quoter> and make a TQuoted token
-- - Otherwise, take one character at a time and make a TQuoted token
<0> "{=" @ident { beginQuoter }
<quoter> @ident "=}" { lexQuoterEndTag }
<quoter> $any { createValueToken TQuoted }
-- Simple tokens
<0> "{" { createConstToken TLCurly }
<0> "}" { createConstToken TRCurly }
<0> "," { createConstToken TComma }
<0> ":" { createConstToken TColon }
<0> "[" { createConstToken TLSquare }
<0> "]" { createConstToken TRSquare }
<0> "import" { createConstToken TImport }
<0> "from" { createConstToken TFrom }
<0> "true" { createConstToken TTrue }
<0> "false" { createConstToken TFalse }
-- Strings, numbers, identifiers
<0> @string { createValueToken $ \s -> TString $ read s }
<0> @double { createValueToken $ \s -> TDouble $ read s }
<0> @integer { createValueToken $ \s -> TInt $ read s }
<0> @ident { createValueToken $ \s -> TIdentifier s }
{
-- Alex needs the input type to be called "AlexInput"
type AlexInput = ParserInput
-- Convert the ParserState's start code to an int for Alex to use
startCodeToInt :: LexerStartCode -> Int
startCodeToInt DefaultStartCode = 0
startCodeToInt (QuoterStartCode _) = quoter
-- | Required by Alex.
--
-- This function is taken from the Alex basic wrapper.
alexGetByte :: AlexInput -> Maybe (Word8, AlexInput)
alexGetByte (c, (b:bs), s) = Just (b, (c, bs, s))
alexGetByte (_, [], []) = Nothing
alexGetByte (_, [], (c:s)) = case encodeChar c of
(b:bs) -> Just (b, (c, bs, s))
[] -> Nothing
-- | Required by Alex.
--
-- This function is taken from the Alex basic wrapper.
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (c, _, _) = c
-- | Lexes a single token from the input.
--
-- This function is designed for use with a Happy monadic parser that uses a threaded/monadic lexer.
-- This means that the parser, as it is building an AST, asks the lexer for a single token at a time, on the go.
-- This is done in "continuation" style -> the parser calls the lexer while passing it the function ('parseToken') via which
-- the lexer gives control back to the parser.
-- In such a setup, both the lexer and the parser operate in the same 'Parser' monad.
-- Check https://www.haskell.org/happy/doc/html/sec-monads.html#sec-lexers for more details.
--
-- This function internally calls `alexScan`, which is a function generated by Alex responsible for doing actual lexing/scanning.
lexer :: (Token -> Parser a) -> Parser a
lexer parseToken = do
input@(previousChar, _, remainingSource) <- gets parserRemainingInput
startCodeInt <- gets $ startCodeToInt . parserLexerStartCode
case alexScan input startCodeInt of
AlexEOF -> do
createConstToken TEOF "" >>= parseToken
AlexError _ -> do
pos <- gets parserSourcePosition
throwError $ UnexpectedChar previousChar pos
AlexSkip input' numCharsSkipped -> do
updatePosition $ take numCharsSkipped remainingSource
putInput input'
lexer parseToken
AlexToken input' tokenLength action -> do
-- Token is made before `updatePosition` so that its `tokenPosition` points to
-- the start of the token's lexeme.
token <- action $ take tokenLength remainingSource
updatePosition $ take tokenLength remainingSource
putInput input'
parseToken token
-- | Takes a lexeme like "{=json" and sets the quoter start code
beginQuoter :: String -> Parser Token
beginQuoter leftQuoteTag = do
let tag = drop 2 leftQuoteTag
setStartCode $ QuoterStartCode tag
createConstToken (TLQuote tag) leftQuoteTag
-- | Takes a lexeme like "json=}" and either ends a quoter or add quoted text to the quoter
lexQuoterEndTag :: String -> Parser Token
lexQuoterEndTag rightQuoteTag = gets parserLexerStartCode >>= \startCode -> case startCode of
DefaultStartCode -> error "impossible: lexQuoterEndTag with DefaultStartCode"
QuoterStartCode startTag | startTag == tag -> do
setStartCode DefaultStartCode
createConstToken (TRQuote tag) rightQuoteTag
_ -> do
createValueToken TQuoted rightQuoteTag
where
tag = take (length rightQuoteTag - 2) rightQuoteTag
-- | Makes an action that creates a token from a constant TokenType.
createConstToken :: TokenType -> (String -> Parser Token)
createConstToken tokType lexeme = do
position <- gets parserSourcePosition
return $ Token { tokenType = tokType
, tokenPosition = position
, tokenLexeme = lexeme
}
-- | Makes an action that creates a token using the input lexeme.
createValueToken :: (String -> TokenType) -> (String -> Parser Token)
createValueToken getTokenType lexeme = createConstToken (getTokenType lexeme) lexeme
}

@@ -0,0 +1,69 @@
{-# LANGUAGE FlexibleContexts #-}
module Analyzer.Parser.Monad
( ParserState (..),
initialState,
Parser,
updatePosition,
putInput,
setStartCode,
ParserInput,
LexerStartCode (..),
)
where
import Analyzer.Parser.ParseError (ParseError)
import Analyzer.Parser.Token (SourcePosition (..))
import Control.Monad.Except (Except)
import Control.Monad.State.Lazy (StateT, get, modify)
import Data.Word (Word8)
type Parser a = StateT ParserState (Except ParseError) a
updatePosition :: String -> Parser ()
updatePosition parsedSourcePiece = do
position <- parserSourcePosition <$> get
let position' = calcNewPosition parsedSourcePiece position
modify $ \s -> s {parserSourcePosition = position'}
where
-- Scan the string character by character to look for newlines
calcNewPosition [] position = position
calcNewPosition ('\n' : cs) (SourcePosition line _) = calcNewPosition cs $ SourcePosition (line + 1) 1
calcNewPosition (_ : cs) (SourcePosition line col) = calcNewPosition cs $ SourcePosition line (col + 1)
putInput :: ParserInput -> Parser ()
putInput input = modify $ \s -> s {parserRemainingInput = input}
setStartCode :: LexerStartCode -> Parser ()
setStartCode startCode = modify $ \s -> s {parserLexerStartCode = startCode}
data ParserState = ParserState
{ parserSourcePosition :: SourcePosition,
parserRemainingInput :: ParserInput,
parserLexerStartCode :: LexerStartCode
}
deriving (Show)
-- | A representation of the lexer's start code: https://www.haskell.org/alex/doc/html/alex-files.html#startcodes
data LexerStartCode
= -- | For a start code @DefaultStartCode@, the lexer is in start code <0>
DefaultStartCode
| -- | For a start code @QuoterStartCode tag@, the lexer is in start code <quoter> and the opening tag was @tag@
QuoterStartCode String
deriving (Show)
initialState :: String -> ParserState
initialState source =
ParserState
{ parserSourcePosition = SourcePosition 1 1,
parserRemainingInput = ('\n', [], source),
parserLexerStartCode = DefaultStartCode
}
-- | The type of the input given to the parser/lexer
--
-- An input @(c, bs, str)@ represents
-- - @c@ The previous character consumed by the lexer
-- - @bs@ The UTF8 bytes of the current character being lexed
-- - @str@ The remaining input to be lexed and parsed
type ParserInput = (Char, [Word8], String)

@@ -0,0 +1,11 @@
module Analyzer.Parser.ParseError where
import Analyzer.Parser.Token
data ParseError
= -- | A lexical error representing an invalid character
UnexpectedChar Char SourcePosition
| -- | A parse error caused by some token
ParseError Token
| -- | A quoter error where the opening and closing tags don't match
QuoterDifferentTags (String, SourcePosition) (String, SourcePosition)
deriving (Eq, Show)

@@ -0,0 +1,120 @@
{
-- This file is processed by Happy (https://www.haskell.org/happy/) and generates
-- the module `Analyzer.Parser.Parser`
module Analyzer.Parser.Parser
( parse
) where
import Analyzer.Parser.Lexer
import Analyzer.Parser.AST
import Analyzer.Parser.Token
import Analyzer.Parser.ParseError
import Analyzer.Parser.Monad (Parser, initialState, ParserState (..))
import Control.Monad.State.Lazy (get)
import Control.Monad.Except (throwError)
}
-- Lines below tell Happy:
-- - to name the main parsing function `parse` when generating it
-- - that the input to the parser is the `Token` type
-- - to call `parseError` when the parser encounters an error
%name parse
%tokentype { Token }
%error { parseError }
-- This sets up Happy to use a monadic parser and threaded lexer.
-- This means that the parser generated by Happy will request tokens from the lexer as it needs them, instead of
-- requiring a list of all tokens up front.
-- Both lexer and parser operate in the 'Parser' monad, which can be used to track shared state and errors.
-- Check https://www.haskell.org/happy/doc/html/sec-monads.html#sec-lexers for more details.
%monad { Parser }
%lexer { lexer } { Token { tokenType = TEOF } }
-- This section defines the names that are used in the grammar section to
-- refer to each type of token.
%token
import { Token { tokenType = TImport } }
from { Token { tokenType = TFrom } }
string { Token { tokenType = TString $$ } }
int { Token { tokenType = TInt $$ } }
double { Token { tokenType = TDouble $$ } }
true { Token { tokenType = TTrue } }
false { Token { tokenType = TFalse } }
'{=' { Token { tokenType = TLQuote $$ } }
quoted { Token { tokenType = TQuoted $$ } }
'=}' { Token { tokenType = TRQuote $$ } }
ident { Token { tokenType = TIdentifier $$ } }
'{' { Token { tokenType = TLCurly } }
'}' { Token { tokenType = TRCurly } }
',' { Token { tokenType = TComma } }
':' { Token { tokenType = TColon } }
'[' { Token { tokenType = TLSquare } }
']' { Token { tokenType = TRSquare } }
%%
-- Grammar rules
Wasp :: { AST }
: Stmt { AST [$1] }
| Wasp Stmt { AST $ astStmts $1 ++ [$2] }
Stmt :: { Stmt }
: Decl { $1 }
Decl :: { Stmt }
: ident ident Expr { Decl $1 $2 $3 }
Expr :: { Expr }
: Dict { $1 }
| List { $1 }
| Extimport { $1 }
| Quoter { $1 }
| string { StringLiteral $1 }
| int { IntegerLiteral $1 }
| double { DoubleLiteral $1 }
| true { BoolLiteral True }
| false { BoolLiteral False }
| ident { Identifier $1 }
Dict :: { Expr }
: '{' DictEntries '}' { Dict $2 }
| '{' DictEntries ',' '}' { Dict $2 }
| '{' '}' { Dict [] }
DictEntries :: { [(Ident, Expr)] }
: DictEntry { [$1] }
| DictEntries ',' DictEntry { $1 ++ [$3] }
DictEntry :: { (Ident, Expr) }
: ident ':' Expr { ($1, $3) }
List :: { Expr }
: '[' ListVals ']' { List $2 }
| '[' ListVals ',' ']' { List $2 }
| '[' ']' { List [] }
ListVals :: { [Expr] }
: Expr { [$1] }
| ListVals ',' Expr { $1 ++ [$3] }
Extimport :: { Expr }
: import Name from string { ExtImport $2 $4 }
Name :: { ExtImportName }
: ident { ExtImportModule $1 }
| '{' ident '}' { ExtImportField $2 }
Quoter :: { Expr }
: SourcePosition '{=' Quoted SourcePosition '=}' {% if $2 /= $5
then throwError $ QuoterDifferentTags ($2, $1) ($5, $4)
else return $ Quoter $2 $3
}
Quoted :: { String }
: quoted { $1 }
| Quoted quoted { $1 ++ $2 }
SourcePosition :: { SourcePosition }
: {- empty -} {% fmap parserSourcePosition get }
{
parseError :: Token -> Parser a
parseError token = throwError $ ParseError token
}

@@ -0,0 +1,32 @@
module Analyzer.Parser.Token where
-- | The first character on the first line is at position @SourcePosition 1 1@
data SourcePosition = SourcePosition Int Int deriving (Eq, Show)
data TokenType
= TLCurly
| TRCurly
| TComma
| TColon
| TLSquare
| TRSquare
| TImport
| TFrom
| TString String
| TInt Integer
| TDouble Double
| TTrue
| TFalse
| TLQuote String
| TRQuote String
| TQuoted String
| TIdentifier String
| TEOF
deriving (Eq, Show)
data Token = Token
{ tokenType :: TokenType,
tokenPosition :: SourcePosition,
tokenLexeme :: String
}
deriving (Eq, Show)

@@ -0,0 +1,28 @@
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TypeApplications #-}
-- Todo:
-- When the Analyzer is finished, these types should be moved to the `Wasp`
-- module.
module Analyzer.StdTypeDefinitions
( AuthMethod (..),
App (..),
stdTypes,
)
where
import qualified Analyzer.TypeDefinitions as TD
import GHC.Generics (Generic)
data AuthMethod = EmailAndPassword deriving (Generic)
data App = App {title :: String, authMethod :: AuthMethod} deriving (Generic)
-- | A Wasp Library containing all of the standard types required for Wasp to
-- work.
stdTypes :: TD.TypeDefinitions
stdTypes =
-- addEnumType @AuthMethod $
-- addDeclType @App $
TD.empty

@@ -0,0 +1,29 @@
module Analyzer.Type
( Type (..),
DictEntryType (..),
dictEntryRequired,
)
where
-- | All possible types in Wasp.
data Type
= DeclType String
| EnumType String
| DictType [DictEntryType]
| ListType Type
| StringType
| NumberType
| BoolType
| ExtImportType
| QuoterType String
-- | The type of an entry in a `Dict`.
data DictEntryType
= DictEntry {dictEntryName :: String, dictEntryType :: Type}
| DictOptionalEntry {dictEntryName :: String, dictEntryType :: Type}
-- | Determines whether the entry must be present in an instance of its parent
-- `Dict` type.
dictEntryRequired :: DictEntryType -> Bool
dictEntryRequired DictEntry {} = True
dictEntryRequired DictOptionalEntry {} = False

@@ -0,0 +1,53 @@
module Analyzer.TypeChecker
( TypedAST (..),
TypedStmt (..),
TypedExpr (..),
TypeError,
exprType,
typeCheck,
)
where
import Analyzer.Parser (AST, ExtImportName, Ident)
import Analyzer.Type (Type (..))
import Analyzer.TypeDefinitions (TypeDefinitions)
-- | Checks that an AST conforms to the type rules of Wasp and produces
-- an AST labelled with type information.
typeCheck :: TypeDefinitions -> AST -> Either TypeError TypedAST
typeCheck _ _ = Right $ TypedAST {typedStmts = []}
-- TODO: instead of having separate AST for type checker, give `Parser.AST` a
-- "content" type argument that type information can be attached to
newtype TypedAST = TypedAST {typedStmts :: [TypedStmt]}
data TypedStmt = Decl Ident TypedExpr Type
data TypedExpr
= Dict [(Ident, TypedExpr)] Type
| List [TypedExpr] Type
| StringLiteral String
| IntegerLiteral Integer
| DoubleLiteral Double
| BoolLiteral Bool
| ExtImport ExtImportName String
| Identifier Ident Type
| -- TODO: What type to represent these?
JSON String
| PSL String
-- | Given a `TypedExpr`, determines its `Type`.
exprType :: TypedExpr -> Type
exprType (Dict _ t) = t
exprType (List _ t) = t
exprType (StringLiteral _) = StringType
exprType (IntegerLiteral _) = NumberType
exprType (DoubleLiteral _) = NumberType
exprType (BoolLiteral _) = BoolType
exprType (ExtImport _ _) = ExtImportType
exprType (Identifier _ t) = t
exprType (JSON _) = QuoterType "json"
exprType (PSL _) = QuoterType "psl"
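-- | A type error produced by the type checker. A placeholder for now, with no variants yet.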
data TypeError

@@ -0,0 +1,147 @@
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
module Analyzer.TypeDefinitions
( TypeDefinitions,
empty,
getDeclType,
getEnumType,
addDeclType,
addEnumType,
DeclType (..),
EnumType (..),
IsDeclType,
IsEnumType,
)
where
import Analyzer.Type (Type)
import qualified Data.HashMap.Strict as M
data EnumType = EnumType {etName :: String, etVariants :: [String]}
data DeclType = DeclType {dtName :: String, dtBodyType :: Type}
-- | The parser, type checker, and evaluator require information about declaration,
-- enum, and quoter types, but the specific types are not hardcoded into each
-- phase of the analyzer.
--
-- Instead, this information is injected into them via 'TypeDefinitions',
-- which defines in one place the specific declaration/enum/quoter types.
--
-- This enables us to easily modify / add / remove specific types as Wasp evolves as a language without
-- having to touch the core functionality of the Analyzer.
data TypeDefinitions = TypeDefinitions
{ declTypes :: M.HashMap String DeclType,
enumTypes :: M.HashMap String EnumType
-- TODO: In the future, add quoters to the type definitions
}
empty :: TypeDefinitions
empty = TypeDefinitions {declTypes = M.empty, enumTypes = M.empty}
getDeclType :: String -> TypeDefinitions -> Maybe DeclType
getDeclType name (TypeDefinitions dts _) = M.lookup name dts
getEnumType :: String -> TypeDefinitions -> Maybe EnumType
getEnumType name (TypeDefinitions _ ets) = M.lookup name ets
-- | Add a declaration type to type definitions. Requires the type to be in the form
-- of a Wasp decl. See "IsDecl" for requirements.
--
-- Example:
--
-- @
-- {-# LANGUAGE TypeApplications, DeriveGeneric #-}
-- data App = App { title :: String } deriving Generic
-- let exampleDefinitions = addDeclType @App empty
-- @
addDeclType :: forall typ. (IsDeclType typ) => TypeDefinitions -> TypeDefinitions
addDeclType lib =
let decl = DeclType {dtName = declTypeName @typ, dtBodyType = declTypeBodyType @typ}
in lib {declTypes = M.insert (declTypeName @typ) decl $ declTypes lib}
-- | Add an enum type to type definitions. Requires the type to be in the form
-- of a Wasp enum. See "IsEnum" for requirements.
--
-- Example:
--
-- @
-- {-# LANGUAGE TypeApplications, DeriveGeneric #-}
-- data ParamType = StringParam | NumberParam deriving Generic
-- let exampleDefinitions = addEnumType @ParamType empty
-- @
addEnumType :: forall typ. (IsEnumType typ) => TypeDefinitions -> TypeDefinitions
addEnumType lib =
let enum = EnumType {etName = enumTypeName @typ, etVariants = enumTypeVariants @typ}
in lib {enumTypes = M.insert (enumTypeName @typ) enum $ enumTypes lib}
-- | Marks a Haskell type as a representation of a specific Wasp declaration type.
-- Instead of defining a Wasp declaration type manually, by constructing it with constructors from
-- "Analyzer.Type", this allows us to specify it as a Haskell type which knows how to translate itself into the Analyzer.Type representation and back.
-- If this Haskell type satisfies certain requirements, the knowledge of how to translate it back and forth into the Analyzer.Type representation can be automatically derived from its shape.
--
-- Requirements on type:
-- - The type must be an instance of `Generic`.
-- - The type must be an ADT with one constructor.
-- - The type must have just one field OR use record syntax (in which case it can have multiple fields).
--
-- Some assumptions are required of `declType` and `a`:
-- - If and only if `declType` is a `Dict`, then
-- - `a` uses record syntax.
-- - If and only if there is a key `x` in `declType`, then `a` has a
-- record `x` with the same type.
-- - If a key `x` is optional, then the record `x` in `a` is a `Maybe`
-- - Otherwise, `a` has one field and `declType` maps to the type of that
-- field.
--
-- Examples:
--
-- Using record fields for dictionary types:
--
-- @
-- >>> data User = User { name :: String, email :: Maybe String } deriving Generic
-- >>> declTypeName @User
-- "user"
-- >>> declTypeBodyType @User
-- DictType [DictEntry "name" StringType, DictOptionalEntry "email" StringType]
-- @
--
-- No records for a type that doesn't use a dictionary:
--
-- @
-- >>> data Admins = Admins [User] deriving Generic
-- >>> declTypeBodyType @Admins
-- ListType (DeclType "User")
-- @
class IsDeclType a where
declTypeName :: String
declTypeBodyType :: Type
-- | Marks a Haskell type as a representation of a specific Wasp enum type.
-- Check "IsDeclType" above for more details.
--
-- Requirements on type:
-- - The type must be an instance of 'Generic'.
-- - The type must be an ADT with at least one constructor.
-- - Each constructor of the type must have 0 fields.
--
-- Some properties are required of `enumTypeVariants` and `a`:
-- - If and only if there is a string `x` in `enumTypeVariants`, then `a` has
-- a constructor called `x`.
--
-- Examples:
--
-- An allowed enum type:
--
-- @
-- >>> data AuthMethod = OAuth2 | EmailAndPassword deriving Generic
-- >>> enumTypeName @AuthMethod
-- "authMethod"
-- >>> enumTypeVariants @AuthMethod
-- ["OAuth2", "EmailAndPassword"]
-- @
class IsEnumType a where
enumTypeName :: String
enumTypeVariants :: [String]
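-- For illustration, a hypothetical hand-written instance (in practice these
-- instances are meant to be derived generically from the type's shape;
-- 'DictType', 'DictEntry', 'StringType' and 'NumberType' come from "Analyzer.Type"):
--
-- @
-- data Person = Person { name :: String, age :: Integer } deriving Generic
--
-- instance IsDeclType Person where
--   declTypeName = "person"
--   declTypeBodyType = DictType [DictEntry "name" StringType, DictEntry "age" NumberType]
-- @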

@@ -0,0 +1,169 @@
module Analyzer.ParserTest where
import Analyzer.Parser
import Data.Either (isLeft)
import Test.Tasty.Hspec
spec_Parser :: Spec
spec_Parser = do
describe "Analyzer.Parser" $ do
it "Parses decls, dicts, and literals" $ do
let source =
unlines
[ "test Decl {",
" string: \"Hello Wasp =}\",",
" escapedString: \"Look, a \\\"\",",
" integer: 42,",
" real: 3.14,",
" yes: true,",
" no: false,",
" ident: Wasp",
"}"
]
let ast =
AST
[ Decl "test" "Decl" $
Dict
[ ("string", StringLiteral "Hello Wasp =}"),
("escapedString", StringLiteral "Look, a \""),
("integer", IntegerLiteral 42),
("real", DoubleLiteral 3.14),
("yes", BoolLiteral True),
("no", BoolLiteral False),
("ident", Identifier "Wasp")
]
]
parse source `shouldBe` Right ast
it "Parses external imports" $ do
let source =
unlines
[ "test Imports {",
" module: import Page from \"page.jsx\",",
" field: import { Page } from \"page.jsx\"",
"}"
]
let ast =
AST
[ Decl "test" "Imports" $
Dict
[ ("module", ExtImport (ExtImportModule "Page") "page.jsx"),
("field", ExtImport (ExtImportField "Page") "page.jsx")
]
]
parse source `shouldBe` Right ast
it "Parses unary lists" $ do
let source = "test Decl [ 1 ]"
let ast = AST [Decl "test" "Decl" $ List [IntegerLiteral 1]]
parse source `shouldBe` Right ast
it "Parses lists of multiple elements" $ do
let source = "test Decl [ 1, 2, 3 ]"
let ast = AST [Decl "test" "Decl" $ List [IntegerLiteral 1, IntegerLiteral 2, IntegerLiteral 3]]
parse source `shouldBe` Right ast
it "Parses empty dictionaries and lists" $ do
let source = "test Decl { dict: {}, list: [] }"
let ast =
AST
[ Decl "test" "Decl" $
Dict
[ ("dict", Dict []),
("list", List [])
]
]
parse source `shouldBe` Right ast
it "Allows trailing commas in lists and dictionaries" $ do
let source =
unlines
[ "test Decl {",
" list: [ 1, ],",
"}"
]
let ast =
AST
[ Decl "test" "Decl" $
Dict
[("list", List [IntegerLiteral 1])]
]
parse source `shouldBe` Right ast
it "Parses quoted PSL" $ do
let source =
unlines
[ "test PSL {=psl",
" id Int @id",
"psl=}"
]
let ast = AST [Decl "test" "PSL" $ Quoter "psl" "\n id Int @id\n"]
parse source `shouldBe` Right ast
it "Parses quoted JSON" $ do
let source =
unlines
[ "test JSON {=json",
" \"key\": \"value\"",
"json=}"
]
let ast = AST [Decl "test" "JSON" $ Quoter "json" "\n \"key\": \"value\"\n"]
parse source `shouldBe` Right ast
it "Parses multiple quoters" $ do
let source =
unlines
[ "test JSON {=json",
" \"key\": \"value\"",
"json=}",
"test JSON2 {=json",
" \"key\": \"value\"",
"json=}"
]
let ast =
AST
[ Decl "test" "JSON" $ Quoter "json" "\n \"key\": \"value\"\n",
Decl "test" "JSON2" $ Quoter "json" "\n \"key\": \"value\"\n"
]
parse source `shouldBe` Right ast
it "Fails to parse a quoter with unmatched tags" $ do
let source = "test Failure {=a b=}"
parse source `shouldSatisfy` isLeft
it "Parses nested quoters correctly" $ do
parse "test Case1 {=foo {=foo foo=} foo=}" `shouldSatisfy` isLeft
parse "test Case2 {=foo foo=} foo=}" `shouldSatisfy` isLeft
parse "test Case3 {=foo {=foo foo=}"
`shouldBe` Right (AST [Decl "test" "Case3" $ Quoter "foo" " {=foo "])
parse "test Case4 {=foo {=bar foo=}"
`shouldBe` Right (AST [Decl "test" "Case4" $ Quoter "foo" " {=bar "])
parse "test Case5 {=foo bar=} foo=}"
`shouldBe` Right (AST [Decl "test" "Case5" $ Quoter "foo" " bar=} "])
parse "test Case6 {=foo {=bar bar=} foo=}"
`shouldBe` Right (AST [Decl "test" "Case6" $ Quoter "foo" " {=bar bar=} "])
it "Requires dictionaries to have an ending bracket" $ do
let source = "test Decl {"
let expected =
Left $
ParseError $
Token
{ tokenType = TEOF,
tokenPosition = SourcePosition 1 12,
tokenLexeme = ""
}
parse source `shouldBe` expected
it "Parses multiple statements" $ do
let source =
unlines
[ "constant Pi 3.14159",
"constant E 2.71828"
]
let ast =
AST
[ Decl "constant" "Pi" $ DoubleLiteral 3.14159,
Decl "constant" "E" $ DoubleLiteral 2.71828
]
parse source `shouldBe` Right ast