_id stringlengths 64 64 | repository stringlengths 6 84 | name stringlengths 4 110 | content stringlengths 0 248k | license null | download_url stringlengths 89 454 | language stringclasses 7
values | comments stringlengths 0 74.6k | code stringlengths 0 248k |
|---|---|---|---|---|---|---|---|---|
60661de82cd5558e61966fc308eba23d1fdfe4e8ffbcd7429031ca14e7b1b50a | genmeblog/genuary | day15.clj | ;; Sand.
(ns genuary.2022.day15
(:require [clojure2d.core :as c2d]
[clojure2d.extra.utils :as utils]
[fastmath.core :as m]
[fastmath.random :as r]
[fastmath.vector :as v]
[clojure2d.pixels :as p]))
(set! *warn-on-reflection* true)
(set! *unchecked-math* :warn-on-boxed)
(m/use-primitive-operators)
(defn distort
[v ^double weight]
(-> (v/vec2 (/ weight 2.0) (r/drand m/TWO_PI))
(v/from-polar)
(v/add v)
(v/shift (r/drand (* -1.5 weight)))))
(def vb (v/vec2 -500 -500))
(def ^:const ^double maxdist (m/dist -500 -500 500 500))
(c2d/with-canvas [c (c2d/canvas 1800 1800)]
(c2d/set-background c [240 216 168])
(c2d/translate c 900 750)
(c2d/rotate c (+ m/PI m/QUARTER_PI))
(dotimes [_ 12000000]
(let [x (r/drand -500 500)
y (r/drand -500 500)
v (v/vec2 x y)
d (- 150.0 (* 150.0 (m/sqrt (/ (v/dist vb v) maxdist))))
pos (distort v d)
d2 (/ (v/dist pos v) 1.5)]
(c2d/set-color c (+ 60.0 d2) (- 30.0 (/ d2 6.0)) 5 80)
(c2d/ellipse c (pos 0) (pos 1) 0.8 0.8)))
(let [res (c2d/resize (->> (p/to-pixels c)
(p/filter-channels p/gaussian-blur-1)) 800 800)]
;; (c2d/save res "results/2022/day15.jpg")
(utils/show-image res)))
| null | https://raw.githubusercontent.com/genmeblog/genuary/c8d5d23d5bc3d91b90a894461c9af27f9a15ad65/src/genuary/2022/day15.clj | clojure | Sand.
(c2d/save res "results/2022/day15.jpg") | (ns genuary.2022.day15
(:require [clojure2d.core :as c2d]
[clojure2d.extra.utils :as utils]
[fastmath.core :as m]
[fastmath.random :as r]
[fastmath.vector :as v]
[clojure2d.pixels :as p]))
(set! *warn-on-reflection* true)
(set! *unchecked-math* :warn-on-boxed)
(m/use-primitive-operators)
(defn distort
[v ^double weight]
(-> (v/vec2 (/ weight 2.0) (r/drand m/TWO_PI))
(v/from-polar)
(v/add v)
(v/shift (r/drand (* -1.5 weight)))))
(def vb (v/vec2 -500 -500))
(def ^:const ^double maxdist (m/dist -500 -500 500 500))
(c2d/with-canvas [c (c2d/canvas 1800 1800)]
(c2d/set-background c [240 216 168])
(c2d/translate c 900 750)
(c2d/rotate c (+ m/PI m/QUARTER_PI))
(dotimes [_ 12000000]
(let [x (r/drand -500 500)
y (r/drand -500 500)
v (v/vec2 x y)
d (- 150.0 (* 150.0 (m/sqrt (/ (v/dist vb v) maxdist))))
pos (distort v d)
d2 (/ (v/dist pos v) 1.5)]
(c2d/set-color c (+ 60.0 d2) (- 30.0 (/ d2 6.0)) 5 80)
(c2d/ellipse c (pos 0) (pos 1) 0.8 0.8)))
(let [res (c2d/resize (->> (p/to-pixels c)
(p/filter-channels p/gaussian-blur-1)) 800 800)]
(utils/show-image res)))
|
57bca7c97fedf87d3887533f880d882ee9dfa52aad9b2420a46128867725754d | ajhc/ajhc | Verbosity.hs | -----------------------------------------------------------------------------
-- |
-- Module : Distribution.Verbosity
Copyright : 2007
--
-- Maintainer :
-- Portability : portable
--
A simple ' Verbosity ' type with associated utilities . There are 4 standard
-- verbosity levels from 'silent', 'normal', 'verbose' up to 'deafening'. This
-- is used for deciding what logging messages to print.
Verbosity for functions
Copyright ( c ) 2007 ,
All rights reserved .
Redistribution and use in source and binary forms , with or without
modification , are permitted provided that the following conditions are
met :
* Redistributions of source code must retain the above copyright
notice , this list of conditions and the following disclaimer .
* Redistributions in binary form must reproduce the above
copyright notice , this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution .
* Neither the name of nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission .
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
" AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
OWNER OR ANY DIRECT , INDIRECT , INCIDENTAL ,
SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Isaac Jones nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -}
module Distribution.Verbosity (
-- * Verbosity
Verbosity,
silent, normal, verbose, deafening,
moreVerbose, lessVerbose,
intToVerbosity,
showForCabal, showForGHC
) where
import Data.List (elemIndex)
data Verbosity = Silent | Normal | Verbose | Deafening
deriving (Show, Read, Eq, Ord, Enum, Bounded)
-- We shouldn't print /anything/ unless an error occurs in silent mode
silent :: Verbosity
silent = Silent
-- Print stuff we want to see by default
normal :: Verbosity
normal = Normal
-- Be more verbose about what's going on
verbose :: Verbosity
verbose = Verbose
-- Not only are we verbose ourselves (perhaps even noisier than when
-- being "verbose"), but we tell everything we run to be verbose too
deafening :: Verbosity
deafening = Deafening
moreVerbose :: Verbosity -> Verbosity
moreVerbose Silent = Silent --silent should stay silent
moreVerbose Normal = Verbose
moreVerbose Verbose = Deafening
moreVerbose Deafening = Deafening
lessVerbose :: Verbosity -> Verbosity
lessVerbose Deafening = Deafening
lessVerbose Verbose = Normal
lessVerbose Normal = Silent
lessVerbose Silent = Silent
intToVerbosity :: Int -> Maybe Verbosity
intToVerbosity 0 = Just Silent
intToVerbosity 1 = Just Normal
intToVerbosity 2 = Just Verbose
intToVerbosity 3 = Just Deafening
intToVerbosity _ = Nothing
showForCabal, showForGHC :: Verbosity -> String
showForCabal v = maybe (error "unknown verbosity") show $
elemIndex v [silent,normal,verbose,deafening]
showForGHC v = maybe (error "unknown verbosity") show $
elemIndex v [silent,normal,__,verbose,deafening]
where __ = silent -- this will be always ignored by elemIndex
| null | https://raw.githubusercontent.com/ajhc/ajhc/8ef784a6a3b5998cfcd95d0142d627da9576f264/src_jahm/Distribution/Verbosity.hs | haskell | ---------------------------------------------------------------------------
|
Module : Distribution.Verbosity
Maintainer :
Portability : portable
verbosity levels from 'silent', 'normal', 'verbose' up to 'deafening'. This
is used for deciding what logging messages to print.
* Verbosity
We shouldn't print /anything/ unless an error occurs in silent mode
Print stuff we want to see by default
Be more verbose about what's going on
Not only are we verbose ourselves (perhaps even noisier than when
being "verbose"), but we tell everything we run to be verbose too
silent should stay silent
this will be always ignored by elemIndex | Copyright : 2007
A simple ' Verbosity ' type with associated utilities . There are 4 standard
Verbosity for functions
Copyright ( c ) 2007 ,
All rights reserved .
Redistribution and use in source and binary forms , with or without
modification , are permitted provided that the following conditions are
met :
* Redistributions of source code must retain the above copyright
notice , this list of conditions and the following disclaimer .
* Redistributions in binary form must reproduce the above
copyright notice , this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution .
* Neither the name of nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission .
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
" AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
OWNER OR ANY DIRECT , INDIRECT , INCIDENTAL ,
SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Isaac Jones nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -}
module Distribution.Verbosity (
Verbosity,
silent, normal, verbose, deafening,
moreVerbose, lessVerbose,
intToVerbosity,
showForCabal, showForGHC
) where
import Data.List (elemIndex)
data Verbosity = Silent | Normal | Verbose | Deafening
deriving (Show, Read, Eq, Ord, Enum, Bounded)
silent :: Verbosity
silent = Silent
normal :: Verbosity
normal = Normal
verbose :: Verbosity
verbose = Verbose
deafening :: Verbosity
deafening = Deafening
moreVerbose :: Verbosity -> Verbosity
moreVerbose Normal = Verbose
moreVerbose Verbose = Deafening
moreVerbose Deafening = Deafening
lessVerbose :: Verbosity -> Verbosity
lessVerbose Deafening = Deafening
lessVerbose Verbose = Normal
lessVerbose Normal = Silent
lessVerbose Silent = Silent
intToVerbosity :: Int -> Maybe Verbosity
intToVerbosity 0 = Just Silent
intToVerbosity 1 = Just Normal
intToVerbosity 2 = Just Verbose
intToVerbosity 3 = Just Deafening
intToVerbosity _ = Nothing
showForCabal, showForGHC :: Verbosity -> String
showForCabal v = maybe (error "unknown verbosity") show $
elemIndex v [silent,normal,verbose,deafening]
showForGHC v = maybe (error "unknown verbosity") show $
elemIndex v [silent,normal,__,verbose,deafening]
|
800e765fc4edb7b71dd3bf8aa79f133d4cb1621b1a64022056fc6d6b43b13d21 | nuprl/gradual-typing-performance | data.rkt | #lang racket
(define-struct posn (x y) #:prefab)
(define-struct block (x y color) #:prefab)
(define-struct tetra (center blocks) #:prefab)
(define-struct world (tetra blocks) #:prefab)
(define (posn=? p1 p2)
(and (= (posn-x p1) (posn-x p2))
(= (posn-y p1) (posn-y p2))))
(provide
(struct-out block)
(struct-out posn)
(struct-out tetra)
(struct-out world)
posn=?)
#;
(provide
(contract-out
(struct block ([x real?] [y real?] [color COLOR/C]))
(struct posn ([x real?] [y real?]))
(struct tetra ([center POSN/C] [blocks BSET/C]))
(struct world ([tetra TETRA/C] [blocks BSET/C]))
[posn=? (POSN/C POSN/C . -> . boolean?)])
COLOR/C
POSN/C
BLOCK/C
TETRA/C
WORLD/C
BSET/C)
| null | https://raw.githubusercontent.com/nuprl/gradual-typing-performance/35442b3221299a9cadba6810573007736b0d65d4/experimental/unsafe/tetris/both/data.rkt | racket | #lang racket
(define-struct posn (x y) #:prefab)
(define-struct block (x y color) #:prefab)
(define-struct tetra (center blocks) #:prefab)
(define-struct world (tetra blocks) #:prefab)
(define (posn=? p1 p2)
(and (= (posn-x p1) (posn-x p2))
(= (posn-y p1) (posn-y p2))))
(provide
(struct-out block)
(struct-out posn)
(struct-out tetra)
(struct-out world)
posn=?)
(provide
(contract-out
(struct block ([x real?] [y real?] [color COLOR/C]))
(struct posn ([x real?] [y real?]))
(struct tetra ([center POSN/C] [blocks BSET/C]))
(struct world ([tetra TETRA/C] [blocks BSET/C]))
[posn=? (POSN/C POSN/C . -> . boolean?)])
COLOR/C
POSN/C
BLOCK/C
TETRA/C
WORLD/C
BSET/C)
| |
28de03a883a53b79d09d0397212ffc0910ea5fd2c70a4f09e0c631279b11accd | jeapostrophe/exp | nsb2org.rkt | #lang racket
(require racket/pretty
(planet "html-parsing.rkt" ("neil" "html-parsing.plt")))
(define p "/home/jay/Dev/scm/github.jeapostrophe/home/etc/bookmarks_12_20_11.html")
(define s (file->string p))
(define x (html->xexp s))
(define l (list-ref x 11))
(define current-indent 1)
(define-syntax-rule (indent e ...)
(dynamic-wind
(lambda () (set! current-indent (add1 current-indent)))
(lambda () e ...)
(lambda () (set! current-indent (sub1 current-indent)))))
(define (iprintf . args)
(for ([i (in-range current-indent)])
(display "*"))
(display " ")
(apply printf args))
(define convert
(match-lambda
[`(dl ,_ ,entries ...)
(for-each convert entries)]
[`(dt (h3 ,_ ,name) ,_ ,_ ,entries ...)
(iprintf "~a\n" name)
(indent
(for-each convert entries))]
[`(dt (a (@ (href ,link) . ,_) ,text) . ,_)
(iprintf "[[~a][~a]]\n" link text)]
[`(p "\r\n" . ,_)
(void)]
[x
(printf "Error:\n")
(pretty-print x)
(exit 1)]))
(convert l)
| null | https://raw.githubusercontent.com/jeapostrophe/exp/43615110fd0439d2ef940c42629fcdc054c370f9/nsb2org.rkt | racket | #lang racket
(require racket/pretty
(planet "html-parsing.rkt" ("neil" "html-parsing.plt")))
(define p "/home/jay/Dev/scm/github.jeapostrophe/home/etc/bookmarks_12_20_11.html")
(define s (file->string p))
(define x (html->xexp s))
(define l (list-ref x 11))
(define current-indent 1)
(define-syntax-rule (indent e ...)
(dynamic-wind
(lambda () (set! current-indent (add1 current-indent)))
(lambda () e ...)
(lambda () (set! current-indent (sub1 current-indent)))))
(define (iprintf . args)
(for ([i (in-range current-indent)])
(display "*"))
(display " ")
(apply printf args))
(define convert
(match-lambda
[`(dl ,_ ,entries ...)
(for-each convert entries)]
[`(dt (h3 ,_ ,name) ,_ ,_ ,entries ...)
(iprintf "~a\n" name)
(indent
(for-each convert entries))]
[`(dt (a (@ (href ,link) . ,_) ,text) . ,_)
(iprintf "[[~a][~a]]\n" link text)]
[`(p "\r\n" . ,_)
(void)]
[x
(printf "Error:\n")
(pretty-print x)
(exit 1)]))
(convert l)
| |
6e9057896c186fb279b15d43577370147761563c4f465a770f6e2f087ec1962e | ingesolvoll/kee-frame | controller.cljc | (ns ^:no-doc kee-frame.controller
(:require
#?(:cljs
[cljs.core.match :refer [match]])
#?(:clj
[clojure.core.match :refer [match]])
[kee-frame.state :as state]
[kee-frame.spec :as spec]
[clojure.spec.alpha :as s]
[expound.alpha :as e]
[taoensso.timbre :as log]
[re-frame.core :as rf]))
(defn process-params [params route]
(cond
(vector? params) (get-in route params)
(ifn? params) (params route)))
(defn validate-and-dispatch! [dispatch]
(when dispatch
(log/debug "Dispatch returned from controller function " dispatch)
(do
(when-not (s/valid? ::spec/event-vector dispatch)
(e/expound ::spec/event-vector dispatch)
(throw (ex-info "Invalid dispatch value"
(s/explain-data ::spec/event-vector dispatch))))
dispatch)))
(defn stop-controller [ctx {:keys [stop] :as controller}]
(log/debug {:type :controller-stop
:controller controller
:ctx ctx})
(cond
(vector? stop) stop
(ifn? stop) (validate-and-dispatch! (stop ctx))))
(defn start-controller [ctx {:keys [last-params start] :as controller}]
(log/debug {:type :controller-start
:controller controller
:ctx ctx})
(when start
(cond
(vector? start) (conj start last-params)
(ifn? start) (validate-and-dispatch! (start ctx last-params)))))
(defn controller-actions [controllers route]
(reduce (fn [actions {:keys [id last-params params start stop]}]
(let [current-params (process-params params route)
controller {:id id
:start start
:stop stop
:last-params current-params}]
(match [last-params current-params (= last-params current-params)]
[_ _ true] actions
[nil _ false] (update actions :start conj controller)
[_ nil false] (update actions :stop conj controller)
[_ _ false] (-> actions
(update :stop conj controller)
(update :start conj controller)))))
{}
controllers))
(defn update-controllers [controllers new-controllers]
(let [id->new-controller (->> new-controllers
(map (juxt :id identity))
(into {}))]
(map (fn [{:keys [id] :as controller}]
(if-let [updated-controller (id->new-controller id)]
(assoc controller :last-params (:last-params updated-controller))
controller))
controllers)))
(rf/reg-event-fx ::start-controllers
(fn [_ [_ dispatches]]
;; Another dispatch to make sure all controller stop commands are processed before the starts
{:dispatch-n dispatches}))
(defn controller-effects [controllers ctx route]
(let [{:keys [start stop]} (controller-actions controllers route)
start-dispatches (map #(start-controller ctx %) start)
stop-dispatches (map #(stop-controller ctx %) stop)
dispatch-n (cond
(and (seq start) (seq stop)) (conj stop-dispatches
[::start-controllers start-dispatches])
(seq start) start-dispatches
(seq stop) stop-dispatches)]
{:update-controllers (concat start stop)
:dispatch-n dispatch-n}))
(rf/reg-fx :update-controllers
(fn [new-controllers]
(swap! state/controllers update-controllers new-controllers)))
| null | https://raw.githubusercontent.com/ingesolvoll/kee-frame/e77a672ca1a913b7eedd5b2e65af47a497755426/src/kee_frame/controller.cljc | clojure | Another dispatch to make sure all controller stop commands are processed before the starts | (ns ^:no-doc kee-frame.controller
(:require
#?(:cljs
[cljs.core.match :refer [match]])
#?(:clj
[clojure.core.match :refer [match]])
[kee-frame.state :as state]
[kee-frame.spec :as spec]
[clojure.spec.alpha :as s]
[expound.alpha :as e]
[taoensso.timbre :as log]
[re-frame.core :as rf]))
(defn process-params [params route]
(cond
(vector? params) (get-in route params)
(ifn? params) (params route)))
(defn validate-and-dispatch! [dispatch]
(when dispatch
(log/debug "Dispatch returned from controller function " dispatch)
(do
(when-not (s/valid? ::spec/event-vector dispatch)
(e/expound ::spec/event-vector dispatch)
(throw (ex-info "Invalid dispatch value"
(s/explain-data ::spec/event-vector dispatch))))
dispatch)))
(defn stop-controller [ctx {:keys [stop] :as controller}]
(log/debug {:type :controller-stop
:controller controller
:ctx ctx})
(cond
(vector? stop) stop
(ifn? stop) (validate-and-dispatch! (stop ctx))))
(defn start-controller [ctx {:keys [last-params start] :as controller}]
(log/debug {:type :controller-start
:controller controller
:ctx ctx})
(when start
(cond
(vector? start) (conj start last-params)
(ifn? start) (validate-and-dispatch! (start ctx last-params)))))
(defn controller-actions [controllers route]
(reduce (fn [actions {:keys [id last-params params start stop]}]
(let [current-params (process-params params route)
controller {:id id
:start start
:stop stop
:last-params current-params}]
(match [last-params current-params (= last-params current-params)]
[_ _ true] actions
[nil _ false] (update actions :start conj controller)
[_ nil false] (update actions :stop conj controller)
[_ _ false] (-> actions
(update :stop conj controller)
(update :start conj controller)))))
{}
controllers))
(defn update-controllers [controllers new-controllers]
(let [id->new-controller (->> new-controllers
(map (juxt :id identity))
(into {}))]
(map (fn [{:keys [id] :as controller}]
(if-let [updated-controller (id->new-controller id)]
(assoc controller :last-params (:last-params updated-controller))
controller))
controllers)))
(rf/reg-event-fx ::start-controllers
(fn [_ [_ dispatches]]
{:dispatch-n dispatches}))
(defn controller-effects [controllers ctx route]
(let [{:keys [start stop]} (controller-actions controllers route)
start-dispatches (map #(start-controller ctx %) start)
stop-dispatches (map #(stop-controller ctx %) stop)
dispatch-n (cond
(and (seq start) (seq stop)) (conj stop-dispatches
[::start-controllers start-dispatches])
(seq start) start-dispatches
(seq stop) stop-dispatches)]
{:update-controllers (concat start stop)
:dispatch-n dispatch-n}))
(rf/reg-fx :update-controllers
(fn [new-controllers]
(swap! state/controllers update-controllers new-controllers)))
|
23659715890064f7beff584fd5af95a79ab8541ef98f3d19543b2a0217251e6d | toothbrush/dotfs | HelperParsers.hs | module System.DotFS.Core.HelperParsers where
import System.DotFS.Core.Datatypes
import Control.Monad (join)
import Text.Parsec
import Text.Parsec.String
eatEverything :: VarParser String
eatEverything = many anyChar
-- new combinator: (source: -January/003123.html)
many1Till :: Show end => VarParser a -> VarParser end -> VarParser [a]
many1Till p end = do notFollowedBy' end
p1 <- p
ps <- manyTill p end
return (p1:ps) where
notFollowedBy' :: Show a => GenParser tok st a -> GenParser tok st ()
notFollowedBy' p = try $ join $ do a <- try p
return (unexpected (show a))
<|> return (return ())
-- combinator that outputs the state tupled with the parse result
includeState :: GenParser s st a -> GenParser s st (a,st)
includeState p = do{ res <- p
; state <- getState
; return (res,state)
}
-- parseTest adepted to accept an initial state
parseTest p st inp = case runParser (includeState p) st "" inp of
(Left err) -> do{ putStr "parse error at "
; print err
}
(Right (x,state)) -> case x of
Vanilla -> putStrLn "Vanilla"
Annotated h b -> putStrLn "Annotated"
| null | https://raw.githubusercontent.com/toothbrush/dotfs/36c7e62bda235728ffbb501fe1d2c34210a870a8/System/DotFS/Core/HelperParsers.hs | haskell | new combinator: (source: -January/003123.html)
combinator that outputs the state tupled with the parse result
parseTest adepted to accept an initial state | module System.DotFS.Core.HelperParsers where
import System.DotFS.Core.Datatypes
import Control.Monad (join)
import Text.Parsec
import Text.Parsec.String
eatEverything :: VarParser String
eatEverything = many anyChar
many1Till :: Show end => VarParser a -> VarParser end -> VarParser [a]
many1Till p end = do notFollowedBy' end
p1 <- p
ps <- manyTill p end
return (p1:ps) where
notFollowedBy' :: Show a => GenParser tok st a -> GenParser tok st ()
notFollowedBy' p = try $ join $ do a <- try p
return (unexpected (show a))
<|> return (return ())
includeState :: GenParser s st a -> GenParser s st (a,st)
includeState p = do{ res <- p
; state <- getState
; return (res,state)
}
parseTest p st inp = case runParser (includeState p) st "" inp of
(Left err) -> do{ putStr "parse error at "
; print err
}
(Right (x,state)) -> case x of
Vanilla -> putStrLn "Vanilla"
Annotated h b -> putStrLn "Annotated"
|
0b347b289895b88f3cc6fb5ee8ce11972b4613e6af199ae909cf49f3b209f8a9 | LambdaScientist/CLaSH-by-example | TestBusSignals.hs | # LANGUAGE NoImplicitPrelude #
# LANGUAGE RecordWildCards #
module InAndOut.TestBusSignals where
import CLaSH.Prelude
import SAFE.TestingTools
import SAFE.CommonClash
import InAndOut.Models.BusSignals
import Text.PrettyPrint.HughesPJClass
import GHC.Generics (Generic)
import Control.DeepSeq
configurationList :: [Config]
configurationList = [configOne, configTwo, configThree, configFour]
where
startSt = St 0
inputOne = PIn 0 0 0
configOne = Config inputOne startSt
inputTwo = PIn 0 0 1
configTwo = Config inputTwo startSt
inputThree = PIn 0 1 0
configThree = Config inputThree startSt
inputFour = PIn 1 1 1
configFour = Config inputFour startSt
---TESTING
data Config = Config { input :: PIn
, startSt :: St
}deriving(Eq,Show)
instance Pretty Config where
pPrint Config{..} = text "Config:"
$+$ text "input =" <+> pPrint input
$+$ text "startSt =" <+> pPrint startSt
instance Transition Config where
runOneTest = runOneTest'
instance NFData Config where
rnf a = seq a ()
setupTest :: Config -> Signal St
setupTest (Config pin st) = topEntity' st sPin
where
sPin = signal pin
setupAndRun :: [[TestResult]]
setupAndRun = runConfigList setupTest configurationList
ppSetupAndRun :: Doc
ppSetupAndRun = pPrint setupAndRun
| null | https://raw.githubusercontent.com/LambdaScientist/CLaSH-by-example/e783cd2f2408e67baf7f36c10398c27036a78ef3/HaskellClashExamples/src/InAndOut/TestBusSignals.hs | haskell | -TESTING | # LANGUAGE NoImplicitPrelude #
# LANGUAGE RecordWildCards #
module InAndOut.TestBusSignals where
import CLaSH.Prelude
import SAFE.TestingTools
import SAFE.CommonClash
import InAndOut.Models.BusSignals
import Text.PrettyPrint.HughesPJClass
import GHC.Generics (Generic)
import Control.DeepSeq
configurationList :: [Config]
configurationList = [configOne, configTwo, configThree, configFour]
where
startSt = St 0
inputOne = PIn 0 0 0
configOne = Config inputOne startSt
inputTwo = PIn 0 0 1
configTwo = Config inputTwo startSt
inputThree = PIn 0 1 0
configThree = Config inputThree startSt
inputFour = PIn 1 1 1
configFour = Config inputFour startSt
data Config = Config { input :: PIn
, startSt :: St
}deriving(Eq,Show)
instance Pretty Config where
pPrint Config{..} = text "Config:"
$+$ text "input =" <+> pPrint input
$+$ text "startSt =" <+> pPrint startSt
instance Transition Config where
runOneTest = runOneTest'
instance NFData Config where
rnf a = seq a ()
setupTest :: Config -> Signal St
setupTest (Config pin st) = topEntity' st sPin
where
sPin = signal pin
setupAndRun :: [[TestResult]]
setupAndRun = runConfigList setupTest configurationList
ppSetupAndRun :: Doc
ppSetupAndRun = pPrint setupAndRun
|
bc90c673af4c5391449514254e42f219b4a7a778d3189e3314170dd6aafeaca6 | mfikes/fifth-postulate | ns445.cljs | (ns fifth-postulate.ns445)
;; All solve-forNN vars previously repeated the same body verbatim.
;; They now share one implementation; each numbered var is an alias,
;; so every existing call site keeps working unchanged.
(defn solve-for
  "Returns, as lists, every quadruple (xs[i] xs[j] xs[k] xs[l]) with
  strictly increasing indices i < j < k < l whose elements sum to v."
  [xs v]
  (for [ndx0 (range 0 (- (count xs) 3))
        ndx1 (range (inc ndx0) (- (count xs) 2))
        ndx2 (range (inc ndx1) (- (count xs) 1))
        ndx3 (range (inc ndx2) (count xs))
        :when (= v (+ (xs ndx0) (xs ndx1) (xs ndx2) (xs ndx3)))]
    (list (xs ndx0) (xs ndx1) (xs ndx2) (xs ndx3))))

(def solve-for01 solve-for)
(def solve-for02 solve-for)
(def solve-for03 solve-for)
(def solve-for04 solve-for)
(def solve-for05 solve-for)
(def solve-for06 solve-for)
(def solve-for07 solve-for)
(def solve-for08 solve-for)
(def solve-for09 solve-for)
(def solve-for10 solve-for)
(def solve-for11 solve-for)
(def solve-for12 solve-for)
(def solve-for13 solve-for)
(def solve-for14 solve-for)
(def solve-for15 solve-for)
(def solve-for16 solve-for)
(def solve-for17 solve-for)
(def solve-for18 solve-for)
(def solve-for19 solve-for)
| null | https://raw.githubusercontent.com/mfikes/fifth-postulate/22cfd5f8c2b4a2dead1c15a96295bfeb4dba235e/src/fifth_postulate/ns445.cljs | clojure | (ns fifth-postulate.ns445)
;; All solve-forNN vars previously repeated the same body verbatim.
;; They now share one implementation; each numbered var is an alias,
;; so every existing call site keeps working unchanged.
(defn solve-for
  "Returns, as lists, every quadruple (xs[i] xs[j] xs[k] xs[l]) with
  strictly increasing indices i < j < k < l whose elements sum to v."
  [xs v]
  (for [ndx0 (range 0 (- (count xs) 3))
        ndx1 (range (inc ndx0) (- (count xs) 2))
        ndx2 (range (inc ndx1) (- (count xs) 1))
        ndx3 (range (inc ndx2) (count xs))
        :when (= v (+ (xs ndx0) (xs ndx1) (xs ndx2) (xs ndx3)))]
    (list (xs ndx0) (xs ndx1) (xs ndx2) (xs ndx3))))

(def solve-for01 solve-for)
(def solve-for02 solve-for)
(def solve-for03 solve-for)
(def solve-for04 solve-for)
(def solve-for05 solve-for)
(def solve-for06 solve-for)
(def solve-for07 solve-for)
(def solve-for08 solve-for)
(def solve-for09 solve-for)
(def solve-for10 solve-for)
(def solve-for11 solve-for)
(def solve-for12 solve-for)
(def solve-for13 solve-for)
(def solve-for14 solve-for)
(def solve-for15 solve-for)
(def solve-for16 solve-for)
(def solve-for17 solve-for)
(def solve-for18 solve-for)
(def solve-for19 solve-for)
| |
da14d971d5c923b66c0cb9e039700184c842b3fb512ea95fd153af46287541ab | TyOverby/mono | main.ml | open! Core
open! Bonsai_web
open Bonsai.Let_syntax
(* Root Bonsai component: a boolean [is_connected] state (default
   false), the reusable "not connected" warning box driven by that
   state, and a button that flips the state plus explanatory text.
   The connection is only simulated; nothing network-related happens. *)
let component =
  let%sub is_connected, set_is_connected =
    Bonsai.state [%here] (module Bool) ~default_model:false
  in
  let%sub not_connected_warning_box =
    Bonsai_web_ui_not_connected_warning_box.(
      component ~create_message:message_for_async_durable is_connected)
  in
  return
    (let%map not_connected_warning_box = not_connected_warning_box
     and is_connected = is_connected
     and set_is_connected = set_is_connected in
     Vdom.Node.div
       [ not_connected_warning_box
       ; Vdom.Node.button
           ~attr:(Vdom.Attr.on_click (fun _ -> set_is_connected (not is_connected)))
           [ Vdom.Node.text "toggle is_connected" ]
       ; Vdom.Node.text
           "This button simulates connecting and disconnecting from the server. This \
            example does not actually affect the connection, since it's sole purpose is \
            to demonstrate the appearance of the warning box that is displayed at the \
            bottom-right of the page"
       ])
;;
(* Mount the component into the DOM element with id "app". *)
let (_ : _ Start.Handle.t) =
  Start.start Start.Result_spec.just_the_view ~bind_to_element_with_id:"app" component
;;
| null | https://raw.githubusercontent.com/TyOverby/mono/9f361de248f67441dd1486419ba19044b6fa4fad/app/bonsai-examples/not_connected_warning_box/main.ml | ocaml | open! Core
open! Bonsai_web
open Bonsai.Let_syntax
let component =
let%sub is_connected, set_is_connected =
Bonsai.state [%here] (module Bool) ~default_model:false
in
let%sub not_connected_warning_box =
Bonsai_web_ui_not_connected_warning_box.(
component ~create_message:message_for_async_durable is_connected)
in
return
(let%map not_connected_warning_box = not_connected_warning_box
and is_connected = is_connected
and set_is_connected = set_is_connected in
Vdom.Node.div
[ not_connected_warning_box
; Vdom.Node.button
~attr:(Vdom.Attr.on_click (fun _ -> set_is_connected (not is_connected)))
[ Vdom.Node.text "toggle is_connected" ]
; Vdom.Node.text
"This button simulates connecting and disconnecting from the server. This \
example does not actually affect the connection, since it's sole purpose is \
to demonstrate the appearance of the warning box that is displayed at the \
bottom-right of the page"
])
;;
let (_ : _ Start.Handle.t) =
Start.start Start.Result_spec.just_the_view ~bind_to_element_with_id:"app" component
;;
| |
0af42570c01cb5c963d46f37d6f46e4bd91f36bfa932bbd4ec8e031655a74322 | fwcd/curry-language-server | Handlers.hs | module Curry.LanguageServer.Handlers (handlers) where
import Curry.LanguageServer.Handlers.CodeAction (codeActionHandler)
import Curry.LanguageServer.Handlers.CodeLens (codeLensHandler)
import Curry.LanguageServer.Handlers.Command (commandHandler)
import Curry.LanguageServer.Handlers.Completion (completionHandler)
import Curry.LanguageServer.Handlers.Definition (definitionHandler)
import Curry.LanguageServer.Handlers.DocumentSymbols (documentSymbolHandler)
import Curry.LanguageServer.Handlers.Hover (hoverHandler)
import Curry.LanguageServer.Handlers.Initialized (initializedHandler)
import Curry.LanguageServer.Handlers.SignatureHelp (signatureHelpHandler)
import Curry.LanguageServer.Handlers.TextDocument (didOpenHandler, didChangeHandler, didSaveHandler, didCloseHandler)
import Curry.LanguageServer.Handlers.WorkspaceSymbols (workspaceSymbolHandler)
import Curry.LanguageServer.Monad (LSM)
import qualified Language.LSP.Server as S
-- | Every LSP request and notification handler this server registers,
-- merged into a single handler map with 'mconcat'.
handlers :: S.Handlers LSM
handlers = mconcat
    [ -- Request handlers
      completionHandler
    , commandHandler
    , definitionHandler
    , documentSymbolHandler
    , hoverHandler
    , workspaceSymbolHandler
    , codeActionHandler
    , codeLensHandler
    , signatureHelpHandler
      -- Notification handlers
    , initializedHandler
    , didOpenHandler
    , didChangeHandler
    , didSaveHandler
    , didCloseHandler
    ]
| null | https://raw.githubusercontent.com/fwcd/curry-language-server/1e4867e951c1323dd2e94cd6c61741a8079df322/src/Curry/LanguageServer/Handlers.hs | haskell | Request handlers
Notification handlers | module Curry.LanguageServer.Handlers (handlers) where
import Curry.LanguageServer.Handlers.CodeAction (codeActionHandler)
import Curry.LanguageServer.Handlers.CodeLens (codeLensHandler)
import Curry.LanguageServer.Handlers.Command (commandHandler)
import Curry.LanguageServer.Handlers.Completion (completionHandler)
import Curry.LanguageServer.Handlers.Definition (definitionHandler)
import Curry.LanguageServer.Handlers.DocumentSymbols (documentSymbolHandler)
import Curry.LanguageServer.Handlers.Hover (hoverHandler)
import Curry.LanguageServer.Handlers.Initialized (initializedHandler)
import Curry.LanguageServer.Handlers.SignatureHelp (signatureHelpHandler)
import Curry.LanguageServer.Handlers.TextDocument (didOpenHandler, didChangeHandler, didSaveHandler, didCloseHandler)
import Curry.LanguageServer.Handlers.WorkspaceSymbols (workspaceSymbolHandler)
import Curry.LanguageServer.Monad (LSM)
import qualified Language.LSP.Server as S
handlers :: S.Handlers LSM
handlers = mconcat
completionHandler
, commandHandler
, definitionHandler
, documentSymbolHandler
, hoverHandler
, workspaceSymbolHandler
, codeActionHandler
, codeLensHandler
, signatureHelpHandler
, initializedHandler
, didOpenHandler
, didChangeHandler
, didSaveHandler
, didCloseHandler
]
|
90d0df80ab262861638da60863191659e3be784ab6b04d4b78780adb76e7cbe5 | foshardware/lsc | Entropy.hs | Copyright 2018 - < >
SPDX - License - Identifier : GPL-3.0 - or - later
module LSC.Entropy
( nonDeterministic
, entropyVectorInt, entropyVector32
, Permutation, randomPermutation
, module System.Random.MWC
) where
import Control.Monad.Primitive
import Data.ByteString hiding (replicate)
import Data.Serialize.Get
import Data.Vector
import Data.Vector.Mutable (unsafeSwap)
import Data.Word
import Prelude hiding (replicate, sequence_)
import System.Entropy
import System.IO
import System.Random.MWC
-- Run an action that needs a random generator.  With no handle the
-- generator is seeded from 258 freshly gathered 32-bit entropy words;
-- with a handle, the 258 * 4 seed bytes are read from that handle
-- instead and decoded as big-endian Word32s.
nonDeterministic :: PrimBase m => Maybe Handle -> (Gen (PrimState m) -> m a) -> IO a
nonDeterministic Nothing action = do
    v <- entropyVector32 258
    unsafePrimToIO $ action =<< initialize v
nonDeterministic (Just handle) action = do
    seed <- hGet handle $ 258 * 4
    v <- either fail pure $ replicateM 258 getWord32be `runGet` seed
    unsafePrimToIO $ action =<< initialize v
# INLINABLE nonDeterministic #
type Permutation = Vector Int
-- | This function does not reach all possible permutations for lists
-- consisting of more than 969 elements. Any PRNG's possible states
-- are bound by its possible seed values.
-- In the case of MWC8222 the period is 2^8222 which allows for
-- not more than 969! different states.
--
-- seed bits: 8222
-- maximum list length: 969
--
-- 969! =~ 2^8222
--
-- Monotonicity of n!/(2^n):
--
-- desired seed bits: 256909
-- desired list length: 20000
--
-- 20000! =~ 2^256909
--
-- Fisher–Yates shuffle of the identity permutation [0 .. n-1]:
-- each position i (left to right) is swapped with an index drawn
-- uniformly from [i, n-1].
randomPermutation :: PrimBase m => Int -> Gen (PrimState m) -> m Permutation
randomPermutation n gen = do
    v <- unsafeThaw $ generate n id
    sequence_ $ generate (n - 1) $ \ i -> unsafeSwap v i =<< uniformR (i, n - 1) gen
    unsafeFreeze v
# INLINABLE randomPermutation #
-- | Gather @4 * n@ bytes via 'getEntropy' and decode them as @n@
-- big-endian 32-bit words; a decode failure aborts with 'fail'.
entropyVector32 :: Int -> IO (Vector Word32)
entropyVector32 n = do
    bytes <- getEntropy (4 * n)
    case runGet (replicateM n getWord32be) bytes of
        Left err -> fail err
        Right ws -> pure ws
-- | Gather @8 * n@ bytes via 'getEntropy', decode them as @n@
-- big-endian 64-bit ints, and widen each to 'Int'; a decode failure
-- aborts with 'fail'.
entropyVectorInt :: Int -> IO (Vector Int)
entropyVectorInt n = do
    bytes <- getEntropy (8 * n)
    case runGet (replicateM n getInt64be) bytes of
        Left err -> fail err
        Right ws -> pure (fromIntegral <$> ws)
| null | https://raw.githubusercontent.com/foshardware/lsc/006c245a89b0a0056286205917438c7d031d04b9/src/LSC/Entropy.hs | haskell | | This function does not reach all possible permutations for lists
are bound by its possible seed values.
not more than 969! different states.
seed bits: 8222
| Copyright 2018 - < >
SPDX - License - Identifier : GPL-3.0 - or - later
module LSC.Entropy
( nonDeterministic
, entropyVectorInt, entropyVector32
, Permutation, randomPermutation
, module System.Random.MWC
) where
import Control.Monad.Primitive
import Data.ByteString hiding (replicate)
import Data.Serialize.Get
import Data.Vector
import Data.Vector.Mutable (unsafeSwap)
import Data.Word
import Prelude hiding (replicate, sequence_)
import System.Entropy
import System.IO
import System.Random.MWC
nonDeterministic :: PrimBase m => Maybe Handle -> (Gen (PrimState m) -> m a) -> IO a
nonDeterministic Nothing action = do
v <- entropyVector32 258
unsafePrimToIO $ action =<< initialize v
nonDeterministic (Just handle) action = do
seed <- hGet handle $ 258 * 4
v <- either fail pure $ replicateM 258 getWord32be `runGet` seed
unsafePrimToIO $ action =<< initialize v
# INLINABLE nonDeterministic #
type Permutation = Vector Int
consisting of more than 969 elements . Any PRNGs possible states
In the case of MWC8222 the period is 2 ^ 8222 which allows for
maximum list length : 969
969 ! = ~ 2 ^ 8222
Monotonicity of n ! / ( 2^n ):
desired seed bits : 256909
desired list length : 20000
20000 ! = ~ 2 ^ 256909
randomPermutation :: PrimBase m => Int -> Gen (PrimState m) -> m Permutation
randomPermutation n gen = do
v <- unsafeThaw $ generate n id
sequence_ $ generate (n - 1) $ \ i -> unsafeSwap v i =<< uniformR (i, n - 1) gen
unsafeFreeze v
# INLINABLE randomPermutation #
entropyVector32 :: Int -> IO (Vector Word32)
entropyVector32 n = do
seed <- getEntropy $ 4 * n
either fail pure $ replicateM n getWord32be `runGet` seed
entropyVectorInt :: Int -> IO (Vector Int)
entropyVectorInt n = do
seed <- getEntropy $ 8 * n
either fail (pure . fmap fromIntegral) $ replicateM n getInt64be `runGet` seed
|
9ae8fff3c8559c5b0a6005cb455a3d3fb362c4143c822c2a85edc26dc3a939fb | BinaryAnalysisPlatform/bap-plugins | simple.ml | open Bap.Std
open Core_kernel
open Options
open Ctxt
(* Apply the optionally-enabled simplification passes to [sub_path].
   Whichever passes are off are replaced by a plain SSA conversion,
   except that fold_consts already produces SSA form itself. *)
let simplify ctxt sub_path =
  let arch = Project.arch ctxt.project in
  let mem_to_reg = Mem_to_reg.analyze in
  let fold_consts = Fold_consts.analyze ~fixsp:false arch in
  let o = ctxt.options in
  match o.mem_to_reg, o.fold_consts with
  | true, true -> sub_path |> mem_to_reg |> fold_consts
  | true, false -> sub_path |> mem_to_reg |> Sub.ssa
  | false, true -> sub_path |> fold_consts (* fold_consts ssa's by default *)
  | false, false -> sub_path |> Sub.ssa
| null | https://raw.githubusercontent.com/BinaryAnalysisPlatform/bap-plugins/2e9aa5c7c24ef494d0e7db1b43c5ceedcb4196a8/minos/simple.ml | ocaml | fold_consts ssa's by default | open Bap.Std
open Core_kernel
open Options
open Ctxt
let simplify ctxt sub_path =
let arch = Project.arch ctxt.project in
let mem_to_reg = Mem_to_reg.analyze in
let fold_consts =
Fold_consts.analyze ~fixsp:false arch in
let o = ctxt.options in
match (o.mem_to_reg, o.fold_consts) with
| true,true ->
let res = sub_path in
let res2 = mem_to_reg res in
let res3 = fold_consts res2 in
res3
| true,false -> sub_path |> mem_to_reg |> Sub.ssa
| _,_ -> sub_path |> Sub.ssa
|
7ff0c11ab6063e21fc3864f912b55751875d6506614069a61927e5a7d1feab8a | ygrek/mldonkey | list2.mli | Copyright 2001 , 2002 b8_bavard , b8_fee_carabine ,
This file is part of mldonkey .
mldonkey is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation ; either version 2 of the License , or
( at your option ) any later version .
mldonkey is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with mldonkey ; if not , write to the Free Software
Foundation , Inc. , 59 Temple Place , Suite 330 , Boston , MA 02111 - 1307 USA
This file is part of mldonkey.
mldonkey is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
mldonkey is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mldonkey; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*)
val removeq : 'a -> 'a list -> 'a list
(*d [removeq ele list] returns a copy of [list] where all memory occurrences
    of [ele] have been removed. *)
val remove : 'a -> 'a list -> 'a list
(*d [remove ele list] returns a copy of [list] where all structural occurrences
    of [ele] have been removed. *)
val removeq_first : 'a -> 'a list -> 'a list
d [ removeq_first ele list ] returns a copy of [ list ] where the first memory
occurence of [ ele ] has been removed .
occurence of [ele] has been removed. *)
val remove_first : 'a -> 'a list -> 'a list
d [ remove_first ele list ] returns a copy of [ list ] where the first
structural occurence of [ ele ] has been removed .
structural occurence of [ele] has been removed. *)
val cut: int -> 'a list -> 'a list * 'a list
val tail_map : ('a -> 'b) -> 'a list -> 'b list
val assoc_inv : 'a -> ('b * 'a) list -> 'b
val safe_iter : ('a -> unit) -> 'a list -> unit
val min : 'a list -> 'a
val max : 'a list -> 'a
val shuffle: 'a list -> 'a list
(** [filter_map f l] *)
val filter_map : ('a -> 'b option) -> 'a list -> 'b list
* [ iteri f l ] call [ f ] on each element of [ l ] with the corresponding index , starting from zero
val iteri : (int -> 'a -> unit) -> 'a list -> unit
| null | https://raw.githubusercontent.com/ygrek/mldonkey/333868a12bb6cd25fed49391dd2c3a767741cb51/src/utils/cdk/list2.mli | ocaml | d [removeq ele list] returns a copy of [list] where all memory occurences
of [ele] have been removed.
d [remove ele list] returns a copy of [list] where all structural occurences
of [ele] have been removed.
* [filter_map f l] | Copyright 2001 , 2002 b8_bavard , b8_fee_carabine ,
This file is part of mldonkey .
mldonkey is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation ; either version 2 of the License , or
( at your option ) any later version .
mldonkey is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with mldonkey ; if not , write to the Free Software
Foundation , Inc. , 59 Temple Place , Suite 330 , Boston , MA 02111 - 1307 USA
This file is part of mldonkey.
mldonkey is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
mldonkey is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mldonkey; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*)
val removeq : 'a -> 'a list -> 'a list
val remove : 'a -> 'a list -> 'a list
val removeq_first : 'a -> 'a list -> 'a list
d [ removeq_first ele list ] returns a copy of [ list ] where the first memory
occurence of [ ele ] has been removed .
occurence of [ele] has been removed. *)
val remove_first : 'a -> 'a list -> 'a list
d [ remove_first ele list ] returns a copy of [ list ] where the first
structural occurence of [ ele ] has been removed .
structural occurence of [ele] has been removed. *)
val cut: int -> 'a list -> 'a list * 'a list
val tail_map : ('a -> 'b) -> 'a list -> 'b list
val assoc_inv : 'a -> ('b * 'a) list -> 'b
val safe_iter : ('a -> unit) -> 'a list -> unit
val min : 'a list -> 'a
val max : 'a list -> 'a
val shuffle: 'a list -> 'a list
val filter_map : ('a -> 'b option) -> 'a list -> 'b list
* [ iteri f l ] call [ f ] on each element of [ l ] with the corresponding index , starting from zero
val iteri : (int -> 'a -> unit) -> 'a list -> unit
|
1d3bdd77f226edd65fee7b88f0f23942086e96f1caef5f2d0851de5f86d79a58 | erlware/erlware_commons | ec_semver.erl | %%% vi:ts=4 sw=4 et
%%%-------------------------------------------------------------------
( C ) 2011 , Erlware LLC
%%% @doc
Helper functions for working with semver versioning strings .
%%% See / for the spec.
%%% @end
%%%-------------------------------------------------------------------
-module(ec_semver).
-export([parse/1,
format/1,
eql/2,
gt/2,
gte/2,
lt/2,
lte/2,
pes/2,
between/3]).
%% For internal use by the ec_semver_parser peg
-export([internal_parse_version/1]).
-export_type([semver/0,
version_string/0,
any_version/0]).
%%%===================================================================
%%% Public Types
%%%===================================================================
-type version_element() :: non_neg_integer() | binary().
-type major_minor_patch_minpatch() ::
version_element()
| {version_element(), version_element()}
| {version_element(), version_element(), version_element()}
| {version_element(), version_element(),
version_element(), version_element()}.
-type alpha_part() :: integer() | binary() | string().
-type alpha_info() :: {PreRelease::[alpha_part()],
BuildVersion::[alpha_part()]}.
-type semver() :: {major_minor_patch_minpatch(), alpha_info()}.
-type version_string() :: string() | binary().
-type any_version() :: version_string() | semver().
%%%===================================================================
%%% API
%%%===================================================================
%% @doc parse a string or binary into a valid semver representation.
%% Input that does not parse as a semver is kept whole as an opaque
%% binary version with empty pre-release/build parts; anything that is
%% neither a list nor a binary is assumed to be parsed already.
-spec parse(any_version()) -> semver().
parse(Vsn) when erlang:is_list(Vsn); erlang:is_binary(Vsn) ->
    case ec_semver_parser:parse(Vsn) of
        {fail, _} ->
            %% iolist_to_binary is the identity on binaries, so one
            %% fallback covers both input shapes.
            {erlang:iolist_to_binary(Vsn), {[], []}};
        Semver ->
            Semver
    end;
parse(AlreadyParsed) ->
    AlreadyParsed.
%% @doc format a parsed semver() back into an iolist: the version
%% components are joined with ".", then the pre-release part is
%% appended after "-" and the build part after "+".  One clause per
%% shape of the major/minor/patch/minpatch element.
-spec format(semver()) -> iolist().
format({Maj, {AlphaPart, BuildPart}})
  when erlang:is_integer(Maj);
       erlang:is_binary(Maj) ->
    [format_version_part(Maj),
     format_vsn_rest(<<"-">>, AlphaPart),
     format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min}, {AlphaPart, BuildPart}}) ->
    [format_version_part(Maj), ".",
     format_version_part(Min),
     format_vsn_rest(<<"-">>, AlphaPart),
     format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min, Patch}, {AlphaPart, BuildPart}}) ->
    [format_version_part(Maj), ".",
     format_version_part(Min), ".",
     format_version_part(Patch),
     format_vsn_rest(<<"-">>, AlphaPart),
     format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min, Patch, MinPatch}, {AlphaPart, BuildPart}}) ->
    [format_version_part(Maj), ".",
     format_version_part(Min), ".",
     format_version_part(Patch), ".",
     format_version_part(MinPatch),
     format_vsn_rest(<<"-">>, AlphaPart),
     format_vsn_rest(<<"+">>, BuildPart)].
%% @doc render a single version component: integers become their
%% decimal string form, binaries are passed through unchanged.
-spec format_version_part(integer() | binary()) -> iolist().
format_version_part(Part) when erlang:is_binary(Part) ->
    Part;
format_version_part(Part) when erlang:is_integer(Part) ->
    erlang:integer_to_list(Part).
%% @doc test for equality between two semver versions, compared after
%% parsing and normalization.
-spec eql(any_version(), any_version()) -> boolean().
eql(VsnA, VsnB) ->
    normalize(parse(VsnA)) =:= normalize(parse(VsnB)).
%% @doc Test that VsnA is greater than VsnB
%%
%% Precedence, in order:
%%   1. the major/minor/patch tuples are compared numerically;
%%   2. for equal tuples, a version WITHOUT a pre-release part beats
%%      one WITH a pre-release part; otherwise the pre-release parts
%%      are compared with the ordinary term order;
%%   3. for equal tuples and equal pre-release parts, a version WITH
%%      a build part beats one without; otherwise the build parts are
%%      compared with the ordinary term order.
-spec gt(any_version(), any_version()) -> boolean().
gt(VsnA, VsnB) ->
    %% AlphaX is the pre-release part, PatchX the build part.
    {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
    {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
    ((MMPA > MMPB)
     orelse
     ((MMPA =:= MMPB)
      andalso
      ((AlphaA =:= [] andalso AlphaB =/= [])
       orelse
       ((not (AlphaB =:= [] andalso AlphaA =/= []))
        andalso
        (AlphaA > AlphaB))))
     orelse
     ((MMPA =:= MMPB)
      andalso
      (AlphaA =:= AlphaB)
      andalso
      ((PatchB =:= [] andalso PatchA =/= [])
       orelse
       PatchA > PatchB))).
%% @doc Test that VsnA is greater than or equal to VsnB
-spec gte(any_version(), any_version()) -> boolean().
gte(VsnA, VsnB) ->
    NormA = normalize(parse(VsnA)),
    NormB = normalize(parse(VsnB)),
    %% both comparisons are pure, so the order of the disjuncts is free
    eql(NormA, NormB) orelse gt(NormA, NormB).
%% @doc Test that VsnA is less than VsnB
-spec lt(any_version(), any_version()) -> boolean().
lt(VsnA, VsnB) ->
    {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
    {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
    %% a smaller major/minor/patch/minpatch tuple always loses outright
    ((MMPA < MMPB)
     orelse
       %% equal numeric part: any pre-release sorts below the plain
       %% release; otherwise the pre-release tag lists compare
       %% structurally via Erlang term order (mirror image of gt/2)
       ((MMPA =:= MMPB)
        andalso
        ((AlphaB =:= [] andalso AlphaA =/= [])
         orelse
           ((not (AlphaA =:= [] andalso AlphaB =/= []))
            andalso
            (AlphaA < AlphaB))))
     orelse
       %% numeric and pre-release parts equal: fall back to the build
       %% metadata, where lacking build info sorts below having it
       ((MMPA =:= MMPB)
        andalso
        (AlphaA =:= AlphaB)
        andalso
        ((PatchA =:= [] andalso PatchB =/= [])
         orelse
           PatchA < PatchB))).
%% @doc Test that VsnA is less than or equal to VsnB
-spec lte(any_version(), any_version()) -> boolean().
lte(VsnA, VsnB) ->
    NormA = normalize(parse(VsnA)),
    NormB = normalize(parse(VsnB)),
    %% both comparisons are pure, so the order of the disjuncts is free
    eql(NormA, NormB) orelse lt(NormA, NormB).
%% @doc Test that VsnMatch is greater than or equal to Vsn1 and
%% less than or equal to Vsn2 (an inclusive range check)
-spec between(any_version(), any_version(), any_version()) -> boolean().
between(Vsn1, Vsn2, VsnMatch) ->
    Lower = normalize(parse(Vsn1)),
    Upper = normalize(parse(Vsn2)),
    Target = normalize(parse(VsnMatch)),
    %% already-parsed semvers pass through parse/1 unchanged, so
    %% handing normalized values to gte/lte is safe
    gte(Target, Lower) andalso lte(Target, Upper).
%% @doc check that VsnA is Approximately greater than VsnB
%%
%% Specifying ">= 2.6.5" is an optimistic version constraint. All
%% versions greater than the one specified, including major releases
%% (e.g. 3.0.0) are allowed.
%%
%% Conversely, specifying "~> 2.6" is pessimistic about future major
%% revisions and "~> 2.6.5" is pessimistic about future minor
%% revisions.
%%
%% "~> 2.6" matches cookbooks >= 2.6.0 AND &lt; 3.0.0
%% "~> 2.6.5" matches cookbooks >= 2.6.5 AND &lt; 2.7.0
%%
%% Spec added for consistency with the other exported comparisons.
-spec pes(any_version(), any_version()) -> boolean().
pes(VsnA, VsnB) ->
    internal_pes(parse(VsnA), parse(VsnB)).
%%%===================================================================
%%% Friend Functions
%%%===================================================================
%% @doc helper function for the peg grammar to parse the iolist into a semver
-spec internal_parse_version(iolist()) -> semver().
%% The grammar hands us [MajorMinorPatch, AlphaSection, BuildSection, Rest];
%% the fourth element is ignored (presumably trailing parser state —
%% confirm against ec_semver_parser if this ever changes).
internal_parse_version([MMP, AlphaPart, BuildPart, _]) ->
    {parse_major_minor_patch_minpatch(MMP), {parse_alpha_part(AlphaPart),
                                             parse_alpha_part(BuildPart)}}.
%% @doc helper function for the peg grammar to parse the iolist into a major_minor_patch
-spec parse_major_minor_patch_minpatch(iolist()) -> major_minor_patch_minpatch().
%% major element only, e.g. "1"
parse_major_minor_patch_minpatch([MajVsn, [], [], []]) ->
    strip_maj_version(MajVsn);
%% major.minor, e.g. "1.2" — each extra element arrives as [<<".">>, Vsn]
parse_major_minor_patch_minpatch([MajVsn, [<<".">>, MinVsn], [], []]) ->
    {strip_maj_version(MajVsn), MinVsn};
%% major.minor.patch, e.g. "1.2.3"
parse_major_minor_patch_minpatch([MajVsn,
                                  [<<".">>, MinVsn],
                                  [<<".">>, PatchVsn], []]) ->
    {strip_maj_version(MajVsn), MinVsn, PatchVsn};
%% major.minor.patch.minpatch, e.g. "1.2.3.4"
parse_major_minor_patch_minpatch([MajVsn,
                                  [<<".">>, MinVsn],
                                  [<<".">>, PatchVsn],
                                  [<<".">>, MinPatch]]) ->
    {strip_maj_version(MajVsn), MinVsn, PatchVsn, MinPatch}.
%% @doc helper function for the peg grammar to parse the iolist into an alpha part
-spec parse_alpha_part(iolist()) -> [alpha_part()].
parse_alpha_part([]) ->
    [];
parse_alpha_part([_Marker, First, Others]) ->
    %% the leading element is always kept as a binary; each remaining
    %% dotted element may individually collapse to an integer
    Tail = lists:map(fun format_alpha_part/1, Others),
    [erlang:iolist_to_binary(First) | Tail].
%% @doc according to semver alpha parts that can be treated like
%% numbers must be. We implement that here by taking the alpha part
%% and trying to convert it to a number, if it succeeds we use
%% it. Otherwise we do not.
-spec format_alpha_part(iolist()) -> integer() | binary().
format_alpha_part([<<".">>, AlphaPart]) ->
    AsBinary = erlang:iolist_to_binary(AlphaPart),
    case catch erlang:list_to_integer(erlang:binary_to_list(AsBinary)) of
        Int when erlang:is_integer(Int) ->
            Int;
        _NotANumber ->
            AsBinary
    end.
%%%===================================================================
%%% Internal Functions
%%%===================================================================
%% Drop the optional leading "v" (as in "v1.2.3") from a parsed major
%% version element; anything else passes through unchanged.
-spec strip_maj_version(iolist()) -> version_element().
strip_maj_version([Prefix, MajVsn])
  when Prefix =:= <<"v">>; Prefix =:= [] ->
    MajVsn;
strip_maj_version(MajVsn) ->
    MajVsn.
%% Render an integer as a string; lists and binaries are already in a
%% printable form and pass through untouched.
-spec to_list(integer() | binary() | string()) -> string() | binary().
to_list(Value) when erlang:is_list(Value); erlang:is_binary(Value) ->
    Value;
to_list(Value) when erlang:is_integer(Value) ->
    erlang:integer_to_list(Value).
%% Render a pre-release or build section: the marker ("-" or "+"), the
%% first element verbatim, then "." ++ element for each remaining one.
%% An empty section renders as [] so it vanishes from the iolist.
-spec format_vsn_rest(binary() | string(), [integer() | binary()]) -> iolist().
format_vsn_rest(_TypeMark, []) ->
    [];
format_vsn_rest(TypeMark, [First | Remaining]) ->
    Dotted = lists:map(fun(Detail) -> [".", to_list(Detail)] end, Remaining),
    [TypeMark, First | Dotted].
%% @doc normalize the semver so they can be compared
%% Pads the numeric part out to four elements so that, e.g., "1",
%% "1.0" and "1.0.0.0" all carry the same tuple shape.
-spec normalize(semver()) -> semver().
normalize({Major, AlphaInfo})
  when erlang:is_binary(Major);
       erlang:is_integer(Major) ->
    {{Major, 0, 0, 0}, AlphaInfo};
normalize({{Major, Minor}, AlphaInfo}) ->
    {{Major, Minor, 0, 0}, AlphaInfo};
normalize({{Major, Minor, Patch}, AlphaInfo}) ->
    {{Major, Minor, Patch, 0}, AlphaInfo};
normalize({{_, _, _, _}, {_, _}} = AlreadyNormal) ->
    AlreadyNormal.
%% @doc to do the pessimistic compare we need a parsed semver. This is
%% the internal implementation of the pessimistic run. The
%% external just ensures that versions are parsed.
-spec internal_pes(semver(), semver()) -> boolean().
%% "~> M.m": at least M.m.0, strictly below the next major release
internal_pes(VsnA, {{LM, LMI}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI) ->
    gte(VsnA, {{LM, LMI, 0}, Alpha}) andalso
        lt(VsnA, {{LM + 1, 0, 0, 0}, {[], []}});
%% "~> M.m.p": at least M.m.p, strictly below the next minor release
internal_pes(VsnA, {{LM, LMI, LP}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI),
       erlang:is_integer(LP) ->
    gte(VsnA, {{LM, LMI, LP}, Alpha})
        andalso
        lt(VsnA, {{LM, LMI + 1, 0, 0}, {[], []}});
%% "~> M.m.p.q": at least M.m.p.q, strictly below the next patch release
internal_pes(VsnA, {{LM, LMI, LP, LMP}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI),
       erlang:is_integer(LP),
       erlang:is_integer(LMP) ->
    gte(VsnA, {{LM, LMI, LP, LMP}, Alpha})
        andalso
        lt(VsnA, {{LM, LMI, LP + 1, 0}, {[], []}});
%% non-numeric (or single-element) versions fall back to a plain >=
internal_pes(Vsn, LVsn) ->
    gte(Vsn, LVsn).
%%%===================================================================
%%% Test Functions
%%%===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% eql/2 must treat omitted elements as zero, ignore a leading "v",
%% and compare non-numeric versions structurally.
eql_test() ->
    ?assertMatch(true, eql("1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, eql("v1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, eql("1",
                           "1.0.0")),
    ?assertMatch(true, eql("v1",
                           "v1.0.0")),
    ?assertMatch(true, eql("1.0",
                           "1.0.0")),
    ?assertMatch(true, eql("1.0.0",
                           "1")),
    ?assertMatch(true, eql("1.0.0.0",
                           "1")),
    ?assertMatch(true, eql("1.0+alpha.1",
                           "1.0.0+alpha.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "1.0.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "v1.0.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-pre-alpha.1",
                           "1.0.0-pre-alpha.1")),
    ?assertMatch(true, eql("aa", "aa")),
    ?assertMatch(true, eql("AA.BB", "AA.BB")),
    ?assertMatch(true, eql("BBB-super", "BBB-super")),
    ?assertMatch(true, not eql("1.0.0",
                               "1.0.1")),
    ?assertMatch(true, not eql("1.0.0-alpha",
                               "1.0.1+alpha")),
    ?assertMatch(true, not eql("1.0.0+build.1",
                               "1.0.1+build.2")),
    ?assertMatch(true, not eql("1.0.0.0+build.1",
                               "1.0.0.1+build.2")),
    ?assertMatch(true, not eql("FFF", "BBB")),
    ?assertMatch(true, not eql("1", "1BBBB")).
gt_test() ->
?assertMatch(true, gt("1.0.0-alpha.1",
"1.0.0-alpha")),
?assertMatch(true, gt("1.0.0.1-alpha.1",
"1.0.0.1-alpha")),
?assertMatch(true, gt("1.0.0.4-alpha.1",
"1.0.0.2-alpha")),
?assertMatch(true, gt("1.0.0.0-alpha.1",
"1.0.0-alpha")),
?assertMatch(true, gt("1.0.0-beta.2",
"1.0.0-alpha.1")),
?assertMatch(true, gt("1.0.0-beta.11",
"1.0.0-beta.2")),
?assertMatch(true, gt("1.0.0-pre-alpha.14",
"1.0.0-pre-alpha.3")),
?assertMatch(true, gt("1.0.0-beta.11",
"1.0.0.0-beta.2")),
?assertMatch(true, gt("1.0.0-rc.1", "1.0.0-beta.11")),
?assertMatch(true, gt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
?assertMatch(true, gt("1.0.0", "1.0.0-rc.1+build.1")),
?assertMatch(true, gt("1.0.0+0.3.7", "1.0.0")),
?assertMatch(true, gt("1.3.7+build", "1.0.0+0.3.7")),
?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
"1.3.7+build")),
?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
"1.3.7.0+build")),
?assertMatch(true, gt("1.3.7+build.11.e0f985a",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, gt("aa.cc",
"aa.bb")),
?assertMatch(true, not gt("1.0.0-alpha",
"1.0.0-alpha.1")),
?assertMatch(true, not gt("1.0.0-alpha",
"1.0.0.0-alpha.1")),
?assertMatch(true, not gt("1.0.0-alpha.1",
"1.0.0-beta.2")),
?assertMatch(true, not gt("1.0.0-beta.2",
"1.0.0-beta.11")),
?assertMatch(true, not gt("1.0.0-beta.11",
"1.0.0-rc.1")),
?assertMatch(true, not gt("1.0.0-pre-alpha.3",
"1.0.0-pre-alpha.14")),
?assertMatch(true, not gt("1.0.0-rc.1",
"1.0.0-rc.1+build.1")),
?assertMatch(true, not gt("1.0.0-rc.1+build.1",
"1.0.0")),
?assertMatch(true, not gt("1.0.0",
"1.0.0+0.3.7")),
?assertMatch(true, not gt("1.0.0+0.3.7",
"1.3.7+build")),
?assertMatch(true, not gt("1.3.7+build",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, not gt("1.3.7+build.2.b8f12d7",
"1.3.7+build.11.e0f985a")),
?assertMatch(true, not gt("1.0.0-alpha",
"1.0.0-alpha")),
?assertMatch(true, not gt("1",
"1.0.0")),
?assertMatch(true, not gt("aa.bb",
"aa.bb")),
?assertMatch(true, not gt("aa.cc",
"aa.dd")),
?assertMatch(true, not gt("1.0",
"1.0.0")),
?assertMatch(true, not gt("1.0.0",
"1")),
?assertMatch(true, not gt("1.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, not gt("1.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1")).
lt_test() ->
?assertMatch(true, lt("1.0.0-alpha",
"1.0.0-alpha.1")),
?assertMatch(true, lt("1.0.0-alpha",
"1.0.0.0-alpha.1")),
?assertMatch(true, lt("1.0.0-alpha.1",
"1.0.0-beta.2")),
?assertMatch(true, lt("1.0.0-beta.2",
"1.0.0-beta.11")),
?assertMatch(true, lt("1.0.0-pre-alpha.3",
"1.0.0-pre-alpha.14")),
?assertMatch(true, lt("1.0.0-beta.11",
"1.0.0-rc.1")),
?assertMatch(true, lt("1.0.0.1-beta.11",
"1.0.0.1-rc.1")),
?assertMatch(true, lt("1.0.0-rc.1",
"1.0.0-rc.1+build.1")),
?assertMatch(true, lt("1.0.0-rc.1+build.1",
"1.0.0")),
?assertMatch(true, lt("1.0.0",
"1.0.0+0.3.7")),
?assertMatch(true, lt("1.0.0+0.3.7",
"1.3.7+build")),
?assertMatch(true, lt("1.3.7+build",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, lt("1.3.7+build.2.b8f12d7",
"1.3.7+build.11.e0f985a")),
?assertMatch(true, not lt("1.0.0-alpha",
"1.0.0-alpha")),
?assertMatch(true, not lt("1",
"1.0.0")),
?assertMatch(true, lt("1",
"1.0.0.1")),
?assertMatch(true, lt("AA.DD",
"AA.EE")),
?assertMatch(true, not lt("1.0",
"1.0.0")),
?assertMatch(true, not lt("1.0.0.0",
"1")),
?assertMatch(true, not lt("1.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, not lt("AA.DD", "AA.CC")),
?assertMatch(true, not lt("1.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1")),
?assertMatch(true, not lt("1.0.0-alpha.1",
"1.0.0-alpha")),
?assertMatch(true, not lt("1.0.0-beta.2",
"1.0.0-alpha.1")),
?assertMatch(true, not lt("1.0.0-beta.11",
"1.0.0-beta.2")),
?assertMatch(true, not lt("1.0.0-pre-alpha.14",
"1.0.0-pre-alpha.3")),
?assertMatch(true, not lt("1.0.0-rc.1", "1.0.0-beta.11")),
?assertMatch(true, not lt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
?assertMatch(true, not lt("1.0.0", "1.0.0-rc.1+build.1")),
?assertMatch(true, not lt("1.0.0+0.3.7", "1.0.0")),
?assertMatch(true, not lt("1.3.7+build", "1.0.0+0.3.7")),
?assertMatch(true, not lt("1.3.7+build.2.b8f12d7",
"1.3.7+build")),
?assertMatch(true, not lt("1.3.7+build.11.e0f985a",
"1.3.7+build.2.b8f12d7")).
gte_test() ->
?assertMatch(true, gte("1.0.0-alpha",
"1.0.0-alpha")),
?assertMatch(true, gte("1",
"1.0.0")),
?assertMatch(true, gte("1.0",
"1.0.0")),
?assertMatch(true, gte("1.0.0",
"1")),
?assertMatch(true, gte("1.0.0.0",
"1")),
?assertMatch(true, gte("1.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, gte("1.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1")),
?assertMatch(true, gte("1.0.0-alpha.1+build.1",
"1.0.0.0-alpha.1+build.1")),
?assertMatch(true, gte("1.0.0-alpha.1",
"1.0.0-alpha")),
?assertMatch(true, gte("1.0.0-pre-alpha.2",
"1.0.0-pre-alpha")),
?assertMatch(true, gte("1.0.0-beta.2",
"1.0.0-alpha.1")),
?assertMatch(true, gte("1.0.0-beta.11",
"1.0.0-beta.2")),
?assertMatch(true, gte("aa.bb", "aa.bb")),
?assertMatch(true, gte("dd", "aa")),
?assertMatch(true, gte("1.0.0-rc.1", "1.0.0-beta.11")),
?assertMatch(true, gte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
?assertMatch(true, gte("1.0.0", "1.0.0-rc.1+build.1")),
?assertMatch(true, gte("1.0.0+0.3.7", "1.0.0")),
?assertMatch(true, gte("1.3.7+build", "1.0.0+0.3.7")),
?assertMatch(true, gte("1.3.7+build.2.b8f12d7",
"1.3.7+build")),
?assertMatch(true, gte("1.3.7+build.11.e0f985a",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, not gte("1.0.0-alpha",
"1.0.0-alpha.1")),
?assertMatch(true, not gte("1.0.0-pre-alpha",
"1.0.0-pre-alpha.1")),
?assertMatch(true, not gte("CC", "DD")),
?assertMatch(true, not gte("1.0.0-alpha.1",
"1.0.0-beta.2")),
?assertMatch(true, not gte("1.0.0-beta.2",
"1.0.0-beta.11")),
?assertMatch(true, not gte("1.0.0-beta.11",
"1.0.0-rc.1")),
?assertMatch(true, not gte("1.0.0-rc.1",
"1.0.0-rc.1+build.1")),
?assertMatch(true, not gte("1.0.0-rc.1+build.1",
"1.0.0")),
?assertMatch(true, not gte("1.0.0",
"1.0.0+0.3.7")),
?assertMatch(true, not gte("1.0.0+0.3.7",
"1.3.7+build")),
?assertMatch(true, not gte("1.0.0",
"1.0.0+build.1")),
?assertMatch(true, not gte("1.3.7+build",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, not gte("1.3.7+build.2.b8f12d7",
"1.3.7+build.11.e0f985a")).
lte_test() ->
?assertMatch(true, lte("1.0.0-alpha",
"1.0.0-alpha.1")),
?assertMatch(true, lte("1.0.0-alpha.1",
"1.0.0-beta.2")),
?assertMatch(true, lte("1.0.0-beta.2",
"1.0.0-beta.11")),
?assertMatch(true, lte("1.0.0-pre-alpha.2",
"1.0.0-pre-alpha.11")),
?assertMatch(true, lte("1.0.0-beta.11",
"1.0.0-rc.1")),
?assertMatch(true, lte("1.0.0-rc.1",
"1.0.0-rc.1+build.1")),
?assertMatch(true, lte("1.0.0-rc.1+build.1",
"1.0.0")),
?assertMatch(true, lte("1.0.0",
"1.0.0+0.3.7")),
?assertMatch(true, lte("1.0.0+0.3.7",
"1.3.7+build")),
?assertMatch(true, lte("1.3.7+build",
"1.3.7+build.2.b8f12d7")),
?assertMatch(true, lte("1.3.7+build.2.b8f12d7",
"1.3.7+build.11.e0f985a")),
?assertMatch(true, lte("1.0.0-alpha",
"1.0.0-alpha")),
?assertMatch(true, lte("1",
"1.0.0")),
?assertMatch(true, lte("1.0",
"1.0.0")),
?assertMatch(true, lte("1.0.0",
"1")),
?assertMatch(true, lte("1.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, lte("1.0.0.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, lte("1.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1")),
?assertMatch(true, lte("aa","cc")),
?assertMatch(true, lte("cc","cc")),
?assertMatch(true, not lte("1.0.0-alpha.1",
"1.0.0-alpha")),
?assertMatch(true, not lte("1.0.0-pre-alpha.2",
"1.0.0-pre-alpha")),
?assertMatch(true, not lte("cc", "aa")),
?assertMatch(true, not lte("1.0.0-beta.2",
"1.0.0-alpha.1")),
?assertMatch(true, not lte("1.0.0-beta.11",
"1.0.0-beta.2")),
?assertMatch(true, not lte("1.0.0-rc.1", "1.0.0-beta.11")),
?assertMatch(true, not lte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
?assertMatch(true, not lte("1.0.0", "1.0.0-rc.1+build.1")),
?assertMatch(true, not lte("1.0.0+0.3.7", "1.0.0")),
?assertMatch(true, not lte("1.3.7+build", "1.0.0+0.3.7")),
?assertMatch(true, not lte("1.3.7+build.2.b8f12d7",
"1.3.7+build")),
?assertMatch(true, not lte("1.3.7+build.11.e0f985a",
"1.3.7+build.2.b8f12d7")).
between_test() ->
?assertMatch(true, between("1.0.0-alpha",
"1.0.0-alpha.3",
"1.0.0-alpha.2")),
?assertMatch(true, between("1.0.0-alpha.1",
"1.0.0-beta.2",
"1.0.0-alpha.25")),
?assertMatch(true, between("1.0.0-beta.2",
"1.0.0-beta.11",
"1.0.0-beta.7")),
?assertMatch(true, between("1.0.0-pre-alpha.2",
"1.0.0-pre-alpha.11",
"1.0.0-pre-alpha.7")),
?assertMatch(true, between("1.0.0-beta.11",
"1.0.0-rc.3",
"1.0.0-rc.1")),
?assertMatch(true, between("1.0.0-rc.1",
"1.0.0-rc.1+build.3",
"1.0.0-rc.1+build.1")),
?assertMatch(true, between("1.0.0.0-rc.1",
"1.0.0-rc.1+build.3",
"1.0.0-rc.1+build.1")),
?assertMatch(true, between("1.0.0-rc.1+build.1",
"1.0.0",
"1.0.0-rc.33")),
?assertMatch(true, between("1.0.0",
"1.0.0+0.3.7",
"1.0.0+0.2")),
?assertMatch(true, between("1.0.0+0.3.7",
"1.3.7+build",
"1.2")),
?assertMatch(true, between("1.3.7+build",
"1.3.7+build.2.b8f12d7",
"1.3.7+build.1")),
?assertMatch(true, between("1.3.7+build.2.b8f12d7",
"1.3.7+build.11.e0f985a",
"1.3.7+build.10.a36faa")),
?assertMatch(true, between("1.0.0-alpha",
"1.0.0-alpha",
"1.0.0-alpha")),
?assertMatch(true, between("1",
"1.0.0",
"1.0.0")),
?assertMatch(true, between("1.0",
"1.0.0",
"1.0.0")),
?assertMatch(true, between("1.0",
"1.0.0.0",
"1.0.0.0")),
?assertMatch(true, between("1.0.0",
"1",
"1")),
?assertMatch(true, between("1.0+alpha.1",
"1.0.0+alpha.1",
"1.0.0+alpha.1")),
?assertMatch(true, between("1.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1",
"1.0.0-alpha.1+build.1")),
?assertMatch(true, between("aaa",
"ddd",
"cc")),
?assertMatch(true, not between("1.0.0-alpha.1",
"1.0.0-alpha.22",
"1.0.0")),
?assertMatch(true, not between("1.0.0-pre-alpha.1",
"1.0.0-pre-alpha.22",
"1.0.0")),
?assertMatch(true, not between("1.0.0",
"1.0.0-alpha.1",
"2.0")),
?assertMatch(true, not between("1.0.0-beta.1",
"1.0.0-beta.11",
"1.0.0-alpha")),
?assertMatch(true, not between("1.0.0-beta.11", "1.0.0-rc.1",
"1.0.0-rc.22")),
?assertMatch(true, not between("aaa", "ddd", "zzz")).
%% pes/2 implements the "~>" pessimistic constraint: minor-level bounds
%% for two-element versions, patch-level bounds for longer ones.
pes_test() ->
    ?assertMatch(true, pes("1.0.0-rc.0", "1.0.0-rc.0")),
    ?assertMatch(true, pes("1.0.0-rc.1", "1.0.0-rc.0")),
    ?assertMatch(true, pes("1.0.0", "1.0.0-rc.0")),
    ?assertMatch(false, pes("1.0.0-rc.0", "1.0.0-rc.1")),
    ?assertMatch(true, pes("2.6.0", "2.6")),
    ?assertMatch(true, pes("2.7", "2.6")),
    ?assertMatch(true, pes("2.8", "2.6")),
    ?assertMatch(true, pes("2.9", "2.6")),
    ?assertMatch(true, pes("A.B", "A.A")),
    ?assertMatch(true, not pes("3.0.0", "2.6")),
    ?assertMatch(true, not pes("2.5", "2.6")),
    ?assertMatch(true, pes("2.6.5", "2.6.5")),
    ?assertMatch(true, pes("2.6.6", "2.6.5")),
    ?assertMatch(true, pes("2.6.7", "2.6.5")),
    ?assertMatch(true, pes("2.6.8", "2.6.5")),
    ?assertMatch(true, pes("2.6.9", "2.6.5")),
    ?assertMatch(true, pes("2.6.0.9", "2.6.0.5")),
    ?assertMatch(true, not pes("2.7", "2.6.5")),
    ?assertMatch(true, not pes("2.1.7", "2.1.6.5")),
    ?assertMatch(true, not pes("A.A", "A.B")),
    ?assertMatch(true, not pes("2.5", "2.6.5")).
%% parse/1 must produce {MajorMinorPatchMinpatch, {PreRelease, Build}}
%% tuples, turning numeric elements into integers and leaving
%% unparseable versions as opaque binaries.
parse_test() ->
    ?assertEqual({1, {[],[]}}, parse(<<"1">>)),
    ?assertEqual({{1,2,34},{[],[]}}, parse(<<"1.2.34">>)),
    ?assertEqual({<<"a">>, {[],[]}}, parse(<<"a">>)),
    ?assertEqual({{<<"a">>,<<"b">>}, {[],[]}}, parse(<<"a.b">>)),
    ?assertEqual({1, {[],[]}}, parse(<<"1">>)),
    ?assertEqual({{1,2}, {[],[]}}, parse(<<"1.2">>)),
    ?assertEqual({{1,2,2}, {[],[]}}, parse(<<"1.2.2">>)),
    ?assertEqual({{1,99,2}, {[],[]}}, parse(<<"1.99.2">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>],[]}}, parse(<<"1.99.2-alpha">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>,1], []}}, parse(<<"1.99.2-alpha.1">>)),
    ?assertEqual({{1,99,2}, {[<<"pre-alpha">>,1], []}}, parse(<<"1.99.2-pre-alpha.1">>)),
    ?assertEqual({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}},
                 parse(<<"1.99.2+build.1.a36">>)),
    ?assertEqual({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}},
                 parse(<<"1.99.2.44+build.1.a36">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}},
                 parse("1.99.2-alpha.1+build.1.a36")),
    ?assertEqual({{1,99,2}, {[<<"pre-alpha">>, 1], [<<"build">>, 1, <<"a36">>]}},
                 parse("1.99.2-pre-alpha.1+build.1.a36")).
%% format/1 is the inverse of parse/1 up to iolist flattening; the
%% first two asserts additionally pin the exact iolist term structure.
version_format_test() ->
    ?assertEqual(["1", [], []], format({1, {[],[]}})),
    ?assertEqual(["1", ".", "2", ".", "34", [], []], format({{1,2,34},{[],[]}})),
    ?assertEqual(<<"a">>, erlang:iolist_to_binary(format({<<"a">>, {[],[]}}))),
    ?assertEqual(<<"a.b">>, erlang:iolist_to_binary(format({{<<"a">>,<<"b">>}, {[],[]}}))),
    ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))),
    ?assertEqual(<<"1.2">>, erlang:iolist_to_binary(format({{1,2}, {[],[]}}))),
    ?assertEqual(<<"1.2.2">>, erlang:iolist_to_binary(format({{1,2,2}, {[],[]}}))),
    ?assertEqual(<<"1.99.2">>, erlang:iolist_to_binary(format({{1,99,2}, {[],[]}}))),
    ?assertEqual(<<"1.99.2-alpha">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>],[]}}))),
    ?assertEqual(<<"1.99.2-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>,1], []}}))),
    ?assertEqual(<<"1.99.2-pre-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"pre-alpha">>,1], []}}))),
    ?assertEqual(<<"1.99.2+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2.44+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2-alpha.1+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2-pre-alpha.1+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[<<"pre-alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))).
-endif.
| null | https://raw.githubusercontent.com/erlware/erlware_commons/eeb25f4b7f4d9f423a0470461d225fb6a61217d2/src/ec_semver.erl | erlang | vi:ts=4 sw=4 et
-------------------------------------------------------------------
@doc
See / for the spec.
@end
-------------------------------------------------------------------
For internal use by the ec_semver_parser peg
===================================================================
Public Types
===================================================================
===================================================================
API
===================================================================
(e.g. 3.0.0) are allowed.
revisions.
===================================================================
Friend Functions
===================================================================
@doc helper function for the peg grammar to parse the iolist into a major_minor_patch
@doc helper function for the peg grammar to parse the iolist into an alpha part
numbers must be. We implement that here by taking the alpha part
and trying to convert it to a number, if it succeeds we use
it. Otherwise we do not.
===================================================================
Internal Functions
===================================================================
@doc normalize the semver so they can be compared
the internal implementation of the of the pessimistic run. The
external just ensures that versions are parsed.
===================================================================
Test Functions
=================================================================== | ( C ) 2011 , Erlware LLC
Helper functions for working with semver versioning strings .
-module(ec_semver).
-export([parse/1,
format/1,
eql/2,
gt/2,
gte/2,
lt/2,
lte/2,
pes/2,
between/3]).
-export([internal_parse_version/1]).
-export_type([semver/0,
version_string/0,
any_version/0]).
-type version_element() :: non_neg_integer() | binary().
-type major_minor_patch_minpatch() ::
version_element()
| {version_element(), version_element()}
| {version_element(), version_element(), version_element()}
| {version_element(), version_element(),
version_element(), version_element()}.
-type alpha_part() :: integer() | binary() | string().
-type alpha_info() :: {PreRelease::[alpha_part()],
BuildVersion::[alpha_part()]}.
-type semver() :: {major_minor_patch_minpatch(), alpha_info()}.
-type version_string() :: string() | binary().
-type any_version() :: version_string() | semver().
%% @doc parse a string or binary into a valid semver representation
-spec parse(any_version()) -> semver().
parse(Version) when erlang:is_list(Version) ->
    case ec_semver_parser:parse(Version) of
        {fail, _} ->
            %% not parseable as a semver: keep the whole input as an
            %% opaque binary version with empty pre-release/build info
            {erlang:iolist_to_binary(Version), {[],[]}};
        Good ->
            Good
    end;
parse(Version) when erlang:is_binary(Version) ->
    case ec_semver_parser:parse(Version) of
        {fail, _} ->
            {Version, {[],[]}};
        Good ->
            Good
    end;
%% already-parsed semvers pass through untouched, so parse/1 is idempotent
parse(Version) ->
    Version.
-spec format(semver()) -> iolist().
format({Maj, {AlphaPart, BuildPart}})
when erlang:is_integer(Maj);
erlang:is_binary(Maj) ->
[format_version_part(Maj),
format_vsn_rest(<<"-">>, AlphaPart),
format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min}, {AlphaPart, BuildPart}}) ->
[format_version_part(Maj), ".",
format_version_part(Min),
format_vsn_rest(<<"-">>, AlphaPart),
format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min, Patch}, {AlphaPart, BuildPart}}) ->
[format_version_part(Maj), ".",
format_version_part(Min), ".",
format_version_part(Patch),
format_vsn_rest(<<"-">>, AlphaPart),
format_vsn_rest(<<"+">>, BuildPart)];
format({{Maj, Min, Patch, MinPatch}, {AlphaPart, BuildPart}}) ->
[format_version_part(Maj), ".",
format_version_part(Min), ".",
format_version_part(Patch), ".",
format_version_part(MinPatch),
format_vsn_rest(<<"-">>, AlphaPart),
format_vsn_rest(<<"+">>, BuildPart)].
-spec format_version_part(integer() | binary()) -> iolist().
format_version_part(Vsn)
when erlang:is_integer(Vsn) ->
erlang:integer_to_list(Vsn);
format_version_part(Vsn)
when erlang:is_binary(Vsn) ->
Vsn.
@doc test for quality between semver versions
-spec eql(any_version(), any_version()) -> boolean().
eql(VsnA, VsnB) ->
NVsnA = normalize(parse(VsnA)),
NVsnB = normalize(parse(VsnB)),
NVsnA =:= NVsnB.
@doc Test that VsnA is greater than VsnB
-spec gt(any_version(), any_version()) -> boolean().
gt(VsnA, VsnB) ->
{MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
{MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
((MMPA > MMPB)
orelse
((MMPA =:= MMPB)
andalso
((AlphaA =:= [] andalso AlphaB =/= [])
orelse
((not (AlphaB =:= [] andalso AlphaA =/= []))
andalso
(AlphaA > AlphaB))))
orelse
((MMPA =:= MMPB)
andalso
(AlphaA =:= AlphaB)
andalso
((PatchB =:= [] andalso PatchA =/= [])
orelse
PatchA > PatchB))).
@doc Test that VsnA is greater than or equal to VsnB
-spec gte(any_version(), any_version()) -> boolean().
gte(VsnA, VsnB) ->
NVsnA = normalize(parse(VsnA)),
NVsnB = normalize(parse(VsnB)),
gt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
@doc Test that VsnA is less than VsnB
-spec lt(any_version(), any_version()) -> boolean().
lt(VsnA, VsnB) ->
{MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
{MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
((MMPA < MMPB)
orelse
((MMPA =:= MMPB)
andalso
((AlphaB =:= [] andalso AlphaA =/= [])
orelse
((not (AlphaA =:= [] andalso AlphaB =/= []))
andalso
(AlphaA < AlphaB))))
orelse
((MMPA =:= MMPB)
andalso
(AlphaA =:= AlphaB)
andalso
((PatchA =:= [] andalso PatchB =/= [])
orelse
PatchA < PatchB))).
@doc Test that VsnA is less than or equal to VsnB
-spec lte(any_version(), any_version()) -> boolean().
lte(VsnA, VsnB) ->
NVsnA = normalize(parse(VsnA)),
NVsnB = normalize(parse(VsnB)),
lt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
@doc Test that VsnMatch is greater than or equal to Vsn1 and
less than or equal to Vsn2
-spec between(any_version(), any_version(), any_version()) -> boolean().
between(Vsn1, Vsn2, VsnMatch) ->
NVsnA = normalize(parse(Vsn1)),
NVsnB = normalize(parse(Vsn2)),
NVsnMatch = normalize(parse(VsnMatch)),
gte(NVsnMatch, NVsnA) andalso
lte(NVsnMatch, NVsnB).
@doc check that VsnA is Approximately greater than VsnB
Specifying " > = 2.6.5 " is an optimistic version constraint . All
versions greater than the one specified , including major releases
Conversely , specifying " ~ > 2.6 " is pessimistic about future major
revisions and " ~ > 2.6.5 " is pessimistic about future minor
" ~ > 2.6 " matches cookbooks > = 2.6.0 AND & lt ; 3.0.0
" ~ > 2.6.5 " matches cookbooks > = 2.6.5 AND & lt ; 2.7.0
pes(VsnA, VsnB) ->
internal_pes(parse(VsnA), parse(VsnB)).
@doc helper function for the peg grammar to parse the iolist into a semver
-spec internal_parse_version(iolist()) -> semver().
internal_parse_version([MMP, AlphaPart, BuildPart, _]) ->
{parse_major_minor_patch_minpatch(MMP), {parse_alpha_part(AlphaPart),
parse_alpha_part(BuildPart)}}.
-spec parse_major_minor_patch_minpatch(iolist()) -> major_minor_patch_minpatch().
parse_major_minor_patch_minpatch([MajVsn, [], [], []]) ->
strip_maj_version(MajVsn);
parse_major_minor_patch_minpatch([MajVsn, [<<".">>, MinVsn], [], []]) ->
{strip_maj_version(MajVsn), MinVsn};
parse_major_minor_patch_minpatch([MajVsn,
[<<".">>, MinVsn],
[<<".">>, PatchVsn], []]) ->
{strip_maj_version(MajVsn), MinVsn, PatchVsn};
parse_major_minor_patch_minpatch([MajVsn,
[<<".">>, MinVsn],
[<<".">>, PatchVsn],
[<<".">>, MinPatch]]) ->
{strip_maj_version(MajVsn), MinVsn, PatchVsn, MinPatch}.
-spec parse_alpha_part(iolist()) -> [alpha_part()].
parse_alpha_part([]) ->
[];
parse_alpha_part([_, AV1, Rest]) ->
[erlang:iolist_to_binary(AV1) |
[format_alpha_part(Part) || Part <- Rest]].
@doc according to semver alpha parts that can be treated like
-spec format_alpha_part(iolist()) -> integer() | binary().
format_alpha_part([<<".">>, AlphaPart]) ->
Bin = erlang:iolist_to_binary(AlphaPart),
try
erlang:list_to_integer(erlang:binary_to_list(Bin))
catch
error:badarg ->
Bin
end.
-spec strip_maj_version(iolist()) -> version_element().
strip_maj_version([<<"v">>, MajVsn]) ->
MajVsn;
strip_maj_version([[], MajVsn]) ->
MajVsn;
strip_maj_version(MajVsn) ->
MajVsn.
-spec to_list(integer() | binary() | string()) -> string() | binary().
to_list(Detail) when erlang:is_integer(Detail) ->
erlang:integer_to_list(Detail);
to_list(Detail) when erlang:is_list(Detail); erlang:is_binary(Detail) ->
Detail.
-spec format_vsn_rest(binary() | string(), [integer() | binary()]) -> iolist().
format_vsn_rest(_TypeMark, []) ->
[];
format_vsn_rest(TypeMark, [Head | Rest]) ->
[TypeMark, Head |
[[".", to_list(Detail)] || Detail <- Rest]].
-spec normalize(semver()) -> semver().
normalize({Vsn, Rest})
when erlang:is_binary(Vsn);
erlang:is_integer(Vsn) ->
{{Vsn, 0, 0, 0}, Rest};
normalize({{Maj, Min}, Rest}) ->
{{Maj, Min, 0, 0}, Rest};
normalize({{Maj, Min, Patch}, Rest}) ->
{{Maj, Min, Patch, 0}, Rest};
normalize(Other = {{_, _, _, _}, {_,_}}) ->
Other.
%% @doc to do the pessimistic compare we need a parsed semver. This is
%% the internal implementation of the pessimistic ("~>") comparison:
%% the candidate must be at least the given lower bound but below the
%% next release of the last-but-one bound component.  The clauses only
%% apply when the bound's components are all integers; otherwise we fall
%% back to a plain gte/2.  (The doc marker above had been stripped,
%% leaving a bare text line that is not valid Erlang; restored here.)
-spec internal_pes(semver(), semver()) -> boolean().
internal_pes(VsnA, {{LM, LMI}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI) ->
    %% {LM, LMI} bound: >= LM.LMI.0 and < (LM+1).0.0.0
    gte(VsnA, {{LM, LMI, 0}, Alpha}) andalso
        lt(VsnA, {{LM + 1, 0, 0, 0}, {[], []}});
internal_pes(VsnA, {{LM, LMI, LP}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI),
       erlang:is_integer(LP) ->
    %% {LM, LMI, LP} bound: >= LM.LMI.LP and < LM.(LMI+1).0.0
    gte(VsnA, {{LM, LMI, LP}, Alpha})
        andalso
        lt(VsnA, {{LM, LMI + 1, 0, 0}, {[], []}});
internal_pes(VsnA, {{LM, LMI, LP, LMP}, Alpha})
  when erlang:is_integer(LM),
       erlang:is_integer(LMI),
       erlang:is_integer(LP),
       erlang:is_integer(LMP) ->
    %% four-element bound: >= LM.LMI.LP.LMP and < LM.LMI.(LP+1).0
    gte(VsnA, {{LM, LMI, LP, LMP}, Alpha})
        andalso
        lt(VsnA, {{LM, LMI, LP + 1, 0}, {[], []}});
internal_pes(Vsn, LVsn) ->
    gte(Vsn, LVsn).
%% ===================================================================
%% EUnit tests (compiled only when TEST is defined)
%% ===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% eql/2: equality after normalization -- missing components default to
%% zero and a leading "v" is ignored.
eql_test() ->
    ?assertMatch(true, eql("1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, eql("v1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, eql("1",
                           "1.0.0")),
    ?assertMatch(true, eql("v1",
                           "v1.0.0")),
    ?assertMatch(true, eql("1.0",
                           "1.0.0")),
    ?assertMatch(true, eql("1.0.0",
                           "1")),
    ?assertMatch(true, eql("1.0.0.0",
                           "1")),
    ?assertMatch(true, eql("1.0+alpha.1",
                           "1.0.0+alpha.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "1.0.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-alpha.1+build.1",
                           "v1.0.0.0-alpha.1+build.1")),
    ?assertMatch(true, eql("1.0-pre-alpha.1",
                           "1.0.0-pre-alpha.1")),
    ?assertMatch(true, eql("aa", "aa")),
    ?assertMatch(true, eql("AA.BB", "AA.BB")),
    ?assertMatch(true, eql("BBB-super", "BBB-super")),
    ?assertMatch(true, not eql("1.0.0",
                               "1.0.1")),
    ?assertMatch(true, not eql("1.0.0-alpha",
                               "1.0.1+alpha")),
    ?assertMatch(true, not eql("1.0.0+build.1",
                               "1.0.1+build.2")),
    ?assertMatch(true, not eql("1.0.0.0+build.1",
                               "1.0.0.1+build.2")),
    ?assertMatch(true, not eql("FFF", "BBB")),
    ?assertMatch(true, not eql("1", "1BBBB")).

%% gt/2: strict "greater than", including pre-release and build-metadata
%% precedence ordering.
gt_test() ->
    ?assertMatch(true, gt("1.0.0-alpha.1",
                          "1.0.0-alpha")),
    ?assertMatch(true, gt("1.0.0.1-alpha.1",
                          "1.0.0.1-alpha")),
    ?assertMatch(true, gt("1.0.0.4-alpha.1",
                          "1.0.0.2-alpha")),
    ?assertMatch(true, gt("1.0.0.0-alpha.1",
                          "1.0.0-alpha")),
    ?assertMatch(true, gt("1.0.0-beta.2",
                          "1.0.0-alpha.1")),
    ?assertMatch(true, gt("1.0.0-beta.11",
                          "1.0.0-beta.2")),
    ?assertMatch(true, gt("1.0.0-pre-alpha.14",
                          "1.0.0-pre-alpha.3")),
    ?assertMatch(true, gt("1.0.0-beta.11",
                          "1.0.0.0-beta.2")),
    ?assertMatch(true, gt("1.0.0-rc.1", "1.0.0-beta.11")),
    ?assertMatch(true, gt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
    ?assertMatch(true, gt("1.0.0", "1.0.0-rc.1+build.1")),
    ?assertMatch(true, gt("1.0.0+0.3.7", "1.0.0")),
    ?assertMatch(true, gt("1.3.7+build", "1.0.0+0.3.7")),
    ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
                          "1.3.7+build")),
    ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
                          "1.3.7.0+build")),
    ?assertMatch(true, gt("1.3.7+build.11.e0f985a",
                          "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, gt("aa.cc",
                          "aa.bb")),
    ?assertMatch(true, not gt("1.0.0-alpha",
                              "1.0.0-alpha.1")),
    ?assertMatch(true, not gt("1.0.0-alpha",
                              "1.0.0.0-alpha.1")),
    ?assertMatch(true, not gt("1.0.0-alpha.1",
                              "1.0.0-beta.2")),
    ?assertMatch(true, not gt("1.0.0-beta.2",
                              "1.0.0-beta.11")),
    ?assertMatch(true, not gt("1.0.0-beta.11",
                              "1.0.0-rc.1")),
    ?assertMatch(true, not gt("1.0.0-pre-alpha.3",
                              "1.0.0-pre-alpha.14")),
    ?assertMatch(true, not gt("1.0.0-rc.1",
                              "1.0.0-rc.1+build.1")),
    ?assertMatch(true, not gt("1.0.0-rc.1+build.1",
                              "1.0.0")),
    ?assertMatch(true, not gt("1.0.0",
                              "1.0.0+0.3.7")),
    ?assertMatch(true, not gt("1.0.0+0.3.7",
                              "1.3.7+build")),
    ?assertMatch(true, not gt("1.3.7+build",
                              "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, not gt("1.3.7+build.2.b8f12d7",
                              "1.3.7+build.11.e0f985a")),
    ?assertMatch(true, not gt("1.0.0-alpha",
                              "1.0.0-alpha")),
    ?assertMatch(true, not gt("1",
                              "1.0.0")),
    ?assertMatch(true, not gt("aa.bb",
                              "aa.bb")),
    ?assertMatch(true, not gt("aa.cc",
                              "aa.dd")),
    ?assertMatch(true, not gt("1.0",
                              "1.0.0")),
    ?assertMatch(true, not gt("1.0.0",
                              "1")),
    ?assertMatch(true, not gt("1.0+alpha.1",
                              "1.0.0+alpha.1")),
    ?assertMatch(true, not gt("1.0-alpha.1+build.1",
                              "1.0.0-alpha.1+build.1")).

%% lt/2: strict "less than" -- mirror image of gt_test/0.
lt_test() ->
    ?assertMatch(true, lt("1.0.0-alpha",
                          "1.0.0-alpha.1")),
    ?assertMatch(true, lt("1.0.0-alpha",
                          "1.0.0.0-alpha.1")),
    ?assertMatch(true, lt("1.0.0-alpha.1",
                          "1.0.0-beta.2")),
    ?assertMatch(true, lt("1.0.0-beta.2",
                          "1.0.0-beta.11")),
    ?assertMatch(true, lt("1.0.0-pre-alpha.3",
                          "1.0.0-pre-alpha.14")),
    ?assertMatch(true, lt("1.0.0-beta.11",
                          "1.0.0-rc.1")),
    ?assertMatch(true, lt("1.0.0.1-beta.11",
                          "1.0.0.1-rc.1")),
    ?assertMatch(true, lt("1.0.0-rc.1",
                          "1.0.0-rc.1+build.1")),
    ?assertMatch(true, lt("1.0.0-rc.1+build.1",
                          "1.0.0")),
    ?assertMatch(true, lt("1.0.0",
                          "1.0.0+0.3.7")),
    ?assertMatch(true, lt("1.0.0+0.3.7",
                          "1.3.7+build")),
    ?assertMatch(true, lt("1.3.7+build",
                          "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, lt("1.3.7+build.2.b8f12d7",
                          "1.3.7+build.11.e0f985a")),
    ?assertMatch(true, not lt("1.0.0-alpha",
                              "1.0.0-alpha")),
    ?assertMatch(true, not lt("1",
                              "1.0.0")),
    ?assertMatch(true, lt("1",
                          "1.0.0.1")),
    ?assertMatch(true, lt("AA.DD",
                          "AA.EE")),
    ?assertMatch(true, not lt("1.0",
                              "1.0.0")),
    ?assertMatch(true, not lt("1.0.0.0",
                              "1")),
    ?assertMatch(true, not lt("1.0+alpha.1",
                              "1.0.0+alpha.1")),
    ?assertMatch(true, not lt("AA.DD", "AA.CC")),
    ?assertMatch(true, not lt("1.0-alpha.1+build.1",
                              "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, not lt("1.0.0-alpha.1",
                              "1.0.0-alpha")),
    ?assertMatch(true, not lt("1.0.0-beta.2",
                              "1.0.0-alpha.1")),
    ?assertMatch(true, not lt("1.0.0-beta.11",
                              "1.0.0-beta.2")),
    ?assertMatch(true, not lt("1.0.0-pre-alpha.14",
                              "1.0.0-pre-alpha.3")),
    ?assertMatch(true, not lt("1.0.0-rc.1", "1.0.0-beta.11")),
    ?assertMatch(true, not lt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
    ?assertMatch(true, not lt("1.0.0", "1.0.0-rc.1+build.1")),
    ?assertMatch(true, not lt("1.0.0+0.3.7", "1.0.0")),
    ?assertMatch(true, not lt("1.3.7+build", "1.0.0+0.3.7")),
    ?assertMatch(true, not lt("1.3.7+build.2.b8f12d7",
                              "1.3.7+build")),
    ?assertMatch(true, not lt("1.3.7+build.11.e0f985a",
                              "1.3.7+build.2.b8f12d7")).

%% gte/2: non-strict "greater than or equal".
gte_test() ->
    ?assertMatch(true, gte("1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, gte("1",
                           "1.0.0")),
    ?assertMatch(true, gte("1.0",
                           "1.0.0")),
    ?assertMatch(true, gte("1.0.0",
                           "1")),
    ?assertMatch(true, gte("1.0.0.0",
                           "1")),
    ?assertMatch(true, gte("1.0+alpha.1",
                           "1.0.0+alpha.1")),
    ?assertMatch(true, gte("1.0-alpha.1+build.1",
                           "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, gte("1.0.0-alpha.1+build.1",
                           "1.0.0.0-alpha.1+build.1")),
    ?assertMatch(true, gte("1.0.0-alpha.1",
                           "1.0.0-alpha")),
    ?assertMatch(true, gte("1.0.0-pre-alpha.2",
                           "1.0.0-pre-alpha")),
    ?assertMatch(true, gte("1.0.0-beta.2",
                           "1.0.0-alpha.1")),
    ?assertMatch(true, gte("1.0.0-beta.11",
                           "1.0.0-beta.2")),
    ?assertMatch(true, gte("aa.bb", "aa.bb")),
    ?assertMatch(true, gte("dd", "aa")),
    ?assertMatch(true, gte("1.0.0-rc.1", "1.0.0-beta.11")),
    ?assertMatch(true, gte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
    ?assertMatch(true, gte("1.0.0", "1.0.0-rc.1+build.1")),
    ?assertMatch(true, gte("1.0.0+0.3.7", "1.0.0")),
    ?assertMatch(true, gte("1.3.7+build", "1.0.0+0.3.7")),
    ?assertMatch(true, gte("1.3.7+build.2.b8f12d7",
                           "1.3.7+build")),
    ?assertMatch(true, gte("1.3.7+build.11.e0f985a",
                           "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, not gte("1.0.0-alpha",
                               "1.0.0-alpha.1")),
    ?assertMatch(true, not gte("1.0.0-pre-alpha",
                               "1.0.0-pre-alpha.1")),
    ?assertMatch(true, not gte("CC", "DD")),
    ?assertMatch(true, not gte("1.0.0-alpha.1",
                               "1.0.0-beta.2")),
    ?assertMatch(true, not gte("1.0.0-beta.2",
                               "1.0.0-beta.11")),
    ?assertMatch(true, not gte("1.0.0-beta.11",
                               "1.0.0-rc.1")),
    ?assertMatch(true, not gte("1.0.0-rc.1",
                               "1.0.0-rc.1+build.1")),
    ?assertMatch(true, not gte("1.0.0-rc.1+build.1",
                               "1.0.0")),
    ?assertMatch(true, not gte("1.0.0",
                               "1.0.0+0.3.7")),
    ?assertMatch(true, not gte("1.0.0+0.3.7",
                               "1.3.7+build")),
    ?assertMatch(true, not gte("1.0.0",
                               "1.0.0+build.1")),
    ?assertMatch(true, not gte("1.3.7+build",
                               "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, not gte("1.3.7+build.2.b8f12d7",
                               "1.3.7+build.11.e0f985a")).

%% lte/2: non-strict "less than or equal".
lte_test() ->
    ?assertMatch(true, lte("1.0.0-alpha",
                           "1.0.0-alpha.1")),
    ?assertMatch(true, lte("1.0.0-alpha.1",
                           "1.0.0-beta.2")),
    ?assertMatch(true, lte("1.0.0-beta.2",
                           "1.0.0-beta.11")),
    ?assertMatch(true, lte("1.0.0-pre-alpha.2",
                           "1.0.0-pre-alpha.11")),
    ?assertMatch(true, lte("1.0.0-beta.11",
                           "1.0.0-rc.1")),
    ?assertMatch(true, lte("1.0.0-rc.1",
                           "1.0.0-rc.1+build.1")),
    ?assertMatch(true, lte("1.0.0-rc.1+build.1",
                           "1.0.0")),
    ?assertMatch(true, lte("1.0.0",
                           "1.0.0+0.3.7")),
    ?assertMatch(true, lte("1.0.0+0.3.7",
                           "1.3.7+build")),
    ?assertMatch(true, lte("1.3.7+build",
                           "1.3.7+build.2.b8f12d7")),
    ?assertMatch(true, lte("1.3.7+build.2.b8f12d7",
                           "1.3.7+build.11.e0f985a")),
    ?assertMatch(true, lte("1.0.0-alpha",
                           "1.0.0-alpha")),
    ?assertMatch(true, lte("1",
                           "1.0.0")),
    ?assertMatch(true, lte("1.0",
                           "1.0.0")),
    ?assertMatch(true, lte("1.0.0",
                           "1")),
    ?assertMatch(true, lte("1.0+alpha.1",
                           "1.0.0+alpha.1")),
    ?assertMatch(true, lte("1.0.0.0+alpha.1",
                           "1.0.0+alpha.1")),
    ?assertMatch(true, lte("1.0-alpha.1+build.1",
                           "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, lte("aa","cc")),
    ?assertMatch(true, lte("cc","cc")),
    ?assertMatch(true, not lte("1.0.0-alpha.1",
                               "1.0.0-alpha")),
    ?assertMatch(true, not lte("1.0.0-pre-alpha.2",
                               "1.0.0-pre-alpha")),
    ?assertMatch(true, not lte("cc", "aa")),
    ?assertMatch(true, not lte("1.0.0-beta.2",
                               "1.0.0-alpha.1")),
    ?assertMatch(true, not lte("1.0.0-beta.11",
                               "1.0.0-beta.2")),
    ?assertMatch(true, not lte("1.0.0-rc.1", "1.0.0-beta.11")),
    ?assertMatch(true, not lte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
    ?assertMatch(true, not lte("1.0.0", "1.0.0-rc.1+build.1")),
    ?assertMatch(true, not lte("1.0.0+0.3.7", "1.0.0")),
    ?assertMatch(true, not lte("1.3.7+build", "1.0.0+0.3.7")),
    ?assertMatch(true, not lte("1.3.7+build.2.b8f12d7",
                               "1.3.7+build")),
    ?assertMatch(true, not lte("1.3.7+build.11.e0f985a",
                               "1.3.7+build.2.b8f12d7")).

%% between/3: inclusive range check -- between(Low, High, Vsn).
between_test() ->
    ?assertMatch(true, between("1.0.0-alpha",
                               "1.0.0-alpha.3",
                               "1.0.0-alpha.2")),
    ?assertMatch(true, between("1.0.0-alpha.1",
                               "1.0.0-beta.2",
                               "1.0.0-alpha.25")),
    ?assertMatch(true, between("1.0.0-beta.2",
                               "1.0.0-beta.11",
                               "1.0.0-beta.7")),
    ?assertMatch(true, between("1.0.0-pre-alpha.2",
                               "1.0.0-pre-alpha.11",
                               "1.0.0-pre-alpha.7")),
    ?assertMatch(true, between("1.0.0-beta.11",
                               "1.0.0-rc.3",
                               "1.0.0-rc.1")),
    ?assertMatch(true, between("1.0.0-rc.1",
                               "1.0.0-rc.1+build.3",
                               "1.0.0-rc.1+build.1")),
    ?assertMatch(true, between("1.0.0.0-rc.1",
                               "1.0.0-rc.1+build.3",
                               "1.0.0-rc.1+build.1")),
    ?assertMatch(true, between("1.0.0-rc.1+build.1",
                               "1.0.0",
                               "1.0.0-rc.33")),
    ?assertMatch(true, between("1.0.0",
                               "1.0.0+0.3.7",
                               "1.0.0+0.2")),
    ?assertMatch(true, between("1.0.0+0.3.7",
                               "1.3.7+build",
                               "1.2")),
    ?assertMatch(true, between("1.3.7+build",
                               "1.3.7+build.2.b8f12d7",
                               "1.3.7+build.1")),
    ?assertMatch(true, between("1.3.7+build.2.b8f12d7",
                               "1.3.7+build.11.e0f985a",
                               "1.3.7+build.10.a36faa")),
    ?assertMatch(true, between("1.0.0-alpha",
                               "1.0.0-alpha",
                               "1.0.0-alpha")),
    ?assertMatch(true, between("1",
                               "1.0.0",
                               "1.0.0")),
    ?assertMatch(true, between("1.0",
                               "1.0.0",
                               "1.0.0")),
    ?assertMatch(true, between("1.0",
                               "1.0.0.0",
                               "1.0.0.0")),
    ?assertMatch(true, between("1.0.0",
                               "1",
                               "1")),
    ?assertMatch(true, between("1.0+alpha.1",
                               "1.0.0+alpha.1",
                               "1.0.0+alpha.1")),
    ?assertMatch(true, between("1.0-alpha.1+build.1",
                               "1.0.0-alpha.1+build.1",
                               "1.0.0-alpha.1+build.1")),
    ?assertMatch(true, between("aaa",
                               "ddd",
                               "cc")),
    ?assertMatch(true, not between("1.0.0-alpha.1",
                                   "1.0.0-alpha.22",
                                   "1.0.0")),
    ?assertMatch(true, not between("1.0.0-pre-alpha.1",
                                   "1.0.0-pre-alpha.22",
                                   "1.0.0")),
    ?assertMatch(true, not between("1.0.0",
                                   "1.0.0-alpha.1",
                                   "2.0")),
    ?assertMatch(true, not between("1.0.0-beta.1",
                                   "1.0.0-beta.11",
                                   "1.0.0-alpha")),
    ?assertMatch(true, not between("1.0.0-beta.11", "1.0.0-rc.1",
                                   "1.0.0-rc.22")),
    ?assertMatch(true, not between("aaa", "ddd", "zzz")).

%% pes/2: pessimistic ("~>") comparison, see internal_pes/2.
pes_test() ->
    ?assertMatch(true, pes("1.0.0-rc.0", "1.0.0-rc.0")),
    ?assertMatch(true, pes("1.0.0-rc.1", "1.0.0-rc.0")),
    ?assertMatch(true, pes("1.0.0", "1.0.0-rc.0")),
    ?assertMatch(false, pes("1.0.0-rc.0", "1.0.0-rc.1")),
    ?assertMatch(true, pes("2.6.0", "2.6")),
    ?assertMatch(true, pes("2.7", "2.6")),
    ?assertMatch(true, pes("2.8", "2.6")),
    ?assertMatch(true, pes("2.9", "2.6")),
    ?assertMatch(true, pes("A.B", "A.A")),
    ?assertMatch(true, not pes("3.0.0", "2.6")),
    ?assertMatch(true, not pes("2.5", "2.6")),
    ?assertMatch(true, pes("2.6.5", "2.6.5")),
    ?assertMatch(true, pes("2.6.6", "2.6.5")),
    ?assertMatch(true, pes("2.6.7", "2.6.5")),
    ?assertMatch(true, pes("2.6.8", "2.6.5")),
    ?assertMatch(true, pes("2.6.9", "2.6.5")),
    ?assertMatch(true, pes("2.6.0.9", "2.6.0.5")),
    ?assertMatch(true, not pes("2.7", "2.6.5")),
    ?assertMatch(true, not pes("2.1.7", "2.1.6.5")),
    ?assertMatch(true, not pes("A.A", "A.B")),
    ?assertMatch(true, not pes("2.5", "2.6.5")).

%% parse/1: string/binary input to the internal tuple representation.
parse_test() ->
    ?assertEqual({1, {[],[]}}, parse(<<"1">>)),
    ?assertEqual({{1,2,34},{[],[]}}, parse(<<"1.2.34">>)),
    ?assertEqual({<<"a">>, {[],[]}}, parse(<<"a">>)),
    ?assertEqual({{<<"a">>,<<"b">>}, {[],[]}}, parse(<<"a.b">>)),
    ?assertEqual({1, {[],[]}}, parse(<<"1">>)),
    ?assertEqual({{1,2}, {[],[]}}, parse(<<"1.2">>)),
    ?assertEqual({{1,2,2}, {[],[]}}, parse(<<"1.2.2">>)),
    ?assertEqual({{1,99,2}, {[],[]}}, parse(<<"1.99.2">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>],[]}}, parse(<<"1.99.2-alpha">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>,1], []}}, parse(<<"1.99.2-alpha.1">>)),
    ?assertEqual({{1,99,2}, {[<<"pre-alpha">>,1], []}}, parse(<<"1.99.2-pre-alpha.1">>)),
    ?assertEqual({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}},
                 parse(<<"1.99.2+build.1.a36">>)),
    ?assertEqual({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}},
                 parse(<<"1.99.2.44+build.1.a36">>)),
    ?assertEqual({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}},
                 parse("1.99.2-alpha.1+build.1.a36")),
    ?assertEqual({{1,99,2}, {[<<"pre-alpha">>, 1], [<<"build">>, 1, <<"a36">>]}},
                 parse("1.99.2-pre-alpha.1+build.1.a36")).

%% format/1: the inverse of parse/1, producing an iolist.
version_format_test() ->
    ?assertEqual(["1", [], []], format({1, {[],[]}})),
    ?assertEqual(["1", ".", "2", ".", "34", [], []], format({{1,2,34},{[],[]}})),
    ?assertEqual(<<"a">>, erlang:iolist_to_binary(format({<<"a">>, {[],[]}}))),
    ?assertEqual(<<"a.b">>, erlang:iolist_to_binary(format({{<<"a">>,<<"b">>}, {[],[]}}))),
    ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))),
    ?assertEqual(<<"1.2">>, erlang:iolist_to_binary(format({{1,2}, {[],[]}}))),
    ?assertEqual(<<"1.2.2">>, erlang:iolist_to_binary(format({{1,2,2}, {[],[]}}))),
    ?assertEqual(<<"1.99.2">>, erlang:iolist_to_binary(format({{1,99,2}, {[],[]}}))),
    ?assertEqual(<<"1.99.2-alpha">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>],[]}}))),
    ?assertEqual(<<"1.99.2-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>,1], []}}))),
    ?assertEqual(<<"1.99.2-pre-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"pre-alpha">>,1], []}}))),
    ?assertEqual(<<"1.99.2+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2.44+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2-alpha.1+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1.99.2-pre-alpha.1+build.1.a36">>,
                 erlang:iolist_to_binary(format({{1,99,2}, {[<<"pre-alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
    ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))).

-endif.
|
96a1b1a269e21da74001c5a3efae9dd492d4f01603af794fe07725e8230ce81a | SamB/coq | pptactic.mli | (************************************************************************)
v * The Coq Proof Assistant / The Coq Development Team
< O _ _ _ , , * CNRS - Ecole Polytechnique - INRIA Futurs - Universite Paris Sud
\VV/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(*i $Id$ i*)
open Pp
open Genarg
open Tacexpr
open Pretyping
open Proof_type
open Topconstr
open Rawterm
open Ppextend
open Environ
open Evd

(** Printer combinators: lift a printer for ['a] to the corresponding
    wrapped type. *)
val pr_or_var : ('a -> std_ppcmds) -> 'a or_var -> std_ppcmds
val pr_or_metaid : ('a -> std_ppcmds) -> 'a or_metaid -> std_ppcmds
val pr_and_short_name : ('a -> std_ppcmds) -> 'a and_short_name -> std_ppcmds
val pr_or_by_notation : ('a -> std_ppcmds) -> 'a or_by_notation -> std_ppcmds

(** Printers for extra generic arguments, one per phase (raw / glob /
    interpreted).  Each receives a constr printer, a second constr
    printer and a precedence-aware tactic printer. *)
type 'a raw_extra_genarg_printer =
    (constr_expr -> std_ppcmds) ->
    (constr_expr -> std_ppcmds) ->
    (tolerability -> raw_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

type 'a glob_extra_genarg_printer =
    (rawconstr_and_expr -> std_ppcmds) ->
    (rawconstr_and_expr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

type 'a extra_genarg_printer =
    (Term.constr -> std_ppcmds) ->
    (Term.constr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

(* if the boolean is false then the extension applies only to old syntax *)
(** Register printing rules for a generic-argument type, for all three
    phases at once. *)
val declare_extra_genarg_pprule :
  ('c raw_abstract_argument_type * 'c raw_extra_genarg_printer) ->
  ('a glob_abstract_argument_type * 'a glob_extra_genarg_printer) ->
  ('b typed_abstract_argument_type * 'b extra_genarg_printer) -> unit

(** Terminal tokens of a tactic-notation grammar rule; [None] marks a
    non-terminal position. *)
type grammar_terminals = string option list

(* if the boolean is false then the extension applies only to old syntax *)
(** Register (resp. query) the printing rule of a TACTIC EXTEND entry,
    keyed by its name and argument types. *)
val declare_extra_tactic_pprule :
  string * argument_type list * (int * grammar_terminals) -> unit

val exists_extra_tactic_pprule : string -> argument_type list -> bool

(** Print a raw generic argument, given printers for its possible
    payloads (constrs, tactics, references). *)
val pr_raw_generic :
  (constr_expr -> std_ppcmds) ->
  (constr_expr -> std_ppcmds) ->
  (tolerability -> raw_tactic_expr -> std_ppcmds) ->
  (Libnames.reference -> std_ppcmds) -> constr_expr generic_argument ->
  std_ppcmds

(** Printers for extended (TACTIC EXTEND) tactic invocations at the raw,
    globalized and interpreted levels; the [int] is presumably the rule
    index registered via [declare_extra_tactic_pprule]. *)
val pr_raw_extend:
    (constr_expr -> std_ppcmds) -> (constr_expr -> std_ppcmds) ->
    (tolerability -> raw_tactic_expr -> std_ppcmds) -> int ->
      string -> raw_generic_argument list -> std_ppcmds

val pr_glob_extend:
    (rawconstr_and_expr -> std_ppcmds) -> (rawconstr_and_expr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) -> int ->
      string -> glob_generic_argument list -> std_ppcmds

val pr_extend :
    (open_constr -> std_ppcmds) -> (open_constr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) -> int ->
      string -> typed_generic_argument list -> std_ppcmds

(** Top-level tactic printers, one per phase. *)
val pr_raw_tactic : env -> raw_tactic_expr -> std_ppcmds
val pr_raw_tactic_level : env -> tolerability -> raw_tactic_expr -> std_ppcmds
val pr_glob_tactic : env -> glob_tactic_expr -> std_ppcmds
val pr_tactic : env -> Proof_type.tactic_expr -> std_ppcmds

(** Auxiliary printers for hint databases, [auto using] lists and
    bindings clauses. *)
val pr_hintbases : string list option -> std_ppcmds
val pr_auto_using : ('constr -> std_ppcmds) ->  'constr list -> std_ppcmds
val pr_bindings :
  ('constr -> std_ppcmds) ->
  ('constr -> std_ppcmds) -> 'constr bindings -> std_ppcmds
| null | https://raw.githubusercontent.com/SamB/coq/8f84aba9ae83a4dc43ea6e804227ae8cae8086b1/parsing/pptactic.mli | ocaml | **********************************************************************
// * This file is distributed under the terms of the
* GNU Lesser General Public License Version 2.1
**********************************************************************
i $Id$ i
if the boolean is false then the extension applies only to old syntax
if the boolean is false then the extension applies only to old syntax | v * The Coq Proof Assistant / The Coq Development Team
< O _ _ _ , , * CNRS - Ecole Polytechnique - INRIA Futurs - Universite Paris Sud
\VV/ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
open Pp
open Genarg
open Tacexpr
open Pretyping
open Proof_type
open Topconstr
open Rawterm
open Ppextend
open Environ
open Evd

(** Printer combinators: lift a printer for ['a] to the corresponding
    wrapped type. *)
val pr_or_var : ('a -> std_ppcmds) -> 'a or_var -> std_ppcmds
val pr_or_metaid : ('a -> std_ppcmds) -> 'a or_metaid -> std_ppcmds
val pr_and_short_name : ('a -> std_ppcmds) -> 'a and_short_name -> std_ppcmds
val pr_or_by_notation : ('a -> std_ppcmds) -> 'a or_by_notation -> std_ppcmds

(** Printers for extra generic arguments, one per phase (raw / glob /
    interpreted).  Each receives a constr printer, a second constr
    printer and a precedence-aware tactic printer. *)
type 'a raw_extra_genarg_printer =
    (constr_expr -> std_ppcmds) ->
    (constr_expr -> std_ppcmds) ->
    (tolerability -> raw_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

type 'a glob_extra_genarg_printer =
    (rawconstr_and_expr -> std_ppcmds) ->
    (rawconstr_and_expr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

type 'a extra_genarg_printer =
    (Term.constr -> std_ppcmds) ->
    (Term.constr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) ->
    'a -> std_ppcmds

(** Register printing rules for a generic-argument type, for all three
    phases at once. *)
val declare_extra_genarg_pprule :
  ('c raw_abstract_argument_type * 'c raw_extra_genarg_printer) ->
  ('a glob_abstract_argument_type * 'a glob_extra_genarg_printer) ->
  ('b typed_abstract_argument_type * 'b extra_genarg_printer) -> unit

(** Terminal tokens of a tactic-notation grammar rule; [None] marks a
    non-terminal position. *)
type grammar_terminals = string option list

(** Register (resp. query) the printing rule of a TACTIC EXTEND entry,
    keyed by its name and argument types. *)
val declare_extra_tactic_pprule :
  string * argument_type list * (int * grammar_terminals) -> unit

val exists_extra_tactic_pprule : string -> argument_type list -> bool

(** Print a raw generic argument, given printers for its possible
    payloads (constrs, tactics, references). *)
val pr_raw_generic :
  (constr_expr -> std_ppcmds) ->
  (constr_expr -> std_ppcmds) ->
  (tolerability -> raw_tactic_expr -> std_ppcmds) ->
  (Libnames.reference -> std_ppcmds) -> constr_expr generic_argument ->
  std_ppcmds

(** Printers for extended (TACTIC EXTEND) tactic invocations at the raw,
    globalized and interpreted levels; the [int] is presumably the rule
    index registered via [declare_extra_tactic_pprule]. *)
val pr_raw_extend:
    (constr_expr -> std_ppcmds) -> (constr_expr -> std_ppcmds) ->
    (tolerability -> raw_tactic_expr -> std_ppcmds) -> int ->
      string -> raw_generic_argument list -> std_ppcmds

val pr_glob_extend:
    (rawconstr_and_expr -> std_ppcmds) -> (rawconstr_and_expr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) -> int ->
      string -> glob_generic_argument list -> std_ppcmds

val pr_extend :
    (open_constr -> std_ppcmds) -> (open_constr -> std_ppcmds) ->
    (tolerability -> glob_tactic_expr -> std_ppcmds) -> int ->
      string -> typed_generic_argument list -> std_ppcmds

(** Top-level tactic printers, one per phase. *)
val pr_raw_tactic : env -> raw_tactic_expr -> std_ppcmds
val pr_raw_tactic_level : env -> tolerability -> raw_tactic_expr -> std_ppcmds
val pr_glob_tactic : env -> glob_tactic_expr -> std_ppcmds
val pr_tactic : env -> Proof_type.tactic_expr -> std_ppcmds

(** Auxiliary printers for hint databases, [auto using] lists and
    bindings clauses. *)
val pr_hintbases : string list option -> std_ppcmds
val pr_auto_using : ('constr -> std_ppcmds) ->  'constr list -> std_ppcmds
val pr_bindings :
  ('constr -> std_ppcmds) ->
  ('constr -> std_ppcmds) -> 'constr bindings -> std_ppcmds
|
f5fd74d5062763dd0de35180d13769e74df2cffa455a5ecf8bfcc6a034bf4288 | bobzhang/fan | gcomb.mli |
(** Parser combinators over [Streamf.t] token streams. *)

(** Zero-or-more repetitions of a parser, folded through [f].
    The output is reversed; you have to reverse the list output
    if you care about the order. *)
val slist0 :
  f:('a list -> 'b) ->
  ('c Streamf.t -> 'a) -> 'c Streamf.t -> 'b

(** One-or-more repetitions of a parser; same reversed-output caveat
    as {!slist0}. *)
val slist1 :
  f:('a list -> 'b) ->
  ('c Streamf.t -> 'a) -> 'c Streamf.t -> 'b

(** Zero-or-more repetitions separated by a second parser; [err]
    renders a separator value into an error message (presumably used
    when an element is missing after a separator). *)
val slist0sep :
  err:('a -> string) ->
  f:('b list -> 'c) ->
  ('d Streamf.t -> 'b) ->
  ('d Streamf.t -> 'a) -> 'd Streamf.t -> 'c

(** One-or-more repetitions separated by a second parser; see
    {!slist0sep}. *)
val slist1sep :
  err:('a -> string) ->
  f:('b list -> 'c) ->
  ('d Streamf.t -> 'b) ->
  ('d Streamf.t -> 'a) -> 'd Streamf.t -> 'c

(** Run a parser, allowing backtracking on failure -- TODO confirm
    exact failure semantics against the implementation. *)
val tryp : ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b

(** Run a parser without consuming input -- TODO confirm against the
    implementation. *)
val peek : ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b

(** Alternation of two parsers; [?msg] customizes the failure
    message. *)
val orp :
  ?msg:string ->
  ('a Streamf.t -> 'b) ->
  ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b
| null | https://raw.githubusercontent.com/bobzhang/fan/7ed527d96c5a006da43d3813f32ad8a5baa31b7f/src/treeparser/gcomb.mli | ocaml | * the output is reversed, you have to reverse the list output
if you care about the order |
(** Zero-or-more repetitions of a parser, folded through [f].
    The output is reversed; reverse it if you care about order. *)
val slist0 :
  f:('a list -> 'b) ->
  ('c Streamf.t -> 'a) -> 'c Streamf.t -> 'b

(** One-or-more repetitions of a parser; same reversed-output caveat
    as {!slist0}. *)
val slist1 :
  f:('a list -> 'b) ->
  ('c Streamf.t -> 'a) -> 'c Streamf.t -> 'b

(** Zero-or-more repetitions separated by a second parser; [err]
    renders a separator value into an error message (presumably used
    when an element is missing after a separator). *)
val slist0sep :
  err:('a -> string) ->
  f:('b list -> 'c) ->
  ('d Streamf.t -> 'b) ->
  ('d Streamf.t -> 'a) -> 'd Streamf.t -> 'c

(** One-or-more repetitions separated by a second parser; see
    {!slist0sep}. *)
val slist1sep :
  err:('a -> string) ->
  f:('b list -> 'c) ->
  ('d Streamf.t -> 'b) ->
  ('d Streamf.t -> 'a) -> 'd Streamf.t -> 'c

(** Run a parser, allowing backtracking on failure -- TODO confirm
    exact failure semantics against the implementation. *)
val tryp : ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b

(** Run a parser without consuming input -- TODO confirm against the
    implementation. *)
val peek : ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b

(** Alternation of two parsers; [?msg] customizes the failure
    message. *)
val orp :
  ?msg:string ->
  ('a Streamf.t -> 'b) ->
  ('a Streamf.t -> 'b) -> 'a Streamf.t -> 'b
|
09e370c546dc4816e9c90751c91f8a9e5ee8025a73717f8cd1e23b0197056599 | schemedoc/ffi-cookbook | time-chicken.scm | (import (chicken foreign))
(define (libc-time)
((foreign-lambda unsigned-long "time" (c-pointer void)) #f))
(display (libc-time))
(newline)
| null | https://raw.githubusercontent.com/schemedoc/ffi-cookbook/75d3594135b5a4c5deea9a064a1aef5a95312f85/libc/time-chicken.scm | scheme | (import (chicken foreign))
(define (libc-time)
((foreign-lambda unsigned-long "time" (c-pointer void)) #f))
(display (libc-time))
(newline)
| |
358f40ec98a31f46c2722e00627292732f71937a0b31458d87a009d09c13a334 | mekispeter/haskell2019spring | Natural.hs |
Functional Programming for Logicians , 2019 Spring
May 13 Session
Functional Programming for Logicians, 2019 Spring
May 13 Session
-}
module Natural where
-- Instead of using Int or Integer for indices, which allow for negative
-- numbers, we define our own version of natural numbers. The type definition
-- is straightforward, but we need to instantiate quite a lot of classes.
-- | Unary (Peano) representation of the natural numbers: a value is
-- either 'Zero' or the successor of another natural.
data Natural = Zero | Succ Natural
  deriving (Eq, Ord)
-- | Enumeration for Peano naturals.  Conversion to and from 'Int' is
-- linear in the size of the number; 'toEnum' rejects negative inputs
-- and 'pred' rejects 'Zero'.
instance Enum Natural where
  succ = Succ
  pred (Succ n) = n
  pred Zero = error
    "Zero has no predecessor!"
  -- Count constructors with an accumulator instead of the naive
  -- @fromEnum n + 1@ recursion.
  fromEnum = count 0
    where
      count acc Zero = acc
      count acc (Succ n) = count (acc + 1) n
  -- Build the number as the n-th iterate of 'Succ' applied to 'Zero'.
  toEnum n
    | n < 0 = error
      "No negative naturals!"
    | otherwise = iterate Succ Zero !! n
-- | Peano arithmetic.  '+', '*' and '-' recurse on the second operand;
-- subtraction is partial (it errors rather than going negative), 'abs'
-- is the identity and 'signum' maps any non-zero value to @Succ Zero@.
instance Num Natural where
  n + Zero = n
  n + Succ m = Succ (n + m)
  n * Zero = Zero
  n * Succ m = n * m + n
  n - Zero = n
  n - Succ m
    -- n - Succ m would be negative exactly when n <= m
    | n <= m = error "No negative naturals!"
    | otherwise = pred (n - m)
  abs n = n
  signum Zero = Zero
  signum n = Succ Zero
  -- Mirrors 'toEnum', but taking an arbitrary-precision Integer.
  fromInteger n
    | n < 0 = error
      "No negative naturals!"
    | n == 0 = Zero
    | otherwise = Succ $ fromInteger (n-1)
-- | Render a natural by converting through 'Int' first.
instance Show Natural where
  show = show . fromEnum
| null | https://raw.githubusercontent.com/mekispeter/haskell2019spring/18fa81092cdc7a15c8ef92a2eff8274dfe79d00d/src/haskell_may13/Natural.hs | haskell | Instead of using Int or Integer for indices, which allow for negative
numbers, we define our own version of natural numbers. The type definition
is straightforward, but we need to instantiate quite a lot of classes. |
Functional Programming for Logicians , 2019 Spring
May 13 Session
Functional Programming for Logicians, 2019 Spring
May 13 Session
-}
module Natural where
-- | Unary (Peano) representation of the natural numbers: a value is
-- either 'Zero' or the successor of another natural.
data Natural = Zero | Succ Natural
  deriving (Eq, Ord)
-- | Enumeration for Peano naturals.  Conversion to and from 'Int' is
-- linear in the size of the number; 'toEnum' rejects negative inputs
-- and 'pred' rejects 'Zero'.
instance Enum Natural where
  succ = Succ
  pred (Succ n) = n
  pred Zero = error
    "Zero has no predecessor!"
  -- Count constructors with an accumulator instead of the naive
  -- @fromEnum n + 1@ recursion.
  fromEnum = count 0
    where
      count acc Zero = acc
      count acc (Succ n) = count (acc + 1) n
  -- Build the number as the n-th iterate of 'Succ' applied to 'Zero'.
  toEnum n
    | n < 0 = error
      "No negative naturals!"
    | otherwise = iterate Succ Zero !! n
-- | Peano arithmetic.  '+', '*' and '-' recurse on the second operand;
-- subtraction is partial (it errors rather than going negative), 'abs'
-- is the identity and 'signum' maps any non-zero value to @Succ Zero@.
instance Num Natural where
  n + Zero = n
  n + Succ m = Succ (n + m)
  n * Zero = Zero
  n * Succ m = n * m + n
  n - Zero = n
  n - Succ m
    -- n - Succ m would be negative exactly when n <= m
    | n <= m = error "No negative naturals!"
    | otherwise = pred (n - m)
  abs n = n
  signum Zero = Zero
  signum n = Succ Zero
  -- Mirrors 'toEnum', but taking an arbitrary-precision Integer.
  fromInteger n
    | n < 0 = error
      "No negative naturals!"
    | n == 0 = Zero
    | otherwise = Succ $ fromInteger (n-1)
-- | Render a natural by converting through 'Int' first.
instance Show Natural where
  show = show . fromEnum
|
2e9dc51dd2b123909229b352b7a1241d5c26334fac0805a7df506bcb0a2d133b | RefactoringTools/HaRe | GhcUtilsSpec.hs | # LANGUAGE ScopedTypeVariables #
# LANGUAGE CPP #
module GhcUtilsSpec (main, spec) where
import Test.Hspec
import TestUtils
import qualified GHC as GHC
import qualified Data . Generics as SYB
import qualified GHC.SYB.Utils as SYB
import Language . Haskell . GHC.ExactPrint . Utils
import Language . Haskell . Refact . Utils . Binds
import Language . Haskell . Refact . Utils . GhcUtils
import Language . Haskell . Refact . Utils . GhcVersionSpecific
import Language . Haskell . Refact . Utils . Monad
import Language . Haskell . Refact . Utils . MonadFunctions
import Language . Haskell . Refact . Utils . TypeUtils
import Language . Haskell . Refact . Utils . Utils
import Language . Haskell . Refact . Utils . Variables
import TestUtils
-- ---------------------------------------------------------------------
main :: IO ()
main = do
hspec spec
spec :: Spec
spec = do
describe "nothing happening here" $ do
it "need to delete this" $ do
"a" `shouldBe` "a"
describe " onelayerStaged " $ do
it " only descends one layer into a structure " $ do
let s ' = ( 2,[3,4],5 ) : : ( Int,[Int],Int )
let
worker ' ( i::Int ) = [ i ]
let = onelayerStaged [ ] ( [ ] ` SYB.mkQ ` worker ' ) s '
let g1 = SYB.gmapQ ( [ ] ` SYB.mkQ ` worker ' ) s '
let = SYB.gmapQl ( + + ) [ ] ( [ ] ` SYB.mkQ ` worker ' ) s '
( show ) ` shouldBe ` " [ [ 2],[],[5 ] ] "
( show g1 ) ` shouldBe ` " [ [ 2],[],[5 ] ] "
( show ) ` shouldBe ` " [ 2,5 ] "
-- ---------------------------------
it " Finds a GHC.Name at top level only " $ do
let
comp = do
parseSourceFileGhc " ./DupDef / Dd1.hs "
renamed < - getRefactRenamed
parsed < - getRefactParsed
let mn = locToRdrName ( 4,1 ) parsed
let Just ( ln'@(GHC.L l _ ) ) = mn
n = rdrName2NamePure ln '
ln = GHC.L l n
let mx = locToRdrName ( 4,10 ) parsed
let ( Just ( lx'@(GHC.L l2 _ ) ) ) = mx
x = rdrName2NamePure nm lx '
lx = GHC.L l2 x
let = hsBinds renamed
duplicatedDecls = definingDeclsNames [ n ] declsr True False
res = findEntity ln duplicatedDecls
res2 = findEntity n duplicatedDecls
resx = findEntity lx duplicatedDecls
resx2 = findEntity x ( nn::GHC.Name ) = [ showGhc nn ]
g = onelayerStaged SYB.Renamer [ " -1 " ] ( [ " -10 " ] ` SYB.mkQ ` worker ) duplicatedDecls
# if _ _ GLASGOW_HASKELL _ _ < = 710
worker2 ( ( GHC.L _ ( GHC.FunBind ( GHC.L _ n ' ) _ _ _ _ _ ) ): : ( GHC.HsBind GHC.Name ) )
# else
worker2 ( ( GHC.L _ ( GHC.FunBind ( GHC.L _ n ' ) _ _ _ _ ) ): : ( GHC.HsBind GHC.Name ) )
# endif
| n = = n ' = [ " found " ]
worker2 _ = [ ]
g2 = onelayerStaged [ " -1 " ] ( [ " -10 " ] ` SYB.mkQ ` worker2 ) duplicatedDecls
return ( res , res2,resx , , , g2,ln , lx )
( ( r , , rx2,d , gg , ) < - ct $ runRefactGhc comp initialState testOptions
-- ( SYB.showData SYB.Renamer 0 d ) ` shouldBe ` " "
( showGhcQual d ) ` shouldBe ` " [ DupDef.Dd1.toplevel x = DupDef . Dd1.c GHC.Num . * x ] "
( showGhcQual _ l ) ` shouldBe ` " DupDef.Dd1.toplevel "
( showGhc _ x ) ` shouldBe ` " x "
( show gg ) ` shouldBe ` " [ [ \"-10\"],[\"-10\ " ] ] "
( show gg2 ) ` shouldBe ` " [ [ \"found\"],[\"-10\ " ] ] "
r ` shouldBe ` True
r2 ` shouldBe ` True
rx ` shouldBe ` False
rx2 ` shouldBe ` True
describe "onelayerStaged" $ do
it "only descends one layer into a structure" $ do
let s' = (2,[3,4],5) :: (Int,[Int],Int)
let
worker' (i::Int) = [i]
let g = onelayerStaged SYB.Renamer [] ([] `SYB.mkQ` worker') s'
let g1 = SYB.gmapQ ([] `SYB.mkQ` worker') s'
let g2 = SYB.gmapQl (++) [] ([] `SYB.mkQ` worker') s'
(show g) `shouldBe` "[[2],[],[5]]"
(show g1) `shouldBe` "[[2],[],[5]]"
(show g2) `shouldBe` "[2,5]"
-- ---------------------------------
it "Finds a GHC.Name at top level only" $ do
let
comp = do
parseSourceFileGhc "./DupDef/Dd1.hs"
renamed <- getRefactRenamed
parsed <- getRefactParsed
nm <- getRefactNameMap
let mn = locToRdrName (4,1) parsed
let Just (ln'@(GHC.L l _)) = mn
n = rdrName2NamePure nm ln'
ln = GHC.L l n
let mx = locToRdrName (4,10) parsed
let (Just (lx'@(GHC.L l2 _))) = mx
x = rdrName2NamePure nm lx'
lx = GHC.L l2 x
let declsr = hsBinds renamed
duplicatedDecls = definingDeclsNames [n] declsr True False
res = findEntity ln duplicatedDecls
res2 = findEntity n duplicatedDecls
resx = findEntity lx duplicatedDecls
resx2 = findEntity x duplicatedDecls
worker (nn::GHC.Name) = [showGhc nn]
g = onelayerStaged SYB.Renamer ["-1"] (["-10"] `SYB.mkQ` worker) duplicatedDecls
#if __GLASGOW_HASKELL__ <= 710
worker2 ((GHC.L _ (GHC.FunBind (GHC.L _ n') _ _ _ _ _))::GHC.Located (GHC.HsBind GHC.Name))
#else
worker2 ((GHC.L _ (GHC.FunBind (GHC.L _ n') _ _ _ _))::GHC.Located (GHC.HsBind GHC.Name))
#endif
| n == n' = ["found"]
worker2 _ = []
g2 = onelayerStaged SYB.Renamer ["-1"] (["-10"] `SYB.mkQ` worker2) duplicatedDecls
return (res,res2,resx,resx2,duplicatedDecls,g,g2,ln,lx)
((r,r2,rx,rx2,d,gg,gg2,_l,_x),_s) <- ct $ runRefactGhc comp initialState testOptions
-- (SYB.showData SYB.Renamer 0 d) `shouldBe` ""
(showGhcQual d) `shouldBe` "[DupDef.Dd1.toplevel x = DupDef.Dd1.c GHC.Num.* x]"
(showGhcQual _l) `shouldBe` "DupDef.Dd1.toplevel"
(showGhc _x) `shouldBe` "x"
(show gg) `shouldBe` "[[\"-10\"],[\"-10\"]]"
(show gg2) `shouldBe` "[[\"found\"],[\"-10\"]]"
r `shouldBe` True
r2 `shouldBe` True
rx `shouldBe` False
rx2 `shouldBe` True
-}
-- ---------------------------------------------------------------------
| null | https://raw.githubusercontent.com/RefactoringTools/HaRe/ef5dee64c38fb104e6e5676095946279fbce381c/test/GhcUtilsSpec.hs | haskell | ---------------------------------------------------------------------
---------------------------------
( SYB.showData SYB.Renamer 0 d ) ` shouldBe ` " "
---------------------------------
(SYB.showData SYB.Renamer 0 d) `shouldBe` ""
--------------------------------------------------------------------- | # LANGUAGE ScopedTypeVariables #
# LANGUAGE CPP #
module GhcUtilsSpec (main, spec) where
import Test.Hspec
import TestUtils
import qualified GHC as GHC
import qualified Data.Generics as SYB
import qualified GHC.SYB.Utils as SYB
import Language.Haskell.GHC.ExactPrint.Utils
import Language.Haskell.Refact.Utils.Binds
import Language.Haskell.Refact.Utils.GhcUtils
import Language.Haskell.Refact.Utils.GhcVersionSpecific
import Language.Haskell.Refact.Utils.Monad
import Language.Haskell.Refact.Utils.MonadFunctions
import Language.Haskell.Refact.Utils.TypeUtils
import Language.Haskell.Refact.Utils.Utils
import Language.Haskell.Refact.Utils.Variables
import TestUtils
-- | Standalone entry point: run this module's 'spec' with hspec.
main :: IO ()
main = do
  hspec spec
-- | Top-level spec. Currently only a trivial placeholder assertion; the
-- test title itself notes it should eventually be deleted once real
-- GhcUtils tests are re-enabled.
spec :: Spec
spec = do
  describe "nothing happening here" $ do
    it "need to delete this" $ do
      "a" `shouldBe` "a"
describe " onelayerStaged " $ do
it " only descends one layer into a structure " $ do
let s ' = ( 2,[3,4],5 ) : : ( Int,[Int],Int )
let
worker ' ( i::Int ) = [ i ]
let = onelayerStaged [ ] ( [ ] ` SYB.mkQ ` worker ' ) s '
let g1 = SYB.gmapQ ( [ ] ` SYB.mkQ ` worker ' ) s '
let = SYB.gmapQl ( + + ) [ ] ( [ ] ` SYB.mkQ ` worker ' ) s '
( show ) ` shouldBe ` " [ [ 2],[],[5 ] ] "
( show g1 ) ` shouldBe ` " [ [ 2],[],[5 ] ] "
( show ) ` shouldBe ` " [ 2,5 ] "
it " Finds a GHC.Name at top level only " $ do
let
comp = do
parseSourceFileGhc " ./DupDef / Dd1.hs "
renamed < - getRefactRenamed
parsed < - getRefactParsed
let mn = locToRdrName ( 4,1 ) parsed
let Just ( ln'@(GHC.L l _ ) ) = mn
n = rdrName2NamePure ln '
ln = GHC.L l n
let mx = locToRdrName ( 4,10 ) parsed
let ( Just ( lx'@(GHC.L l2 _ ) ) ) = mx
x = rdrName2NamePure nm lx '
lx = GHC.L l2 x
let = hsBinds renamed
duplicatedDecls = definingDeclsNames [ n ] declsr True False
res = findEntity ln duplicatedDecls
res2 = findEntity n duplicatedDecls
resx = findEntity lx duplicatedDecls
resx2 = findEntity x ( nn::GHC.Name ) = [ showGhc nn ]
g = onelayerStaged SYB.Renamer [ " -1 " ] ( [ " -10 " ] ` SYB.mkQ ` worker ) duplicatedDecls
# if _ _ GLASGOW_HASKELL _ _ < = 710
worker2 ( ( GHC.L _ ( GHC.FunBind ( GHC.L _ n ' ) _ _ _ _ _ ) ): : ( GHC.HsBind GHC.Name ) )
# else
worker2 ( ( GHC.L _ ( GHC.FunBind ( GHC.L _ n ' ) _ _ _ _ ) ): : ( GHC.HsBind GHC.Name ) )
# endif
| n = = n ' = [ " found " ]
worker2 _ = [ ]
g2 = onelayerStaged [ " -1 " ] ( [ " -10 " ] ` SYB.mkQ ` worker2 ) duplicatedDecls
return ( res , res2,resx , , , g2,ln , lx )
( ( r , , rx2,d , gg , ) < - ct $ runRefactGhc comp initialState testOptions
( showGhcQual d ) ` shouldBe ` " [ DupDef.Dd1.toplevel x = DupDef . Dd1.c GHC.Num . * x ] "
( showGhcQual _ l ) ` shouldBe ` " DupDef.Dd1.toplevel "
( showGhc _ x ) ` shouldBe ` " x "
( show gg ) ` shouldBe ` " [ [ \"-10\"],[\"-10\ " ] ] "
( show gg2 ) ` shouldBe ` " [ [ \"found\"],[\"-10\ " ] ] "
r ` shouldBe ` True
r2 ` shouldBe ` True
rx ` shouldBe ` False
rx2 ` shouldBe ` True
describe "onelayerStaged" $ do
it "only descends one layer into a structure" $ do
let s' = (2,[3,4],5) :: (Int,[Int],Int)
let
worker' (i::Int) = [i]
let g = onelayerStaged SYB.Renamer [] ([] `SYB.mkQ` worker') s'
let g1 = SYB.gmapQ ([] `SYB.mkQ` worker') s'
let g2 = SYB.gmapQl (++) [] ([] `SYB.mkQ` worker') s'
(show g) `shouldBe` "[[2],[],[5]]"
(show g1) `shouldBe` "[[2],[],[5]]"
(show g2) `shouldBe` "[2,5]"
it "Finds a GHC.Name at top level only" $ do
let
comp = do
parseSourceFileGhc "./DupDef/Dd1.hs"
renamed <- getRefactRenamed
parsed <- getRefactParsed
nm <- getRefactNameMap
let mn = locToRdrName (4,1) parsed
let Just (ln'@(GHC.L l _)) = mn
n = rdrName2NamePure nm ln'
ln = GHC.L l n
let mx = locToRdrName (4,10) parsed
let (Just (lx'@(GHC.L l2 _))) = mx
x = rdrName2NamePure nm lx'
lx = GHC.L l2 x
let declsr = hsBinds renamed
duplicatedDecls = definingDeclsNames [n] declsr True False
res = findEntity ln duplicatedDecls
res2 = findEntity n duplicatedDecls
resx = findEntity lx duplicatedDecls
resx2 = findEntity x duplicatedDecls
worker (nn::GHC.Name) = [showGhc nn]
g = onelayerStaged SYB.Renamer ["-1"] (["-10"] `SYB.mkQ` worker) duplicatedDecls
#if __GLASGOW_HASKELL__ <= 710
worker2 ((GHC.L _ (GHC.FunBind (GHC.L _ n') _ _ _ _ _))::GHC.Located (GHC.HsBind GHC.Name))
#else
worker2 ((GHC.L _ (GHC.FunBind (GHC.L _ n') _ _ _ _))::GHC.Located (GHC.HsBind GHC.Name))
#endif
| n == n' = ["found"]
worker2 _ = []
g2 = onelayerStaged SYB.Renamer ["-1"] (["-10"] `SYB.mkQ` worker2) duplicatedDecls
return (res,res2,resx,resx2,duplicatedDecls,g,g2,ln,lx)
((r,r2,rx,rx2,d,gg,gg2,_l,_x),_s) <- ct $ runRefactGhc comp initialState testOptions
(showGhcQual d) `shouldBe` "[DupDef.Dd1.toplevel x = DupDef.Dd1.c GHC.Num.* x]"
(showGhcQual _l) `shouldBe` "DupDef.Dd1.toplevel"
(showGhc _x) `shouldBe` "x"
(show gg) `shouldBe` "[[\"-10\"],[\"-10\"]]"
(show gg2) `shouldBe` "[[\"found\"],[\"-10\"]]"
r `shouldBe` True
r2 `shouldBe` True
rx `shouldBe` False
rx2 `shouldBe` True
-}
|
1dd8bf6c568276bcad68dd6f066b2db9de1bfd7d78eec223620ac2b791d08c8b | logsem/mitten_preorder | syntax.ml | open Sexplib
open Mode_theory
type uni_level = int
type t =
DeBruijn indices for variables
BINDS
BINDS 2
BINDS
BINDS
BINDS
| Uni of uni_level
| TyMod of m * t
| Mod of m * t
BINDS
| Axiom of string * t
(* One entry of a typing environment: either a variable's type or a mode
   annotation (from Mode_theory). *)
type envhead =
  | Ty of t (* the type recorded for a term variable *)
  | Mo of m (* a mode entry; skipped by variable counting (see env_length) *)
(* Environments are plain lists of entries; presumably innermost binding
   first, DeBruijn-style — TODO confirm against the elaborator. *)
type env = envhead list
(* Signals an ill-formed term; no raiser is visible in this file — the
   users of this exception live elsewhere. *)
exception Illformed
(* Zero-based lookup of element [id] in [lst]; fails loudly when the list
   is too short.  A negative [id] also ends in the failure case once the
   list runs out, exactly as repeated decrementing would. *)
let rec nth lst id =
  match lst, id with
  | [], _ -> failwith "syntax shift mistake, context too short?"
  | hd :: _, 0 -> hd
  | _ :: tl, n -> nth tl (n - 1)
(* Number of [Ty] entries in an environment; [Mo] entries do not count —
   presumably so mode annotations are invisible to DeBruijn indices
   (TODO confirm). *)
let env_length lst =
  List.fold_left
    (fun acc entry -> match entry with Ty _ -> acc + 1 | Mo _ -> acc)
    0 lst
(* Index of the first element of [xs] equal to [key] under [equal], or
   [None] when no element matches. *)
let find_idx ~equal key xs =
  let rec search idx rest =
    match rest with
    | [] -> None
    | hd :: tl -> if equal key hd then Some idx else search (idx + 1) tl
  in
  search 0 xs
(* Render a term as an s-expression for pretty-printing.
   [env] maps a DeBruijn index to the atom chosen for its binder; indices
   falling outside [env] are shown as "free<i>".  Fresh binder names
   "x1", "x2", ... are drawn from a private counter, so names are unique
   within one [to_sexp] call. *)
let to_sexp env t =
  let counter = ref 0 in
  (* Collapse a Zero/Suc tower into [Some n] so numerals print as literals. *)
  let rec int_of_syn = function
    | Zero -> Some 0
    | Suc t ->
      begin
        match int_of_syn t with
        | Some i -> Some (i + 1)
        | None -> None
      end
    | _ -> None in
  let rec go env = function
    (* need pp for cells to pretty print variables also for non trivial cells *)
    | Var i -> if i >= List.length env
               then Sexp.Atom ("free" ^ string_of_int i)
               else List.nth env i
    | Nat -> Sexp.Atom "Nat"
    | Let (def, body) ->
      incr counter;
      let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List
        [Sexp.Atom "let";
         Sexp.List [var; go env def];
         go (var :: env) body]
    | Check (term, tp) -> Sexp.List [Sexp.Atom "check"; go env term; go env tp]
    | Zero -> Sexp.Atom "zero"
    | Suc t ->
      (* Print fully-numeric successors as a literal, otherwise as (suc t). *)
      begin
        match int_of_syn t with
        | Some i -> Sexp.Atom (string_of_int (i + 1))
        | None -> Sexp.List [Sexp.Atom "suc"; go env t]
      end
    | NRec (motive, zero, suc, n) ->
      (* Three fresh names: one for the motive binder, two for the
         successor branch (predecessor and recursive result). *)
      incr counter;
      let mvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      incr counter;
      let suc_var1 = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      incr counter;
      let suc_var2 = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List
        [Sexp.Atom "nrec";
         Sexp.List [mvar; go (mvar :: env) motive];
         go env zero;
         Sexp.List [suc_var1; suc_var2; go (suc_var2 :: suc_var1 :: env) suc];
         go env n]
    | Pi (mu, src, dest) ->
      incr counter;
      let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List [Sexp.Atom "Pi"; mod_to_sexp mu; go env src; Sexp.List [var; Sexp.Atom "->"; go (var :: env) dest]]
    | Lam t ->
      incr counter;
      let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List [Sexp.Atom "lam"; Sexp.List [var; go (var :: env) t]]
    | Ap (mu, t1, t2) ->
      Sexp.List [Sexp.Atom "ap"; mod_to_sexp mu; go env t1; go env t2]
    | Sig (fst, snd) ->
      incr counter;
      let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List [Sexp.Atom "Sig"; go env fst; Sexp.List [var; go (var :: env) snd]]
    | Pair (t1, t2) ->
      Sexp.List [Sexp.Atom "pair"; go env t1; go env t2]
    | Fst t -> Sexp.List [Sexp.Atom "fst"; go env t]
    | Snd t -> Sexp.List [Sexp.Atom "snd"; go env t]
    | Uni i -> Sexp.List [Sexp.Atom "U"; Sexp.Atom (string_of_int i)]
    | TyMod (mu, tp) -> Sexp.List [Sexp.Atom "<"; mod_to_sexp mu; Sexp.Atom "|"; go env tp; Sexp.Atom ">"]
    | Mod (mu, tm) -> Sexp.List [Sexp.Atom "mod"; mod_to_sexp mu; go env tm]
    | Letmod (mu, nu, tymot, deptm, tm) ->
      (* [mvar] scopes over the type motive, [tm_var] over the dependent body. *)
      incr counter;
      let mvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      incr counter;
      let tm_var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List [Sexp.Atom "let"; mod_to_sexp mu; Sexp.Atom "mod"; mod_to_sexp nu; Sexp.Atom "<-"; go env tm ; Sexp.Atom "in"; Sexp.List [go (tm_var :: env) deptm]; Sexp.Atom "at"; go (mvar :: env) tymot]
    | Id (ty, le, ri) -> Sexp.List [Sexp.Atom "Id"; go env ty; go env le; go env ri]
    | Refl term -> Sexp.List [Sexp.Atom "Refl"; go env term]
    | J (mot, refltm, eq) ->
      (* Motive binds right endpoint, left endpoint and proof, in that order. *)
      incr counter;
      let rivar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      incr counter;
      let levar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      incr counter;
      let prfvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
      Sexp.List [Sexp.Atom "J"; go (prfvar :: levar :: rivar :: env) mot; go (levar :: env) refltm; go env eq]
    | Axiom (str, _) -> Sexp.Atom str in
  go env t
(* Human-readable rendering of a term against the empty environment. *)
let pp t = Sexp.to_string_hum (to_sexp [] t)
| null | https://raw.githubusercontent.com/logsem/mitten_preorder/54cd337c4b0f5fbb01cc80e10c123c59ca74dc57/src/lib/syntax.ml | ocaml | need pp for cells to pretty print variables also for non trivial cells | open Sexplib
open Mode_theory
type uni_level = int
type t =
DeBruijn indices for variables
BINDS
BINDS 2
BINDS
BINDS
BINDS
| Uni of uni_level
| TyMod of m * t
| Mod of m * t
BINDS
| Axiom of string * t
type envhead =
| Ty of t
| Mo of m
type env = envhead list
exception Illformed
let rec nth lst id =
match lst with
| [] -> failwith "syntax shift mistake, context too short?"
| x :: xs -> if Int.equal id 0 then x else nth xs (id - 1)
let rec env_length lst =
match lst with
| [] -> 0
| Ty _ :: xs -> (env_length xs) + 1
| Mo _ :: xs -> (env_length xs)
let find_idx ~equal key xs =
let rec go i = function
| [] -> None
| x :: xs ->
if equal key x then Some i else go (i + 1) xs in
go 0 xs
let to_sexp env t =
let counter = ref 0 in
let rec int_of_syn = function
| Zero -> Some 0
| Suc t ->
begin
match int_of_syn t with
| Some i -> Some (i + 1)
| None -> None
end
| _ -> None in
let rec go env = function
| Var i -> if i >= List.length env
then Sexp.Atom ("free" ^ string_of_int i)
else List.nth env i
| Nat -> Sexp.Atom "Nat"
| Let (def, body) ->
incr counter;
let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List
[Sexp.Atom "let";
Sexp.List [var; go env def];
go (var :: env) body]
| Check (term, tp) -> Sexp.List [Sexp.Atom "check"; go env term; go env tp]
| Zero -> Sexp.Atom "zero"
| Suc t ->
begin
match int_of_syn t with
| Some i -> Sexp.Atom (string_of_int (i + 1))
| None -> Sexp.List [Sexp.Atom "suc"; go env t]
end
| NRec (motive, zero, suc, n) ->
incr counter;
let mvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
incr counter;
let suc_var1 = Sexp.Atom ("x" ^ string_of_int (! counter)) in
incr counter;
let suc_var2 = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List
[Sexp.Atom "nrec";
Sexp.List [mvar; go (mvar :: env) motive];
go env zero;
Sexp.List [suc_var1; suc_var2; go (suc_var2 :: suc_var1 :: env) suc];
go env n]
| Pi (mu, src, dest) ->
incr counter;
let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List [Sexp.Atom "Pi"; mod_to_sexp mu; go env src; Sexp.List [var; Sexp.Atom "->"; go (var :: env) dest]]
| Lam t ->
incr counter;
let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List [Sexp.Atom "lam"; Sexp.List [var; go (var :: env) t]]
| Ap (mu, t1, t2) ->
Sexp.List [Sexp.Atom "ap"; mod_to_sexp mu; go env t1; go env t2]
| Sig (fst, snd) ->
incr counter;
let var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List [Sexp.Atom "Sig"; go env fst; Sexp.List [var; go (var :: env) snd]]
| Pair (t1, t2) ->
Sexp.List [Sexp.Atom "pair"; go env t1; go env t2]
| Fst t -> Sexp.List [Sexp.Atom "fst"; go env t]
| Snd t -> Sexp.List [Sexp.Atom "snd"; go env t]
| Uni i -> Sexp.List [Sexp.Atom "U"; Sexp.Atom (string_of_int i)]
| TyMod (mu, tp) -> Sexp.List [Sexp.Atom "<"; mod_to_sexp mu; Sexp.Atom "|"; go env tp; Sexp.Atom ">"]
| Mod (mu, tm) -> Sexp.List [Sexp.Atom "mod"; mod_to_sexp mu; go env tm]
| Letmod (mu, nu, tymot, deptm, tm) ->
incr counter;
let mvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
incr counter;
let tm_var = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List [Sexp.Atom "let"; mod_to_sexp mu; Sexp.Atom "mod"; mod_to_sexp nu; Sexp.Atom "<-"; go env tm ; Sexp.Atom "in"; Sexp.List [go (tm_var :: env) deptm]; Sexp.Atom "at"; go (mvar :: env) tymot]
| Id (ty, le, ri) -> Sexp.List [Sexp.Atom "Id"; go env ty; go env le; go env ri]
| Refl term -> Sexp.List [Sexp.Atom "Refl"; go env term]
| J (mot, refltm, eq) ->
incr counter;
let rivar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
incr counter;
let levar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
incr counter;
let prfvar = Sexp.Atom ("x" ^ string_of_int (! counter)) in
Sexp.List [Sexp.Atom "J"; go (prfvar :: levar :: rivar :: env) mot; go (levar :: env) refltm; go env eq]
| Axiom (str, _) -> Sexp.Atom str in
go env t
let pp t = to_sexp [] t |> Sexp.to_string_hum
|
81ce9337b7d7c36683ae4ac2a3f710d97ecd5a8b359a8f8f837480af4059a6df | input-output-hk/cardano-sl | PollSpec.hs | # LANGUAGE RecordWildCards #
-- | Specification for submodules of Pos.Chain.Update
module Test.Pos.Update.PollSpec
( spec
) where
import Universum
import Control.Lens (at)
import qualified Data.HashSet as HS
import Test.Hspec (Spec, describe)
import Test.Hspec.QuickCheck (modifyMaxSuccess, prop)
import Test.QuickCheck (Arbitrary (..), Gen, Property, conjoin,
forAll, listOf, suchThat, (===))
import Test.QuickCheck.Arbitrary.Generic (genericArbitrary,
genericShrink)
import Pos.Chain.Update (ApplicationName, BlockVersion (..),
BlockVersionData (..), SoftwareVersion (..), UpId,
UpdateProposal (..), applyBVM)
import qualified Pos.Chain.Update as Poll
import Pos.Core (StakeholderId, addressHash)
import Pos.Crypto (hash)
import qualified Pos.DB.Update as Poll
import Pos.Infra.Slotting.Types (SlottingData)
import qualified Pos.Util.Modifier as MM
import Test.Pos.Binary.Helpers ()
import Test.Pos.Chain.Update.Arbitrary ()
import Test.Pos.DB.Update.Arbitrary ()
import Test.Pos.Util.QuickCheck.Property (formsMonoid)
-- | Top-level spec for the pure Poll machinery. 'smaller' caps the number
-- of QuickCheck cases per property, since every case generates whole
-- 'PollState's / 'PollModifier's and is therefore relatively expensive.
spec :: Spec
spec = describe "Poll" $ do
    let smaller n = modifyMaxSuccess (const n)
    describe "modifyPollModifier" $ smaller 30 $ do
        prop
            "poll modifiers form a commutative monoid under 'modifyPollModifier'"
            modifyPollFormsMonoid
    describe "PollState" $ smaller 30 $ do
        prop
            "applying two poll modifiers in sequence to the poll state is equivalent\
            \ to combining them and applying the resulting modifier"
            modifyPollStateWithModifiers
    describe "PurePoll" $ smaller 30 $ do
        prop
            "applying a series of modifications to a modifier and then applying it to\
            \ a poll state is the same as applying the modifications directly to the\
            \ poll state"
            applyActions
        prop "Adding and then deleting a block version's state to 'PollState' is\
             \ equivalent to doing nothing"
             putDelBVState
        prop "Setting and then deleting the last confirmed version of an application\
             \ is equivalent to doing nothing"
             setDeleteConfirmedSV
        prop "Adding and then deleting a confirmed proposal is the same as doing\
             \ nothing"
             addDeleteConfirmedProposal
        prop "Inserting an active proposal and then deleting it is the same as doing\
             \ nothing"
             insertDeleteProposal
-- | The monoid laws hold for 'Poll.PollModifier'; delegates to the shared
-- 'formsMonoid' helper from the QuickCheck property utilities.
modifyPollFormsMonoid
    :: Poll.PollModifier
    -> Poll.PollModifier
    -> Poll.PollModifier
    -> Property
modifyPollFormsMonoid pm1 pm2 pm3 = formsMonoid pm1 pm2 pm3
-- | Applying two modifiers one after the other agrees with applying their
-- monoidal composition in a single step.
modifyPollStateWithModifiers
    :: Poll.PollState
    -> Poll.PollModifier
    -> Poll.PollModifier
    -> Property
modifyPollStateWithModifiers pst pm1 pm2 = stepwise === combined
  where
    stepwise = Poll.modifyPollState pm2 (Poll.modifyPollState pm1 pst)
    combined = Poll.modifyPollState (pm1 <> pm2) pst
-- | First-order encoding of the 'Poll.MonadPoll' operations, so QuickCheck
-- can generate and shrink sequences of them; 'actionToMonad' interprets each
-- constructor as the corresponding monadic action, and
-- 'applyActionToModifier' models its effect on a 'Poll.PollModifier'.
data PollAction
    = PutBVState BlockVersion Poll.BlockVersionState
    | DelBVState BlockVersion
    | SetAdoptedBV BlockVersion
    | SetLastConfirmedSV SoftwareVersion
    | DelConfirmedSV ApplicationName
    | AddConfirmedProposal Poll.ConfirmedProposalState
    | DelConfirmedProposal SoftwareVersion
    | InsertActiveProposal Poll.ProposalState
    | DeactivateProposal UpId
    | SetSlottingData SlottingData
    | SetEpochProposers (HashSet StakeholderId)
    deriving (Show, Eq, Generic)
-- | Generic generator/shrinker; relies on the 'Arbitrary' instances of each
-- field type, imported from the Test.Pos.*.Arbitrary modules above.
instance Arbitrary PollAction where
    arbitrary = genericArbitrary
    shrink = genericShrink
-- | Interpret a 'PollAction' as the corresponding 'Poll.MonadPoll' operation.
actionToMonad :: Poll.MonadPoll m => PollAction -> m ()
actionToMonad action = case action of
    PutBVState bv bvs        -> Poll.putBVState bv bvs
    DelBVState bv            -> Poll.delBVState bv
    SetAdoptedBV bv          -> Poll.setAdoptedBV bv
    SetLastConfirmedSV sv    -> Poll.setLastConfirmedSV sv
    DelConfirmedSV an        -> Poll.delConfirmedSV an
    AddConfirmedProposal cps -> Poll.addConfirmedProposal cps
    DelConfirmedProposal sv  -> Poll.delConfirmedProposal sv
    InsertActiveProposal ps  -> Poll.insertActiveProposal ps
    DeactivateProposal ui    -> Poll.deactivateProposal ui
    SetSlottingData sd       -> Poll.setSlottingData sd
    SetEpochProposers hs     -> Poll.setEpochProposers hs
-- | Pure model of what each 'PollAction' should do to a 'PollModifier'.
-- The properties below fold this function over an action list and compare
-- the outcome with running the real 'Poll.PurePoll' interpreter on the same
-- actions. The 'Poll.PollState' argument supplies the base-state fallbacks
-- ('psAdoptedBV' and the 'innerLookupFun' lookups) that the interpreter
-- would otherwise read from the underlying state.
applyActionToModifier
    :: PollAction
    -> Poll.PollState
    -> Poll.PollModifier
    -> Poll.PollModifier
applyActionToModifier (PutBVState bv bvs) _ = Poll.pmBVsL %~ MM.insert bv bvs
applyActionToModifier (DelBVState bv) _ = Poll.pmBVsL %~ MM.delete bv
-- Adoption only takes effect when the modifier tracks state for 'bv'
-- (falling back to the base state's map for the lookup); the tracked
-- 'bvsModifier' is then applied over the currently adopted data.
applyActionToModifier (SetAdoptedBV bv) pst = \pm -> do
    let adoptedBVData = snd $
            fromMaybe (pst ^. Poll.psAdoptedBV) (Poll.pmAdoptedBVFull pm)
    case MM.lookup innerLookupFun bv (Poll.pmBVs pm) of
        Nothing -> pm
        Just (Poll.bvsModifier -> bvm) ->
            pm { Poll.pmAdoptedBVFull = Just (bv, applyBVM bvm adoptedBVData) }
  where
    innerLookupFun k = pst ^. Poll.psBlockVersions . at k
applyActionToModifier (SetLastConfirmedSV SoftwareVersion {..}) _ =
    Poll.pmConfirmedL %~ MM.insert svAppName svNumber
applyActionToModifier (DelConfirmedSV an) _ = Poll.pmConfirmedL %~ MM.delete an
applyActionToModifier (AddConfirmedProposal cps) _ =
    Poll.pmConfirmedPropsL %~ MM.insert (Poll.cpsSoftwareVersion cps) cps
applyActionToModifier (DelConfirmedProposal sv) _ = Poll.pmConfirmedPropsL %~ MM.delete sv
-- NOTE(review): the epoch-proposer set is extended only when 'upId' is
-- already present among the active proposals; this is meant to mirror the
-- pure interpreter's observable behaviour — confirm against
-- 'Poll.insertActiveProposal'.
applyActionToModifier (InsertActiveProposal ps) pst = \p ->
    let up@UnsafeUpdateProposal{..} = Poll.psProposal ps
        upId = hash up
        p' = case MM.lookup innerLookupFun upId (Poll.pmActiveProps p) of
                 Nothing -> p
                 Just _ -> p & Poll.pmEpochProposersL %~ fmap (HS.insert (addressHash upFrom))
    in p' & (Poll.pmActivePropsL %~ MM.insert upId ps)
  where
    innerLookupFun k = pst ^. Poll.psActiveProposals . at k
-- Deactivation is a no-op for an unknown id; the id actually deleted is
-- re-derived by hashing the stored proposal rather than taken from 'ui'.
applyActionToModifier (DeactivateProposal ui) pst = \p ->
    let proposal = MM.lookup innerLookupFun ui (Poll.pmActiveProps p)
    in case proposal of
           Nothing -> p
           Just ps ->
               let up = Poll.psProposal ps
                   upId = hash up
               in p & (Poll.pmActivePropsL %~ MM.delete upId)
  where
    innerLookupFun k = pst ^. Poll.psActiveProposals . at k
applyActionToModifier (SetSlottingData sd) _ = Poll.pmSlottingDataL .~ (Just sd)
applyActionToModifier (SetEpochProposers hs) _ = Poll.pmEpochProposersL .~ (Just hs)
-- | Folding 'applyActionToModifier' over an action list and applying the
-- accumulated modifier must agree, step by step, with executing the same
-- actions in 'Poll.PurePoll'.
applyActions :: Poll.PollState -> [PollAction] -> Property
applyActions ps actionList =
    let pollSts = fmap (actionToMonad @Poll.PurePoll) actionList
        -- 'resultModifiers' has a 'mempty' poll modifier up front, so 'newPollStates'
        -- has two 'ps's in the head of the list. As such another 'ps' is added
        -- at the head of 'resultPStates' to make up for that.
        resultModifiers =
            scanl (\pmod act -> applyActionToModifier act ps pmod) mempty actionList
        resultPStates = ps : scanl Poll.execPurePollWithLogger ps pollSts
        newPollStates = scanl (flip Poll.modifyPollState) ps resultModifiers
    in conjoin $ zipWith (===) resultPStates newPollStates
-- | The adopted block version and its data — the only 'PollState' field
-- without a 'Monoid' instance, so each property threads it in explicitly
-- (see 'emptyPollSt').
type PollStateTestInfo = (BlockVersion, BlockVersionData)
-- | Empty 'PollState' to be used in tests. Since all fields of the datatype
-- except the second (psAdoptedBV) have an instance for 'Monoid', it is
-- passed as an argument that each property will supply.
emptyPollSt :: PollStateTestInfo -> Poll.PollState
emptyPollSt bvInfo = Poll.PollState
    mempty
    bvInfo
    mempty
    mempty
    mempty
    mempty
    mempty
    mempty
    mempty
    mempty
-- | Apply a sequence of 'PollAction's from left to right.
perform :: [PollAction] -> Poll.PurePoll ()
perform = foldl (>>) (return ()) . map actionToMonad
-- | Operational equivalence operator in the 'PurePoll' monad. To be used when
-- equivalence between two sequences of actions in 'PurePoll' is to be tested/proved.
-- Both candidate sequences are executed between the same random prefix
-- (drawn from the supplied generator, so callers can exclude interfering
-- actions) and a random suffix, starting from 'emptyPollSt'; the resulting
-- states must coincide.
(==^)
    :: [PollAction]
    -> [PollAction]
    -> Gen PollAction
    -> PollStateTestInfo
    -> Property
p1 ==^ p2 = \prefixGen bvInfo ->
    forAll ((listOf prefixGen) :: Gen [PollAction]) $ \prefix ->
        forAll (arbitrary :: Gen [PollAction]) $ \suffix ->
            let applyAction x =
                    Poll.execPurePollWithLogger (emptyPollSt bvInfo)
                        (perform $ prefix ++ x ++ suffix)
            in applyAction p1 === applyAction p2
{- A note on the following tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The reason these tests have to pass a custom generator for the prefix of the action list
to '(==^)' is that in each case, there is a particular sequence of actions for which
the property does not hold. Using the next test as an example:

Let 'bvs, bvs´ :: BlockVersionState' such that 'bvs /= bvs´'. This sequence of actions
in the 'PurePoll' monad:

    [PutBVState bv bvs´, PutBVState bv bvs, DelBVState bv]

is not, in operational semantics terms, equal to the sequence

    [PutBVState bv bvs´]

It is instead equivalent to

    []

Because these actions are performed from left to right, performing an insertion with the
same key twice in a row without deleting it in between those two insertions means only
the last insertion actually matters for these tests.
As such, prefixes with an insertion with the same key as the action being tested in the
property will cause it to fail.
-}
-- | Inserting a block version's state and deleting it straight away is a
-- no-op, provided the random prefix never inserts the same 'BlockVersion'.
putDelBVState
    :: BlockVersion
    -> Poll.BlockVersionState
    -> PollStateTestInfo
    -> Property
putDelBVState bv bvs =
    ([PutBVState bv bvs, DelBVState bv] ==^ []) prefixGen
  where
    prefixGen = arbitrary `suchThat` leavesKeyAlone
    leavesKeyAlone (PutBVState bv' _) = bv' /= bv
    leavesKeyAlone _                  = True
-- | Confirming a software version and deleting the confirmation for its
-- application is a no-op, provided the random prefix never confirms a
-- version of the same application.
setDeleteConfirmedSV
    :: SoftwareVersion
    -> PollStateTestInfo
    -> Property
setDeleteConfirmedSV sv =
    ([SetLastConfirmedSV sv, DelConfirmedSV appName] ==^ []) prefixGen
  where
    appName = svAppName sv
    prefixGen = arbitrary `suchThat` leavesAppAlone
    leavesAppAlone (SetLastConfirmedSV sv') = svAppName sv' /= appName
    leavesAppAlone _                        = True
-- | Adding a confirmed proposal and deleting it by its software version is
-- a no-op, provided the random prefix never adds a proposal with the same
-- software version.
addDeleteConfirmedProposal
    :: Poll.ConfirmedProposalState
    -> PollStateTestInfo
    -> Property
addDeleteConfirmedProposal cps =
    ([AddConfirmedProposal cps, DelConfirmedProposal softwareVersion] ==^ []) prefixGen
  where
    softwareVersion = Poll.cpsSoftwareVersion cps
    prefixGen = arbitrary `suchThat` leavesVersionAlone
    leavesVersionAlone (AddConfirmedProposal cps') =
        Poll.cpsSoftwareVersion cps' /= softwareVersion
    leavesVersionAlone _ = True
-- | Inserting an active proposal and deactivating it by its id is a no-op,
-- provided the random prefix never inserts a proposal with the same 'UpId'.
insertDeleteProposal
    :: Poll.ProposalState
    -> PollStateTestInfo
    -> Property
insertDeleteProposal ps =
    ([InsertActiveProposal ps, DeactivateProposal upId] ==^ []) prefixGen
  where
    getUpId = hash . Poll.psProposal
    upId = getUpId ps
    prefixGen = arbitrary `suchThat` leavesIdAlone
    leavesIdAlone (InsertActiveProposal ps') = upId /= getUpId ps'
    leavesIdAlone _ = True
| null | https://raw.githubusercontent.com/input-output-hk/cardano-sl/1499214d93767b703b9599369a431e67d83f10a2/lib/test/Test/Pos/Update/PollSpec.hs | haskell | | Specification for submodules of Pos.Chain.Update
'resultModifiers' has a 'mempty' poll modifier up front, so 'newPollStates'
at the head of 'resultPStates' to make up for that.
| Type synonym used for convenience.
that each property will supply.
| Operational equivalence operator in the 'PurePoll' monad. To be used when | # LANGUAGE RecordWildCards #
module Test.Pos.Update.PollSpec
( spec
) where
import Universum
import Control.Lens (at)
import qualified Data.HashSet as HS
import Test.Hspec (Spec, describe)
import Test.Hspec.QuickCheck (modifyMaxSuccess, prop)
import Test.QuickCheck (Arbitrary (..), Gen, Property, conjoin,
forAll, listOf, suchThat, (===))
import Test.QuickCheck.Arbitrary.Generic (genericArbitrary,
genericShrink)
import Pos.Chain.Update (ApplicationName, BlockVersion (..),
BlockVersionData (..), SoftwareVersion (..), UpId,
UpdateProposal (..), applyBVM)
import qualified Pos.Chain.Update as Poll
import Pos.Core (StakeholderId, addressHash)
import Pos.Crypto (hash)
import qualified Pos.DB.Update as Poll
import Pos.Infra.Slotting.Types (SlottingData)
import qualified Pos.Util.Modifier as MM
import Test.Pos.Binary.Helpers ()
import Test.Pos.Chain.Update.Arbitrary ()
import Test.Pos.DB.Update.Arbitrary ()
import Test.Pos.Util.QuickCheck.Property (formsMonoid)
spec :: Spec
spec = describe "Poll" $ do
let smaller n = modifyMaxSuccess (const n)
describe "modifyPollModifier" $ smaller 30 $ do
prop
"poll modifiers form a commutative monoid under 'modifyPollModifier'"
modifyPollFormsMonoid
describe "PollState" $ smaller 30 $ do
prop
"applying two poll modifiers in sequence to the poll state is equivalent\
\ to combining them and applying the resulting modifier"
modifyPollStateWithModifiers
describe "PurePoll" $ smaller 30 $ do
prop
"applying a series of modifications to a modifier and then applying it to\
\ a poll state is the same as applying the modifications directly to the\
\ poll state"
applyActions
prop "Adding and then deleting a block version's state to 'PollState' is\
\ equivalent to doing nothing"
putDelBVState
prop "Setting and then deleting the last confirmed version of an application\
\ is equivalent to doing nothing"
setDeleteConfirmedSV
prop "Adding and then deleting a confirmed proposal is the same as doing\
\ nothing"
addDeleteConfirmedProposal
prop "Inserting an active proposal and then deleting it is the same as doing\
\ nothing"
insertDeleteProposal
modifyPollFormsMonoid
:: Poll.PollModifier
-> Poll.PollModifier
-> Poll.PollModifier
-> Property
modifyPollFormsMonoid = formsMonoid
modifyPollStateWithModifiers
:: Poll.PollState
-> Poll.PollModifier
-> Poll.PollModifier
-> Property
modifyPollStateWithModifiers pst pm1 pm2 =
Poll.modifyPollState pm2 (Poll.modifyPollState pm1 pst) ===
Poll.modifyPollState (pm1 <> pm2) pst
data PollAction
= PutBVState BlockVersion Poll.BlockVersionState
| DelBVState BlockVersion
| SetAdoptedBV BlockVersion
| SetLastConfirmedSV SoftwareVersion
| DelConfirmedSV ApplicationName
| AddConfirmedProposal Poll.ConfirmedProposalState
| DelConfirmedProposal SoftwareVersion
| InsertActiveProposal Poll.ProposalState
| DeactivateProposal UpId
| SetSlottingData SlottingData
| SetEpochProposers (HashSet StakeholderId)
deriving (Show, Eq, Generic)
instance Arbitrary PollAction where
arbitrary = genericArbitrary
shrink = genericShrink
actionToMonad :: Poll.MonadPoll m => PollAction -> m ()
actionToMonad (PutBVState bv bvs) = Poll.putBVState bv bvs
actionToMonad (DelBVState bv) = Poll.delBVState bv
actionToMonad (SetAdoptedBV bv) = Poll.setAdoptedBV bv
actionToMonad (SetLastConfirmedSV sv) = Poll.setLastConfirmedSV sv
actionToMonad (DelConfirmedSV an) = Poll.delConfirmedSV an
actionToMonad (AddConfirmedProposal cps) = Poll.addConfirmedProposal cps
actionToMonad (DelConfirmedProposal sv) = Poll.delConfirmedProposal sv
actionToMonad (InsertActiveProposal ps) = Poll.insertActiveProposal ps
actionToMonad (DeactivateProposal ui) = Poll.deactivateProposal ui
actionToMonad (SetSlottingData sd) = Poll.setSlottingData sd
actionToMonad (SetEpochProposers hs) = Poll.setEpochProposers hs
applyActionToModifier
:: PollAction
-> Poll.PollState
-> Poll.PollModifier
-> Poll.PollModifier
applyActionToModifier (PutBVState bv bvs) _ = Poll.pmBVsL %~ MM.insert bv bvs
applyActionToModifier (DelBVState bv) _ = Poll.pmBVsL %~ MM.delete bv
applyActionToModifier (SetAdoptedBV bv) pst = \pm -> do
let adoptedBVData = snd $
fromMaybe (pst ^. Poll.psAdoptedBV) (Poll.pmAdoptedBVFull pm)
case MM.lookup innerLookupFun bv (Poll.pmBVs pm) of
Nothing -> pm
Just (Poll.bvsModifier -> bvm) ->
pm { Poll.pmAdoptedBVFull = Just (bv, applyBVM bvm adoptedBVData) }
where
innerLookupFun k = pst ^. Poll.psBlockVersions . at k
applyActionToModifier (SetLastConfirmedSV SoftwareVersion {..}) _ =
Poll.pmConfirmedL %~ MM.insert svAppName svNumber
applyActionToModifier (DelConfirmedSV an) _ = Poll.pmConfirmedL %~ MM.delete an
applyActionToModifier (AddConfirmedProposal cps) _ =
Poll.pmConfirmedPropsL %~ MM.insert (Poll.cpsSoftwareVersion cps) cps
applyActionToModifier (DelConfirmedProposal sv) _ = Poll.pmConfirmedPropsL %~ MM.delete sv
applyActionToModifier (InsertActiveProposal ps) pst = \p ->
let up@UnsafeUpdateProposal{..} = Poll.psProposal ps
upId = hash up
p' = case MM.lookup innerLookupFun upId (Poll.pmActiveProps p) of
Nothing -> p
Just _ -> p & Poll.pmEpochProposersL %~ fmap (HS.insert (addressHash upFrom))
in p' & (Poll.pmActivePropsL %~ MM.insert upId ps)
where
innerLookupFun k = pst ^. Poll.psActiveProposals . at k
applyActionToModifier (DeactivateProposal ui) pst = \p ->
let proposal = MM.lookup innerLookupFun ui (Poll.pmActiveProps p)
in case proposal of
Nothing -> p
Just ps ->
let up = Poll.psProposal ps
upId = hash up
in p & (Poll.pmActivePropsL %~ MM.delete upId)
where
innerLookupFun k = pst ^. Poll.psActiveProposals . at k
applyActionToModifier (SetSlottingData sd) _ = Poll.pmSlottingDataL .~ (Just sd)
applyActionToModifier (SetEpochProposers hs) _ = Poll.pmEpochProposersL .~ (Just hs)
applyActions :: Poll.PollState -> [PollAction] -> Property
applyActions ps actionList =
let pollSts = fmap (actionToMonad @Poll.PurePoll) actionList
has two ' ps 's in the head of the list . As such another ' ps ' is added
resultModifiers =
scanl (\pmod act -> applyActionToModifier act ps pmod) mempty actionList
resultPStates = ps : scanl Poll.execPurePollWithLogger ps pollSts
newPollStates = scanl (flip Poll.modifyPollState) ps resultModifiers
in conjoin $ zipWith (===) resultPStates newPollStates
type PollStateTestInfo = (BlockVersion, BlockVersionData)
| Empty ' PollState ' to be used in tests . Since all fields of the datatype except
the second ( psAdoptedBV ) have an instance for ' Monoid ' , it is passed as an argument
emptyPollSt :: PollStateTestInfo -> Poll.PollState
emptyPollSt bvInfo = Poll.PollState
mempty
bvInfo
mempty
mempty
mempty
mempty
mempty
mempty
mempty
mempty
| Apply a sequence of ' PollAction 's from left to right .
perform :: [PollAction] -> Poll.PurePoll ()
perform = foldl (>>) (return ()) . map actionToMonad
equivalence between two sequences of actions in ' PurePoll ' is to be tested / proved .
(==^)
:: [PollAction]
-> [PollAction]
-> Gen PollAction
-> PollStateTestInfo
-> Property
p1 ==^ p2 = \prefixGen bvInfo ->
forAll ((listOf prefixGen) :: Gen [PollAction]) $ \prefix ->
forAll (arbitrary :: Gen [PollAction]) $ \suffix ->
let applyAction x =
Poll.execPurePollWithLogger (emptyPollSt bvInfo)
(perform $ prefix ++ x ++ suffix)
in applyAction p1 === applyAction p2
A note on the following tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The reason these tests have to pass a custom generator for the prefix of the action list
to ' (= = ^ ) ' is that in each case , there is a particular sequence of actions for which
the property does not hold . Using the next test as an example :
Let ' bvs , bvs ´ : : BlockVersionState ' such that ' bvs /= bvs ´ ' . This sequence of actions
in the ' PurePoll ' monad :
[ PutBVState bv bvs ´ , PutBVState bv bvs , DelBVState bv ]
is not , in operational semantics terms , equal to the sequence
[ PutBVState bv bvs ´ ]
It is instead equivalent to
[ ]
Because these actions are performed from left to right , performing an insertion with the
same key twice in a row without deleting it in between those two insertions means only
the last insertion actually matters for these tests .
As such , prefixes with an insertion with the same key as the action being tested in the
property will cause it to fail .
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The reason these tests have to pass a custom generator for the prefix of the action list
to '(==^)' is that in each case, there is a particular sequence of actions for which
the property does not hold. Using the next test as an example:
Let 'bvs, bvs´ :: BlockVersionState' such that 'bvs /= bvs´'. This sequence of actions
in the 'PurePoll' monad:
[PutBVState bv bvs´, PutBVState bv bvs, DelBVState bv]
is not, in operational semantics terms, equal to the sequence
[PutBVState bv bvs´]
It is instead equivalent to
[]
Because these actions are performed from left to right, performing an insertion with the
same key twice in a row without deleting it in between those two insertions means only
the last insertion actually matters for these tests.
As such, prefixes with an insertion with the same key as the action being tested in the
property will cause it to fail.
-}
putDelBVState
:: BlockVersion
-> Poll.BlockVersionState
-> PollStateTestInfo
-> Property
putDelBVState bv bvs =
let actionPrefixGen = arbitrary `suchThat` (\case
PutBVState bv' _ -> bv' /= bv
_ -> True)
in ([PutBVState bv bvs, DelBVState bv] ==^ []) actionPrefixGen
setDeleteConfirmedSV
:: SoftwareVersion
-> PollStateTestInfo
-> Property
setDeleteConfirmedSV sv =
let appName = svAppName sv
actionPrefixGen = arbitrary `suchThat` (\case
SetLastConfirmedSV sv' -> svAppName sv' /= appName
_ -> True)
in ([SetLastConfirmedSV sv, DelConfirmedSV appName] ==^ []) actionPrefixGen
addDeleteConfirmedProposal
:: Poll.ConfirmedProposalState
-> PollStateTestInfo
-> Property
addDeleteConfirmedProposal cps =
let softwareVersion = Poll.cpsSoftwareVersion cps
actionPrefixGen = arbitrary `suchThat` (\case
AddConfirmedProposal cps' -> Poll.cpsSoftwareVersion cps' /= softwareVersion
_ -> True)
in ([AddConfirmedProposal cps, DelConfirmedProposal softwareVersion] ==^
[]) actionPrefixGen
insertDeleteProposal
:: Poll.ProposalState
-> PollStateTestInfo
-> Property
insertDeleteProposal ps =
let getUpId p = hash $ Poll.psProposal p
upId = getUpId ps
actionPrefixGen = arbitrary `suchThat` (\case
InsertActiveProposal ps' -> upId /= getUpId ps'
_ -> True)
in ([InsertActiveProposal ps, DeactivateProposal upId] ==^ [])
actionPrefixGen
|
46b04adca83f2988555f2f97ed23e520bccb0786e86cee9921af9fe63dc2aadf | arunisaac/ccwl | utils.scm | ;;; ccwl --- Concise Common Workflow Language
Copyright © 2021 , 2022 Arun Isaac < >
;;;
This file is part of ccwl .
;;;
;;; ccwl is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
;;; (at your option) any later version.
;;;
;;; ccwl is distributed in the hope that it will be useful, but
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;;; General Public License for more details.
;;;
You should have received a copy of the GNU General Public License
along with ccwl . If not , see < / > .
(use-modules (rnrs conditions)
(rnrs exceptions)
(srfi srfi-1)
(srfi srfi-64)
(srfi srfi-71)
(ccwl conditions)
(ccwl utils))
(define plist-ref
(@@ (ccwl utils) plist-ref))
(test-begin "utils")
(test-equal "pairify"
'((1 . 2) (3 . 4) (5 . 6))
(pairify (list 1 2 3 4 5 6)))
(test-equal "plist-ref"
2
(plist-ref (list #:spam 1 #:ham 2 #:eggs 3)
#:ham))
(test-equal "plist-ref with absent key"
#f
(plist-ref (list #:spam 1 #:ham 2 #:eggs 3)
#:foo))
(test-equal "group-keyword-arguments"
'(#:spam 1 #:ham (1 2 3) #:eggs (0))
((@@ (ccwl utils) group-keyword-arguments)
(list #:spam 1 #:ham 1 2 3 #:eggs 0)
(list #:spam)))
;; We cannot use test-equal to compare syntax objects, since
;; test-equal does not preserve the lexical contexts of the test
;; expressions.
(test-assert "unsyntax-keywords"
(equal? (list #:ham #'1 #:eggs #'2)
((@@ (ccwl utils) unsyntax-keywords)
(list #'#:ham #'1 #'#:eggs #'2))))
(test-equal "lambda**"
'(1 2 123 (1 2 3))
((lambda** (a b #:key foo #:key* bar)
(list a b foo bar))
1 2 #:foo 123 #:bar 1 2 3))
(test-equal "lambda** with default values"
'(1 2 123 9 (321 456) (7) (3 2 1))
((lambda** (foo aal #:key vale (pal 9) #:key* naal (irandu 7) (sol 3 2 1))
(list foo aal vale pal naal irandu sol))
1 2 #:vale 123 #:naal 321 456))
(test-equal "default default value of lambda** unary argument should be #f"
#f
((lambda** (#:key foo)
foo)))
(test-equal "default default value of lambda** n-ary argument should be the empty list"
'()
((lambda** (#:key* foo)
foo)))
(test-assert "lambda** should raise an &unrecognized-keyword-assertion on unrecognized keywords in arguments with syntax objects as irritants"
(guard (exception
(else (and (unrecognized-keyword-assertion? exception)
;; We check with NOT keyword? because we have no
;; way of directly checking for syntax?.
(not (any keyword? (condition-irritants exception))))))
(macroexpand
'(lambda** (#:key foo #:foo bar)
foo))))
(test-equal "Allow other keys in lambda**"
1
((lambda** (#:key foo #:allow-other-keys)
foo)
#:foo 1 #:bar 2))
(test-assert "Unrecognized keyword argument passed to lambda** should raise an &unrecognized-keyword-assertion condition"
(guard (exception
(else (unrecognized-keyword-assertion? exception)))
((lambda** (spam ham #:key eggs)
spam)
1 2 #:foo 123)))
(test-assert "Unary lambda** keyword argument passed multiple arguments should raise an &invalid-keyword-arity-assertion condition"
(guard (exception
(else (invalid-keyword-arity-assertion? exception)))
((lambda** (spam ham #:key eggs)
(list spam ham eggs))
1 2 #:eggs 123 345)))
(test-assert "Wrong number of positional arguments to lambda** should raise an &invalid-positional-arguments-arity-assertion condition"
(guard (exception
(else (invalid-positional-arguments-arity-assertion? exception)))
((lambda** (spam ham #:key eggs)
spam)
1 #:eggs 123)))
(test-assert "syntax-lambda**"
(equal? (list #'1 #'2 #'123 (list #'1 #'2 #'3))
((syntax-lambda** (a b #:key foo #:key* bar)
(list a b foo bar))
#'1 #'2 #'#:foo #'123 #'#:bar #'1 #'2 #'3)))
(test-assert "syntax-lambda** with default values"
(equal? (list #'1 #'2 #'123 9 #'(321 456) '(7) '(3 2 1))
((syntax-lambda** (foo aal #:key vale (pal 9) #:key* naal (irandu 7) (sol 3 2 1))
(list foo aal vale pal naal irandu sol))
#'1 #'2 #'#:vale #'123 #'#:naal #'321 #'456)))
(test-equal "default default value of syntax-lambda** unary argument should be #f"
#f
((syntax-lambda** (#:key foo)
foo)))
(test-equal "default default value of syntax-lambda** n-ary argument should be the empty list"
'()
((syntax-lambda** (#:key* foo)
foo)))
;; We cannot use test-equal to compare syntax objects, since
;; test-equal does not preserve the lexical contexts of the test
;; expressions.
(test-assert "Allow other keys in syntax-lambda**"
(equal? #'1
((syntax-lambda** (#:key foo #:allow-other-keys)
foo)
#'#:foo #'1 #'#:bar #'2)))
(test-assert "syntax-lambda** should raise an &unrecognized-keyword-assertion on unrecognized keywords in arguments"
(guard (exception
(else (unrecognized-keyword-assertion? exception)))
(macroexpand
'(syntax-lambda** (#:key foo #:foo bar)
foo))))
(test-assert "Unrecognized keyword argument passed to syntax-lambda** should raise an &unrecognized-keyword-assertion condition with syntax objects as irritants"
(guard (exception
(else (and (unrecognized-keyword-assertion? exception)
;; We check with NOT keyword? because we have no
;; way of directly checking for syntax?.
(not (any keyword? (condition-irritants exception))))))
((syntax-lambda** (spam ham #:key eggs)
spam)
#'1 #'2 #'#:foo #'123)))
(test-assert "Unary syntax-lambda** keyword argument passed multiple arguments should raise an &invalid-keyword-arity-assertion condition"
(guard (exception
(else (and (invalid-keyword-arity-assertion? exception)
;; We check with NOT keyword? because we have no
;; way of directly checking for syntax?.
(not (any keyword? (condition-irritants exception))))))
((syntax-lambda** (spam ham #:key eggs)
(list spam ham eggs))
#'1 #'2 #'#:eggs #'123 #'345)))
(test-assert "Wrong number of positional arguments to syntax-lambda** should raise an &invalid-positional-arguments-arity-assertion condition"
(guard (exception
(else (invalid-positional-arguments-arity-assertion? exception)))
((syntax-lambda** (spam ham #:key eggs)
spam)
#'1 #'#:eggs #'123)))
(test-equal "filter-mapi"
'(1 3 5 7 9)
(filter-mapi (lambda (item index)
(and (even? index)
(1+ item)))
(iota 10)))
(test-equal "mapn"
'((0 1 4 9 16)
(0 1 8 27 64))
(let ((squares cubes (mapn (lambda (n)
(values (expt n 2)
(expt n 3)))
(iota 5))))
(list squares cubes)))
(test-equal "append-mapn"
'((0 0 1 1 2 4 3 9 4 16)
(0 0 1 1 2 8 3 27 4 64))
(let ((squares cubes (append-mapn (lambda (n)
(values (list n (expt n 2))
(list n (expt n 3))))
(iota 5))))
(list squares cubes)))
(test-equal "foldn"
'(45 285)
(let ((sum sum-of-squares
(foldn (lambda (n sum sum-of-squares)
(values (+ sum n)
(+ sum-of-squares (expt n 2))))
(iota 10)
0 0)))
(list sum sum-of-squares)))
(test-end "utils")
| null | https://raw.githubusercontent.com/arunisaac/ccwl/b2e3a9fd8b3c0c2a76684a78aaff80a759641120/tests/utils.scm | scheme | ccwl --- Concise Common Workflow Language
ccwl is free software: you can redistribute it and/or modify it
(at your option) any later version.
ccwl is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
We cannot use test-equal to compare syntax objects, since
test-equal does not preserve the lexical contexts of the test
expressions.
We check with NOT keyword? because we have no
way of directly checking for syntax?.
We cannot use test-equal to compare syntax objects, since
test-equal does not preserve the lexical contexts of the test
expressions.
We check with NOT keyword? because we have no
way of directly checking for syntax?.
We check with NOT keyword? because we have no
way of directly checking for syntax?. | Copyright © 2021 , 2022 Arun Isaac < >
This file is part of ccwl .
under the terms of the GNU General Public License as published by
the Free Software Foundation , either version 3 of the License , or
You should have received a copy of the GNU General Public License
along with ccwl . If not , see < / > .
(use-modules (rnrs conditions)
(rnrs exceptions)
(srfi srfi-1)
(srfi srfi-64)
(srfi srfi-71)
(ccwl conditions)
(ccwl utils))
(define plist-ref
(@@ (ccwl utils) plist-ref))
(test-begin "utils")
(test-equal "pairify"
'((1 . 2) (3 . 4) (5 . 6))
(pairify (list 1 2 3 4 5 6)))
(test-equal "plist-ref"
2
(plist-ref (list #:spam 1 #:ham 2 #:eggs 3)
#:ham))
(test-equal "plist-ref with absent key"
#f
(plist-ref (list #:spam 1 #:ham 2 #:eggs 3)
#:foo))
(test-equal "group-keyword-arguments"
'(#:spam 1 #:ham (1 2 3) #:eggs (0))
((@@ (ccwl utils) group-keyword-arguments)
(list #:spam 1 #:ham 1 2 3 #:eggs 0)
(list #:spam)))
(test-assert "unsyntax-keywords"
(equal? (list #:ham #'1 #:eggs #'2)
((@@ (ccwl utils) unsyntax-keywords)
(list #'#:ham #'1 #'#:eggs #'2))))
(test-equal "lambda**"
'(1 2 123 (1 2 3))
((lambda** (a b #:key foo #:key* bar)
(list a b foo bar))
1 2 #:foo 123 #:bar 1 2 3))
(test-equal "lambda** with default values"
'(1 2 123 9 (321 456) (7) (3 2 1))
((lambda** (foo aal #:key vale (pal 9) #:key* naal (irandu 7) (sol 3 2 1))
(list foo aal vale pal naal irandu sol))
1 2 #:vale 123 #:naal 321 456))
(test-equal "default default value of lambda** unary argument should be #f"
#f
((lambda** (#:key foo)
foo)))
(test-equal "default default value of lambda** n-ary argument should be the empty list"
'()
((lambda** (#:key* foo)
foo)))
(test-assert "lambda** should raise an &unrecognized-keyword-assertion on unrecognized keywords in arguments with syntax objects as irritants"
(guard (exception
(else (and (unrecognized-keyword-assertion? exception)
(not (any keyword? (condition-irritants exception))))))
(macroexpand
'(lambda** (#:key foo #:foo bar)
foo))))
(test-equal "Allow other keys in lambda**"
1
((lambda** (#:key foo #:allow-other-keys)
foo)
#:foo 1 #:bar 2))
(test-assert "Unrecognized keyword argument passed to lambda** should raise an &unrecognized-keyword-assertion condition"
(guard (exception
(else (unrecognized-keyword-assertion? exception)))
((lambda** (spam ham #:key eggs)
spam)
1 2 #:foo 123)))
(test-assert "Unary lambda** keyword argument passed multiple arguments should raise an &invalid-keyword-arity-assertion condition"
(guard (exception
(else (invalid-keyword-arity-assertion? exception)))
((lambda** (spam ham #:key eggs)
(list spam ham eggs))
1 2 #:eggs 123 345)))
(test-assert "Wrong number of positional arguments to lambda** should raise an &invalid-positional-arguments-arity-assertion condition"
(guard (exception
(else (invalid-positional-arguments-arity-assertion? exception)))
((lambda** (spam ham #:key eggs)
spam)
1 #:eggs 123)))
(test-assert "syntax-lambda**"
(equal? (list #'1 #'2 #'123 (list #'1 #'2 #'3))
((syntax-lambda** (a b #:key foo #:key* bar)
(list a b foo bar))
#'1 #'2 #'#:foo #'123 #'#:bar #'1 #'2 #'3)))
(test-assert "syntax-lambda** with default values"
(equal? (list #'1 #'2 #'123 9 #'(321 456) '(7) '(3 2 1))
((syntax-lambda** (foo aal #:key vale (pal 9) #:key* naal (irandu 7) (sol 3 2 1))
(list foo aal vale pal naal irandu sol))
#'1 #'2 #'#:vale #'123 #'#:naal #'321 #'456)))
(test-equal "default default value of syntax-lambda** unary argument should be #f"
#f
((syntax-lambda** (#:key foo)
foo)))
(test-equal "default default value of syntax-lambda** n-ary argument should be the empty list"
'()
((syntax-lambda** (#:key* foo)
foo)))
(test-assert "Allow other keys in syntax-lambda**"
(equal? #'1
((syntax-lambda** (#:key foo #:allow-other-keys)
foo)
#'#:foo #'1 #'#:bar #'2)))
(test-assert "syntax-lambda** should raise an &unrecognized-keyword-assertion on unrecognized keywords in arguments"
(guard (exception
(else (unrecognized-keyword-assertion? exception)))
(macroexpand
'(syntax-lambda** (#:key foo #:foo bar)
foo))))
(test-assert "Unrecognized keyword argument passed to syntax-lambda** should raise an &unrecognized-keyword-assertion condition with syntax objects as irritants"
(guard (exception
(else (and (unrecognized-keyword-assertion? exception)
(not (any keyword? (condition-irritants exception))))))
((syntax-lambda** (spam ham #:key eggs)
spam)
#'1 #'2 #'#:foo #'123)))
(test-assert "Unary syntax-lambda** keyword argument passed multiple arguments should raise an &invalid-keyword-arity-assertion condition"
(guard (exception
(else (and (invalid-keyword-arity-assertion? exception)
(not (any keyword? (condition-irritants exception))))))
((syntax-lambda** (spam ham #:key eggs)
(list spam ham eggs))
#'1 #'2 #'#:eggs #'123 #'345)))
(test-assert "Wrong number of positional arguments to syntax-lambda** should raise an &invalid-positional-arguments-arity-assertion condition"
(guard (exception
(else (invalid-positional-arguments-arity-assertion? exception)))
((syntax-lambda** (spam ham #:key eggs)
spam)
#'1 #'#:eggs #'123)))
(test-equal "filter-mapi"
'(1 3 5 7 9)
(filter-mapi (lambda (item index)
(and (even? index)
(1+ item)))
(iota 10)))
(test-equal "mapn"
'((0 1 4 9 16)
(0 1 8 27 64))
(let ((squares cubes (mapn (lambda (n)
(values (expt n 2)
(expt n 3)))
(iota 5))))
(list squares cubes)))
(test-equal "append-mapn"
'((0 0 1 1 2 4 3 9 4 16)
(0 0 1 1 2 8 3 27 4 64))
(let ((squares cubes (append-mapn (lambda (n)
(values (list n (expt n 2))
(list n (expt n 3))))
(iota 5))))
(list squares cubes)))
(test-equal "foldn"
'(45 285)
(let ((sum sum-of-squares
(foldn (lambda (n sum sum-of-squares)
(values (+ sum n)
(+ sum-of-squares (expt n 2))))
(iota 10)
0 0)))
(list sum sum-of-squares)))
(test-end "utils")
|
c3b7271724b420ac584646302d5fadaec80f0c3b9fb7f61d4b232024c74f54b5 | ejgallego/coq-lsp | memo.ml | module CS = Stats
module Stats = struct
type 'a t =
{ res : 'a
; cache_hit : bool
; memory : int
; time : float
}
let make ?(cache_hit = false) ~time res =
(* This is quite slow! *)
(* let memory = Obj.magic res |> Obj.reachable_words in *)
let memory = 0 in
{ res; cache_hit; memory; time }
end
module CacheStats = struct
let nhit, ntotal = (ref 0, ref 0)
let reset () =
nhit := 0;
ntotal := 0
let hit () =
incr nhit;
incr ntotal
let miss () = incr ntotal
let stats () =
if !ntotal = 0 then "no stats"
else
let hit_rate =
Stdlib.Float.of_int !nhit /. Stdlib.Float.of_int !ntotal *. 100.0
in
Format.asprintf "cache hit rate: %3.2f" hit_rate
end
module Interp = struct
(* Loc-independent command evalution and caching. *)
module VernacInput = struct
type t = Coq.State.t * Coq.Ast.t
This crutially relies on our ppx to ignore the CAst location
let equal (st1, v1) (st2, v2) =
if Coq.Ast.compare v1 v2 = 0 then
if Coq.State.compare st1 st2 = 0 then true else false
else false
let hash (st, v) = Hashtbl.hash (Coq.Ast.hash v, st)
end
type t = VernacInput.t
let input_info (st, v) =
Format.asprintf "stm: %d | st %d" (Coq.Ast.hash v) (Hashtbl.hash st)
module HC = Hashtbl.Make (VernacInput)
module Result = struct
(* We store the location as to compute an offset for cached results *)
type t = Loc.t * Coq.State.t Coq.Interp.interp_result
end
type cache = Result.t HC.t
let cache : cache ref = ref (HC.create 1000)
let stats () = Obj.reachable_words (Obj.magic cache)
let in_cache st stm =
let kind = CS.Kind.Hashing in
CS.record ~kind ~f:(HC.find_opt !cache) (st, stm)
(* XXX: Move elsewhere *)
let loc_offset (l1 : Loc.t) (l2 : Loc.t) =
let line_offset = l2.line_nb - l1.line_nb in
let bol_offset = l2.bol_pos - l1.bol_pos in
let line_last_offset = l2.line_nb_last - l1.line_nb_last in
let bol_last_offset = l2.bol_pos_last - l1.bol_pos_last in
let bp_offset = l2.bp - l1.bp in
let ep_offset = l2.ep - l1.ep in
( line_offset
, bol_offset
, line_last_offset
, bol_last_offset
, bp_offset
, ep_offset )
let loc_apply_offset
( line_offset
, bol_offset
, line_last_offset
, bol_last_offset
, bp_offset
, ep_offset ) (loc : Loc.t) =
{ loc with
line_nb = loc.line_nb + line_offset
; bol_pos = loc.bol_pos + bol_offset
; line_nb_last = loc.line_nb_last + line_last_offset
; bol_pos_last = loc.bol_pos_last + bol_last_offset
; bp = loc.bp + bp_offset
; ep = loc.ep + ep_offset
}
let adjust_offset ~stm_loc ~cached_loc res =
let offset = loc_offset cached_loc stm_loc in
let f = loc_apply_offset offset in
Coq.Protect.E.map_loc ~f res
let eval (st, stm) : _ Stats.t =
let stm_loc = Coq.Ast.loc stm |> Option.get in
match in_cache st stm with
| Some (cached_loc, res), time ->
if Debug.cache then Io.Log.trace "memo" "cache hit";
CacheStats.hit ();
let res = adjust_offset ~stm_loc ~cached_loc res in
Stats.make ~cache_hit:true ~time res
| None, time_hash -> (
if Debug.cache then Io.Log.trace "memo" "cache miss";
CacheStats.miss ();
let kind = CS.Kind.Exec in
let res, time_interp = CS.record ~kind ~f:(Coq.Interp.interp ~st) stm in
let time = time_hash +. time_interp in
match res.r with
| Coq.Protect.R.Interrupted ->
(* Don't cache interruptions *)
Stats.make ~time res
| Coq.Protect.R.Completed _ ->
let () = HC.add !cache (st, stm) (stm_loc, res) in
let time = time_hash +. time_interp in
Stats.make ~time res)
end
module Admit = struct
type t = Coq.State.t
module C = Hashtbl.Make (Coq.State)
let cache = C.create 1000
let eval v =
match C.find_opt cache v with
| None ->
let admitted_st = Coq.State.admit ~st:v in
C.add cache v admitted_st;
admitted_st
| Some admitted_st -> admitted_st
end
module Init = struct
module S = struct
type t = Coq.State.t * Coq.Workspace.t * Lang.LUri.File.t
let equal (s1, w1, u1) (s2, w2, u2) : bool =
if Lang.LUri.File.compare u1 u2 = 0 then
if Coq.Workspace.compare w1 w2 = 0 then
if Coq.State.compare s1 s2 = 0 then true else false
else false
else false
let hash (st, w, uri) =
Hashtbl.hash
(Coq.State.hash st, Coq.Workspace.hash w, Lang.LUri.File.hash uri)
end
type t = S.t
module C = Hashtbl.Make (S)
let cache = C.create 1000
let eval v =
match C.find_opt cache v with
| None ->
let root_state, workspace, uri = v in
let admitted_st = Coq.Init.doc_init ~root_state ~workspace ~uri in
C.add cache v admitted_st;
admitted_st
| Some res -> res
end
| null | https://raw.githubusercontent.com/ejgallego/coq-lsp/d4f2fd0d5068a0a7893c70cc967c72908f923ba7/fleche/memo.ml | ocaml | This is quite slow!
let memory = Obj.magic res |> Obj.reachable_words in
Loc-independent command evalution and caching.
We store the location as to compute an offset for cached results
XXX: Move elsewhere
Don't cache interruptions | module CS = Stats
module Stats = struct
type 'a t =
{ res : 'a
; cache_hit : bool
; memory : int
; time : float
}
let make ?(cache_hit = false) ~time res =
let memory = 0 in
{ res; cache_hit; memory; time }
end
module CacheStats = struct
let nhit, ntotal = (ref 0, ref 0)
let reset () =
nhit := 0;
ntotal := 0
let hit () =
incr nhit;
incr ntotal
let miss () = incr ntotal
let stats () =
if !ntotal = 0 then "no stats"
else
let hit_rate =
Stdlib.Float.of_int !nhit /. Stdlib.Float.of_int !ntotal *. 100.0
in
Format.asprintf "cache hit rate: %3.2f" hit_rate
end
module Interp = struct
module VernacInput = struct
type t = Coq.State.t * Coq.Ast.t
This crutially relies on our ppx to ignore the CAst location
let equal (st1, v1) (st2, v2) =
if Coq.Ast.compare v1 v2 = 0 then
if Coq.State.compare st1 st2 = 0 then true else false
else false
let hash (st, v) = Hashtbl.hash (Coq.Ast.hash v, st)
end
type t = VernacInput.t
let input_info (st, v) =
Format.asprintf "stm: %d | st %d" (Coq.Ast.hash v) (Hashtbl.hash st)
module HC = Hashtbl.Make (VernacInput)
module Result = struct
type t = Loc.t * Coq.State.t Coq.Interp.interp_result
end
type cache = Result.t HC.t
let cache : cache ref = ref (HC.create 1000)
let stats () = Obj.reachable_words (Obj.magic cache)
let in_cache st stm =
let kind = CS.Kind.Hashing in
CS.record ~kind ~f:(HC.find_opt !cache) (st, stm)
let loc_offset (l1 : Loc.t) (l2 : Loc.t) =
let line_offset = l2.line_nb - l1.line_nb in
let bol_offset = l2.bol_pos - l1.bol_pos in
let line_last_offset = l2.line_nb_last - l1.line_nb_last in
let bol_last_offset = l2.bol_pos_last - l1.bol_pos_last in
let bp_offset = l2.bp - l1.bp in
let ep_offset = l2.ep - l1.ep in
( line_offset
, bol_offset
, line_last_offset
, bol_last_offset
, bp_offset
, ep_offset )
let loc_apply_offset
( line_offset
, bol_offset
, line_last_offset
, bol_last_offset
, bp_offset
, ep_offset ) (loc : Loc.t) =
{ loc with
line_nb = loc.line_nb + line_offset
; bol_pos = loc.bol_pos + bol_offset
; line_nb_last = loc.line_nb_last + line_last_offset
; bol_pos_last = loc.bol_pos_last + bol_last_offset
; bp = loc.bp + bp_offset
; ep = loc.ep + ep_offset
}
let adjust_offset ~stm_loc ~cached_loc res =
let offset = loc_offset cached_loc stm_loc in
let f = loc_apply_offset offset in
Coq.Protect.E.map_loc ~f res
let eval (st, stm) : _ Stats.t =
let stm_loc = Coq.Ast.loc stm |> Option.get in
match in_cache st stm with
| Some (cached_loc, res), time ->
if Debug.cache then Io.Log.trace "memo" "cache hit";
CacheStats.hit ();
let res = adjust_offset ~stm_loc ~cached_loc res in
Stats.make ~cache_hit:true ~time res
| None, time_hash -> (
if Debug.cache then Io.Log.trace "memo" "cache miss";
CacheStats.miss ();
let kind = CS.Kind.Exec in
let res, time_interp = CS.record ~kind ~f:(Coq.Interp.interp ~st) stm in
let time = time_hash +. time_interp in
match res.r with
| Coq.Protect.R.Interrupted ->
Stats.make ~time res
| Coq.Protect.R.Completed _ ->
let () = HC.add !cache (st, stm) (stm_loc, res) in
let time = time_hash +. time_interp in
Stats.make ~time res)
end
module Admit = struct
type t = Coq.State.t
module C = Hashtbl.Make (Coq.State)
let cache = C.create 1000
let eval v =
match C.find_opt cache v with
| None ->
let admitted_st = Coq.State.admit ~st:v in
C.add cache v admitted_st;
admitted_st
| Some admitted_st -> admitted_st
end
module Init = struct
module S = struct
type t = Coq.State.t * Coq.Workspace.t * Lang.LUri.File.t
let equal (s1, w1, u1) (s2, w2, u2) : bool =
if Lang.LUri.File.compare u1 u2 = 0 then
if Coq.Workspace.compare w1 w2 = 0 then
if Coq.State.compare s1 s2 = 0 then true else false
else false
else false
let hash (st, w, uri) =
Hashtbl.hash
(Coq.State.hash st, Coq.Workspace.hash w, Lang.LUri.File.hash uri)
end
type t = S.t
module C = Hashtbl.Make (S)
let cache = C.create 1000
let eval v =
match C.find_opt cache v with
| None ->
let root_state, workspace, uri = v in
let admitted_st = Coq.Init.doc_init ~root_state ~workspace ~uri in
C.add cache v admitted_st;
admitted_st
| Some res -> res
end
|
0a5fe5b6109670b6a8b0be99830f30ceb8e9ca54ddd39fbf99e3eb29a00480ec | kosmikus/popl21-liquid-haskell-tutorial | SExec.hs | {-@ LIQUID "--reflection" @-}
{-@ LIQUID "--ple" @-}
{- LIQUID "--no-totality" @-}
module SExec where
import Memory
import qualified Data.Set as S
type Acc = Int
type Reg = Int
type Config = (Acc, Mem Reg)
data Code
= Load Int Code
| Store Reg Code
| Add Reg Code
| Free Reg Code
| Halt
@ exec : : c : Code - > ( a::Acc , { m : validMem m c a } ) - > Config @
exec :: Code -> Config -> Config
exec (Load n c) (a,m) = exec c (n,m)
exec (Store r c) (a,m) = exec c (a,set r a m)
exec (Add r c) (a,m) = exec c (a `add` get r m ,m)
exec (Free r c) (a,m) = exec c (a, unset r m)
exec Halt (a,m) = (a,m)
{-@ reflect validMem @-}
validMem :: Mem Int -> Code -> Acc -> Bool
validMem m (Add r c) a = if S.member r (memAddrs m) then validMem m c (a `add` get r m) else False
validMem m (Load n c) a = validMem m c n
validMem m (Store r c) a = validMem (set r a m) c a
validMem m (Free r c) a = validMem (unset r m) c a
validMem m _ a = True
@ type ValidCode = { c : Code | validMem MEmp c 0 } @
@ code : : ValidCode @
code :: Code
code = Store 0 (Load 42 (Store 42 ( Add 42 Halt)))
runExample :: Config
runExample = exec code (0,MEmp)
{-@ reflect add @-}
add :: Int -> Int -> Int
add = (+)
| null | https://raw.githubusercontent.com/kosmikus/popl21-liquid-haskell-tutorial/4353aa70e943d6da7821ef2fd5ef8cd6c56b39e7/SExec.hs | haskell | @ LIQUID "--reflection" @
@ LIQUID "--ple" @
LIQUID "--no-totality" @
@ reflect validMem @
@ reflect add @ |
module SExec where
import Memory
import qualified Data.Set as S
type Acc = Int
type Reg = Int
type Config = (Acc, Mem Reg)
data Code
= Load Int Code
| Store Reg Code
| Add Reg Code
| Free Reg Code
| Halt
@ exec : : c : Code - > ( a::Acc , { m : validMem m c a } ) - > Config @
exec :: Code -> Config -> Config
exec (Load n c) (a,m) = exec c (n,m)
exec (Store r c) (a,m) = exec c (a,set r a m)
exec (Add r c) (a,m) = exec c (a `add` get r m ,m)
exec (Free r c) (a,m) = exec c (a, unset r m)
exec Halt (a,m) = (a,m)
validMem :: Mem Int -> Code -> Acc -> Bool
validMem m (Add r c) a = if S.member r (memAddrs m) then validMem m c (a `add` get r m) else False
validMem m (Load n c) a = validMem m c n
validMem m (Store r c) a = validMem (set r a m) c a
validMem m (Free r c) a = validMem (unset r m) c a
validMem m _ a = True
@ type ValidCode = { c : Code | validMem MEmp c 0 } @
@ code : : ValidCode @
code :: Code
code = Store 0 (Load 42 (Store 42 ( Add 42 Halt)))
runExample :: Config
runExample = exec code (0,MEmp)
add :: Int -> Int -> Int
add = (+)
|
473cbd58ae2a15990a32b53d34878b758cdbbbd009293b7396bc5126f411f69c | gergoerdi/clash-intel8080 | TestBench.hs | module Hardware.Intel8080.TestBench where
import Clash.Prelude hiding ((^))
import Hardware.Intel8080
import Prelude (putChar, (^))
import Control.Monad
import Control.Monad.IO.Class
import Control.Monad.Writer
import Data.Array
import Data.Array.IO
import Data.Char
import Data.Word (Word8)
import qualified Data.List as L
import qualified Data.ByteString as BS
import Data.ByteString.Builder
import Text.Printf
prelude :: [Value]
prelude = mconcat
0x0000 : exit : MVI A , 0x0a
0x0002 : OUT 0
0x0004 : HLT
0x0005 : message : MVI A , 0x02
0x0007 : CMP C
0x0008 : JNZ 0x000f
0x000B : putChr : MOV A , E
0x000C : OUT 0
0x000E : RET
, [ 0x0e, 0x24 ] -- 0x000F: putStr: MVI C, '$'
0x0011 : loop : LDAX DE
0x0012 : CMP C
, [ 0xc2, 0x17, 0x00 ] -- 0x0013: JNZ next
0x0016 : RET
, [ 0xd3, 0x00 ] -- 0x0017: next: OUT 0
, [ 0x13 ] -- 0x0019: INX DE
0x001a : JMP loop
]
testOutPort :: (MonadIO m, MonadWriter Builder m) => Bool -> Value -> m Value
testOutPort verbose value = do
when verbose $ liftIO $ putChar . chr . fromIntegral $ value
tell $ word8 . fromIntegral $ value
return 0xff
banner :: (MonadIO m) => String -> m a -> m a
banner title act = do
liftIO $ printf "\n%s> %s <%s\n" (L.replicate 10 '-') title (L.replicate 10 '-')
x <- act
liftIO $ printf "\n%s--%s--%s\n" (L.replicate 10 '-') ('-' <$ title) (L.replicate 10 '-')
return x
load :: FilePath -> IO (IOArray Addr Value)
load romFile = do
bs <- fmap fromIntegral . BS.unpack <$> BS.readFile romFile
arr <- newArray (minBound, maxBound) 0x00
zipWithM_ (writeArray arr) [0x0000..] prelude
zipWithM_ (writeArray arr) [0x0100..] bs
return arr
runTest :: (IOArray Addr Value -> IO a) -> FilePath -> IO a
runTest body romFile = do
arr <- load romFile
body arr
| null | https://raw.githubusercontent.com/gergoerdi/clash-intel8080/7190e2e992db74cc32010ff42afc06bbce839450/test/Hardware/Intel8080/TestBench.hs | haskell | 0x000F: putStr: MVI C, '$'
0x0013: JNZ next
0x0017: next: OUT 0
0x0019: INX DE | module Hardware.Intel8080.TestBench where
import Clash.Prelude hiding ((^))
import Hardware.Intel8080
import Prelude (putChar, (^))
import Control.Monad
import Control.Monad.IO.Class
import Control.Monad.Writer
import Data.Array
import Data.Array.IO
import Data.Char
import Data.Word (Word8)
import qualified Data.List as L
import qualified Data.ByteString as BS
import Data.ByteString.Builder
import Text.Printf
prelude :: [Value]
prelude = mconcat
0x0000 : exit : MVI A , 0x0a
0x0002 : OUT 0
0x0004 : HLT
0x0005 : message : MVI A , 0x02
0x0007 : CMP C
0x0008 : JNZ 0x000f
0x000B : putChr : MOV A , E
0x000C : OUT 0
0x000E : RET
0x0011 : loop : LDAX DE
0x0012 : CMP C
0x0016 : RET
0x001a : JMP loop
]
testOutPort :: (MonadIO m, MonadWriter Builder m) => Bool -> Value -> m Value
testOutPort verbose value = do
when verbose $ liftIO $ putChar . chr . fromIntegral $ value
tell $ word8 . fromIntegral $ value
return 0xff
banner :: (MonadIO m) => String -> m a -> m a
banner title act = do
liftIO $ printf "\n%s> %s <%s\n" (L.replicate 10 '-') title (L.replicate 10 '-')
x <- act
liftIO $ printf "\n%s--%s--%s\n" (L.replicate 10 '-') ('-' <$ title) (L.replicate 10 '-')
return x
load :: FilePath -> IO (IOArray Addr Value)
load romFile = do
bs <- fmap fromIntegral . BS.unpack <$> BS.readFile romFile
arr <- newArray (minBound, maxBound) 0x00
zipWithM_ (writeArray arr) [0x0000..] prelude
zipWithM_ (writeArray arr) [0x0100..] bs
return arr
runTest :: (IOArray Addr Value -> IO a) -> FilePath -> IO a
runTest body romFile = do
arr <- load romFile
body arr
|
15bb01f9f171721efbead5ef75cea8d10f97c83ad630ad68f5b4fd4fac4a70a6 | wdebeaum/step | wheezing.lisp | ;;;;
;;;; w::wheezing
;;;;
(define-words :pos W::n
:words (
(w::wheezing
(senses
((LF-PARENT ONT::dyspnea)
(TEMPL mass-pred-TEMPL)
(syntax (W::morph (:forms (-none))))
)
)
)
))
| null | https://raw.githubusercontent.com/wdebeaum/step/f38c07d9cd3a58d0e0183159d4445de9a0eafe26/src/LexiconManager/Data/new/wheezing.lisp | lisp |
w::wheezing
|
(define-words :pos W::n
:words (
(w::wheezing
(senses
((LF-PARENT ONT::dyspnea)
(TEMPL mass-pred-TEMPL)
(syntax (W::morph (:forms (-none))))
)
)
)
))
|
5381725ac1f245878fccbc8e175ed4ad5ba601f09a30adea8fc6d918532491fe | VictorNicollet/Ohm | util.ml | Ohm is © 2012
type role = [ `Bot | `Web | `Put | `Reset ]
let pid = Unix.getpid ()
let _role = ref None
let role () =
let role =
let bot = ref false
and put = ref false
and cgi = ref false
and reset = ref false in
Arg.parse [
"--reset", Arg.Set reset, "force other processes to restart" ;
"--cgi", Arg.Set cgi, "run as FastCGI (default)" ;
"--put", Arg.Set put, "run as view/i18n uploader" ;
"--bot", Arg.Set bot, "run as bot" ;
] (fun _ -> ()) "Start an instance of the Ohm server" ;
if !bot then `Bot else
if !put then `Put else
if !reset then `Reset else `Web
in
_role := Some role ;
role
module Logging = struct
let open_channel =
let chanref = ref None in
fun () ->
match !chanref with
| None ->
let path = Configure.lock `Log in
let chan =
if path = "-" then stdout
else open_out_gen [Open_append] 0666 path
in
chanref := Some chan ; chan
| Some chan -> chan
let prefix =
let cache = ref None in
fun () ->
match !cache with Some prefix -> prefix | None ->
let prefix =
Printf.sprintf "[%s:%d]"
(match !_role with
| None -> "---"
| Some `Reset -> "RST"
| Some `Bot -> "BOT"
| Some `Web -> "WEB"
| Some `Put -> "PUT")
pid
in
if !_role <> None then cache := Some prefix ;
prefix
let output string =
try let channel = open_channel () in
let time = Unix.localtime (Unix.gettimeofday ()) in
let string =
Printf.sprintf "[%d/%02d/%02d %02d:%02d:%02d] %s %s\n"
(time.Unix.tm_year + 1900)
(1 + time.Unix.tm_mon)
(time.Unix.tm_mday)
(time.Unix.tm_hour)
(time.Unix.tm_min)
(time.Unix.tm_sec)
(prefix ())
string
in
output_string channel string ;
flush channel
with _ -> ()
end
let string_of_time time =
let time = Unix.gmtime time in
Printf.sprintf "%04d%02d%02d%02d%02d%02d"
(time.Unix.tm_year + 1900)
(1 + time.Unix.tm_mon)
(time.Unix.tm_mday)
(time.Unix.tm_hour)
(time.Unix.tm_min)
(time.Unix.tm_sec)
let log format =
Printf.ksprintf Logging.output format
let memoize f =
let h = Hashtbl.create 10 in
fun x ->
try Hashtbl.find h x with Not_found ->
let y = f x in Hashtbl.add h x y ; y
let get_binary_contents full =
try
let chan = open_in_bin full in
let size = in_channel_length chan in
let into = String.create size in
let rec readall i size =
let read = input chan into i size in
if read = size then () else
readall (i + read) (size - read)
in readall 0 size ;
close_in chan ;
Some into
with
| exn ->
log "Util.get_contents: could not open %s (%s)" full (Printexc.to_string exn);
None
let get_contents file =
try
let chan = open_in file in
let size = in_channel_length chan in
let into = String.create size in
let rec readall i size =
let read = input chan into i size in
if read = size then () else
readall (i + read) (size - read)
in readall 0 size ;
close_in chan ;
BatUTF8.validate into ;
Some into
with
| BatUTF8.Malformed_code ->
log "Util.get_contents: file %s is not valid utf-8" file ;
None
| exn ->
log "Util.get_contents: could not open %s (%s)" file (Printexc.to_string exn) ;
None
let get_view_contents file =
let view_dir = Configure.lock `Templates in
get_contents (view_dir ^ file)
let get_resource_contents file =
let resource_dir = Configure.lock `Resources in
get_contents (resource_dir ^ file)
let log_requests = false
let logreq =
if log_requests then log
else function format -> Printf.ksprintf ignore format
let logjson js =
Json.serialize js
let _uniq_b = ref 0
let _uniq_c = Unix.getpid ()
let seq_old = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789"
let seq_cdb = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
let base62 seq n i =
let r = String.make n seq.[0] in
let rec aux i k =
if i <> 0 then begin
r.[k] <- seq.[i mod 62] ;
aux (i / 62) (k - 1)
end
in aux i (n-1) ; r
let uniq () =
let a = int_of_float (Unix.time() -. 1286058501.0)
and b = incr _uniq_b ; !_uniq_b mod 238328
and c = _uniq_c
in (base62 seq_cdb 5 a)^(base62 seq_cdb 3 b)^(base62 seq_cdb 3 c)
let base62 = base62 seq_old
let base62_of_int =
base62 6
let base62_to_base34 =
let ascii_zero = Char.code '0' in
let ascii_a = Char.code 'a' in
let ascii_A = Char.code 'A' in
let to_int = function
| ('0' .. '9') as c -> Char.code c - ascii_zero
| ('a' .. 'z') as c -> Char.code c - ascii_a + 10
| ('A' .. 'Z') as c -> Char.code c - ascii_A + 10 + 26
| _ -> 0
in
let rec base62_extract s i l =
try if l = 0 then 0 else to_int s.[i] + 62 * base62_extract s (i+1) (l-1) with _ -> 0
in
let base34 = "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ" in
let rec base34_write s i l value =
if l = 0 then () else (
s.[i] <- base34.[value mod 34] ;
base34_write s (i+1) (l-1) (value / 34)
)
in
fun input ->
let s = "xxxxx-xxxxx-xxxx" in
base34_write s 0 5 (base62_extract input 0 4) ;
base34_write s 6 5 (base62_extract input 4 4) ;
base34_write s 12 4 (base62_extract input 8 3) ;
s
let dec_of_hex_char = function
| '0' -> 0 | '1' -> 1 | '2' -> 2 | '3' -> 3
| '4' -> 4 | '5' -> 5 | '6' -> 6 | '7' -> 7
| '8' -> 8 | '9' -> 9 | 'a' -> 10 | 'b' -> 11
| 'c' -> 12 | 'd' -> 13 | 'e' -> 14 | 'f' -> 15
| _ -> 0
let sha1 string =
let hex = Sha1.to_hex (Sha1.string string) in
let len = String.length hex in
BatString.init
(len/2)
(fun i -> Char.chr (dec_of_hex_char hex.[2*i] * 16 + dec_of_hex_char hex.[2*i+1]))
let sha1_hmac (o_key_pad,i_key_pad) text =
BatBase64.str_encode (sha1 (o_key_pad ^ sha1 (i_key_pad ^ text)))
let utf8 string =
try BatUTF8.validate string ; Some string
with BatUTF8.Malformed_code -> None
let rec last = function
| [] -> None
| [x] -> Some x
| _::t -> last t
let first = function
| [] -> None
| h::_ -> Some h
let rec setdiff cmp a b = match a,b with
| _, [] -> a
| [], _ -> []
| ha :: ta, hb :: tb ->
let c = cmp ha hb in
if c = 0 then setdiff cmp ta tb
else if c < 0 then ha :: setdiff cmp ta tb
else setdiff cmp a tb
let rec setand cmp a b = match a,b with
| _, [] -> []
| [], _ -> []
| ha :: ta, hb :: tb ->
let c = cmp ha hb in
if c = 0 then ha :: (setand cmp ta tb)
else if c < 0 then setand cmp ta b else setand cmp a tb
let fold_accents text =
List.fold_left (fun text (reg,rep) -> Str.global_replace (Str.regexp reg) rep text) text
[ "à\\|À\\|â\\|Â\\|ä\\|Ä" , "a" ;
"é\\|É\\|ê\\|Ê\\|è\\|È\\|ë\\|Ë" , "e" ;
"ç\\|Ç" , "c" ;
"î\\|Î\\|ï\\|Ï" , "i" ;
"ù\\|Ù\\|û\\|Û\\|ü\\|Ü" , "u" ;
"ô\\|Ô\\|ö\\|Ö" , "o" ;
"œ\\|Œ" , "oe" ;
]
let uppercase s =
let s = String.copy s in
for i = 0 to String.length s - 1 do
let c = Char.code s.[i] in
if c >= 97 && c <= 122 then
s.[i] <- Char.chr (c - 32)
done ; s
let remove_bom text =
if BatString.starts_with text "\239\187\191" then String.sub text 3 (String.length text - 3)
else text
let fold_all text =
BatString.trim (uppercase (fold_accents (remove_bom text)))
let number list =
let rec aux acc = function
| [] -> []
| h :: t -> (acc , h) :: (aux (acc+1) t)
in aux 0 list
let clip size string =
if String.length string > size then String.sub string 0 size else string
let rec next_string string =
let n = String.length string in
if n = 0 then String.make 1 (Char.chr 0) else
let code = Char.code string.[n-1] in
if code = 255 then next_string (String.sub string 0 (n-1)) else
let copy = String.copy string in
copy.[n-1] <- Char.chr (code + 1) ;
copy
let every d f =
let next = ref 0. in
fun x ->
let now = Unix.gettimeofday () in
if now > !next then ( next := now +. d ; f x)
| null | https://raw.githubusercontent.com/VictorNicollet/Ohm/ca90c162f6c49927c893114491f29d44aaf71feb/src/util.ml | ocaml | Ohm is © 2012
type role = [ `Bot | `Web | `Put | `Reset ]
let pid = Unix.getpid ()
let _role = ref None
let role () =
let role =
let bot = ref false
and put = ref false
and cgi = ref false
and reset = ref false in
Arg.parse [
"--reset", Arg.Set reset, "force other processes to restart" ;
"--cgi", Arg.Set cgi, "run as FastCGI (default)" ;
"--put", Arg.Set put, "run as view/i18n uploader" ;
"--bot", Arg.Set bot, "run as bot" ;
] (fun _ -> ()) "Start an instance of the Ohm server" ;
if !bot then `Bot else
if !put then `Put else
if !reset then `Reset else `Web
in
_role := Some role ;
role
module Logging = struct
let open_channel =
let chanref = ref None in
fun () ->
match !chanref with
| None ->
let path = Configure.lock `Log in
let chan =
if path = "-" then stdout
else open_out_gen [Open_append] 0666 path
in
chanref := Some chan ; chan
| Some chan -> chan
let prefix =
let cache = ref None in
fun () ->
match !cache with Some prefix -> prefix | None ->
let prefix =
Printf.sprintf "[%s:%d]"
(match !_role with
| None -> "---"
| Some `Reset -> "RST"
| Some `Bot -> "BOT"
| Some `Web -> "WEB"
| Some `Put -> "PUT")
pid
in
if !_role <> None then cache := Some prefix ;
prefix
let output string =
try let channel = open_channel () in
let time = Unix.localtime (Unix.gettimeofday ()) in
let string =
Printf.sprintf "[%d/%02d/%02d %02d:%02d:%02d] %s %s\n"
(time.Unix.tm_year + 1900)
(1 + time.Unix.tm_mon)
(time.Unix.tm_mday)
(time.Unix.tm_hour)
(time.Unix.tm_min)
(time.Unix.tm_sec)
(prefix ())
string
in
output_string channel string ;
flush channel
with _ -> ()
end
let string_of_time time =
let time = Unix.gmtime time in
Printf.sprintf "%04d%02d%02d%02d%02d%02d"
(time.Unix.tm_year + 1900)
(1 + time.Unix.tm_mon)
(time.Unix.tm_mday)
(time.Unix.tm_hour)
(time.Unix.tm_min)
(time.Unix.tm_sec)
let log format =
Printf.ksprintf Logging.output format
let memoize f =
let h = Hashtbl.create 10 in
fun x ->
try Hashtbl.find h x with Not_found ->
let y = f x in Hashtbl.add h x y ; y
let get_binary_contents full =
try
let chan = open_in_bin full in
let size = in_channel_length chan in
let into = String.create size in
let rec readall i size =
let read = input chan into i size in
if read = size then () else
readall (i + read) (size - read)
in readall 0 size ;
close_in chan ;
Some into
with
| exn ->
log "Util.get_contents: could not open %s (%s)" full (Printexc.to_string exn);
None
let get_contents file =
try
let chan = open_in file in
let size = in_channel_length chan in
let into = String.create size in
let rec readall i size =
let read = input chan into i size in
if read = size then () else
readall (i + read) (size - read)
in readall 0 size ;
close_in chan ;
BatUTF8.validate into ;
Some into
with
| BatUTF8.Malformed_code ->
log "Util.get_contents: file %s is not valid utf-8" file ;
None
| exn ->
log "Util.get_contents: could not open %s (%s)" file (Printexc.to_string exn) ;
None
let get_view_contents file =
let view_dir = Configure.lock `Templates in
get_contents (view_dir ^ file)
let get_resource_contents file =
let resource_dir = Configure.lock `Resources in
get_contents (resource_dir ^ file)
let log_requests = false
let logreq =
if log_requests then log
else function format -> Printf.ksprintf ignore format
let logjson js =
Json.serialize js
let _uniq_b = ref 0
let _uniq_c = Unix.getpid ()
let seq_old = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ0123456789"
let seq_cdb = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
let base62 seq n i =
let r = String.make n seq.[0] in
let rec aux i k =
if i <> 0 then begin
r.[k] <- seq.[i mod 62] ;
aux (i / 62) (k - 1)
end
in aux i (n-1) ; r
let uniq () =
let a = int_of_float (Unix.time() -. 1286058501.0)
and b = incr _uniq_b ; !_uniq_b mod 238328
and c = _uniq_c
in (base62 seq_cdb 5 a)^(base62 seq_cdb 3 b)^(base62 seq_cdb 3 c)
let base62 = base62 seq_old
let base62_of_int =
base62 6
let base62_to_base34 =
let ascii_zero = Char.code '0' in
let ascii_a = Char.code 'a' in
let ascii_A = Char.code 'A' in
let to_int = function
| ('0' .. '9') as c -> Char.code c - ascii_zero
| ('a' .. 'z') as c -> Char.code c - ascii_a + 10
| ('A' .. 'Z') as c -> Char.code c - ascii_A + 10 + 26
| _ -> 0
in
let rec base62_extract s i l =
try if l = 0 then 0 else to_int s.[i] + 62 * base62_extract s (i+1) (l-1) with _ -> 0
in
let base34 = "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ" in
let rec base34_write s i l value =
if l = 0 then () else (
s.[i] <- base34.[value mod 34] ;
base34_write s (i+1) (l-1) (value / 34)
)
in
fun input ->
let s = "xxxxx-xxxxx-xxxx" in
base34_write s 0 5 (base62_extract input 0 4) ;
base34_write s 6 5 (base62_extract input 4 4) ;
base34_write s 12 4 (base62_extract input 8 3) ;
s
let dec_of_hex_char = function
| '0' -> 0 | '1' -> 1 | '2' -> 2 | '3' -> 3
| '4' -> 4 | '5' -> 5 | '6' -> 6 | '7' -> 7
| '8' -> 8 | '9' -> 9 | 'a' -> 10 | 'b' -> 11
| 'c' -> 12 | 'd' -> 13 | 'e' -> 14 | 'f' -> 15
| _ -> 0
let sha1 string =
let hex = Sha1.to_hex (Sha1.string string) in
let len = String.length hex in
BatString.init
(len/2)
(fun i -> Char.chr (dec_of_hex_char hex.[2*i] * 16 + dec_of_hex_char hex.[2*i+1]))
let sha1_hmac (o_key_pad,i_key_pad) text =
BatBase64.str_encode (sha1 (o_key_pad ^ sha1 (i_key_pad ^ text)))
let utf8 string =
try BatUTF8.validate string ; Some string
with BatUTF8.Malformed_code -> None
let rec last = function
| [] -> None
| [x] -> Some x
| _::t -> last t
let first = function
| [] -> None
| h::_ -> Some h
let rec setdiff cmp a b = match a,b with
| _, [] -> a
| [], _ -> []
| ha :: ta, hb :: tb ->
let c = cmp ha hb in
if c = 0 then setdiff cmp ta tb
else if c < 0 then ha :: setdiff cmp ta tb
else setdiff cmp a tb
let rec setand cmp a b = match a,b with
| _, [] -> []
| [], _ -> []
| ha :: ta, hb :: tb ->
let c = cmp ha hb in
if c = 0 then ha :: (setand cmp ta tb)
else if c < 0 then setand cmp ta b else setand cmp a tb
let fold_accents text =
List.fold_left (fun text (reg,rep) -> Str.global_replace (Str.regexp reg) rep text) text
[ "à\\|À\\|â\\|Â\\|ä\\|Ä" , "a" ;
"é\\|É\\|ê\\|Ê\\|è\\|È\\|ë\\|Ë" , "e" ;
"ç\\|Ç" , "c" ;
"î\\|Î\\|ï\\|Ï" , "i" ;
"ù\\|Ù\\|û\\|Û\\|ü\\|Ü" , "u" ;
"ô\\|Ô\\|ö\\|Ö" , "o" ;
"œ\\|Œ" , "oe" ;
]
let uppercase s =
let s = String.copy s in
for i = 0 to String.length s - 1 do
let c = Char.code s.[i] in
if c >= 97 && c <= 122 then
s.[i] <- Char.chr (c - 32)
done ; s
let remove_bom text =
if BatString.starts_with text "\239\187\191" then String.sub text 3 (String.length text - 3)
else text
let fold_all text =
BatString.trim (uppercase (fold_accents (remove_bom text)))
let number list =
let rec aux acc = function
| [] -> []
| h :: t -> (acc , h) :: (aux (acc+1) t)
in aux 0 list
let clip size string =
if String.length string > size then String.sub string 0 size else string
let rec next_string string =
let n = String.length string in
if n = 0 then String.make 1 (Char.chr 0) else
let code = Char.code string.[n-1] in
if code = 255 then next_string (String.sub string 0 (n-1)) else
let copy = String.copy string in
copy.[n-1] <- Char.chr (code + 1) ;
copy
let every d f =
let next = ref 0. in
fun x ->
let now = Unix.gettimeofday () in
if now > !next then ( next := now +. d ; f x)
| |
0df314432b43ab8f5dcb873c38dd67c0849f0b61ab9968cd5cd030c187757f78 | Lovesan/doors | psapi.lisp | ;;;; -*- Mode: lisp; indent-tabs-mode: nil -*-
Copyright ( C ) 2010 - 2011 , < >
;;; Permission is hereby granted, free of charge, to any person
;;; obtaining a copy of this software and associated documentation
files ( the " Software " ) , to deal in the Software without
;;; restriction, including without limitation the rights to use, copy,
;;; modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software , and to permit persons to whom the Software is
;;; furnished to do so, subject to the following conditions:
;;; The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software .
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
;;; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
;;; MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
;;; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
;;; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
;;; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
;;; OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
;;; DEALINGS IN THE SOFTWARE
(in-package #:doors)
(define-struct (page-file-information
(:constructor make-page-file-info
(&key total-size total-in-use peak-usage))
(:conc-name page-file-info-))
"Contains information about a pagefile."
(cb dword :initform (sizeof 'page-file-information))
(reserved dword)
(total-size size-t)
(total-in-use size-t)
(peak-usage size-t))
(define-struct (module-info
(:conc-name module-))
"Contains the module load address, size, and entry point."
(base-of-dll pointer)
(size-of-image dword)
(entry-point pointer))
(define-struct (performance-information
(:conc-name perf-info-))
"Contains performance information."
(cb dword :initform (sizeof 'performance-information))
(commit-total size-t)
(commit-limit size-t)
(commit-peak size-t)
(physical-total size-t)
(physical-available size-t)
(system-cache size-t)
(kernel-total size-t)
(kernel-paged size-t)
(kernel-nonpaged size-t)
(page-size size-t)
(handle-count dword)
(process-count dword)
(thread-count dword))
(define-struct (process-memory-counters
(:conc-name process-mc-)
(:constructor make-process-memory-counters
(&key page-fault-count peak-working-set-size
working-set-size quota-paged-pool-usage
quota-peak-paged-pool-usage
quota-nonpaged-pool-usage
pagefile-usage
peak-pagefile-usage)))
"Contains the memory statistics for a process."
(cb dword :initform (sizeof 'process-memory-counters))
(page-fault-count dword)
(peak-working-set-size size-t)
(working-set-size size-t)
(quota-peak-paged-pool-usage size-t)
(quota-paged-pool-usage size-t)
(quota-peak-nonpaged-pool-usage size-t)
(quota-nonpaged-pool-usage size-t)
(pagefile-usage size-t)
(peak-pagefile-usage size-t))
(define-struct (process-memory-counters*
(:conc-name process-mc-)
(:include process-memory-counters)
(:constructor make-process-memory-counters*
(&key page-fault-count peak-working-set-size
working-set-size quota-paged-pool-usage
quota-peak-paged-pool-usage
quota-nonpaged-pool-usage
pagefile-usage
peak-pagefile-usage
private-usage
&aux (cb (sizeof 'process-memory-counters*)))))
"Contains extended memory statistics for a process."
(private-usage size-t))
(define-enum (ws-block-protection-flags
(:list t)
(:base-type ulong-ptr)
(:conc-name ws-block-))
(:read #b00001)
(:execute #b00010)
(:read/write #b00100)
(:copy-on-write #b00101)
(:non-cacheable #b01000)
(:guard-page #b10000))
(define-union (working-set-block-information
(:conc-name ws-block-info-)
(:reader %ws-block-info-reader)
(:writer %ws-block-info-writer))
"Contains working set information for a page."
(virtual-page ulong-ptr)
(protection ws-block-protection-flags)
(share-count byte)
(shared-p (boolean ulong-ptr)))
(defun %ws-block-info-reader (pointer out)
(declare (type pointer pointer))
(let* ((out (or out (make-working-set-block-information)))
(flags (deref pointer 'ulong-ptr)))
(declare (type working-set-block-information out)
(type ulong-ptr flags))
(setf (ws-block-info-protection out)
(translate (ldb (byte 5 0) flags)
'ws-block-protection-flags)
(ws-block-info-share-count out)
(ldb (byte 3 5) flags)
(ws-block-info-shared-p out)
(logbitp 8 flags)
(ws-block-info-virtual-page out)
(ash flags -12))
out))
(defun %ws-block-info-writer (value pointer)
(declare (type pointer pointer)
(type working-set-block-information value))
(let ((flags 0))
(declare (type ulong-ptr flags))
(setf (ldb (byte 5 0) flags)
(convert (ws-block-info-protection value)
'ws-block-protection-flags)
(ldb (byte 3 5) flags)
(ws-block-info-share-count value)
(ldb (byte 1 8) flags)
(if (ws-block-info-shared-p value) 1 0)
flags (logior flags (ash (ws-block-info-virtual-page value) 12))
(deref pointer 'ulong-ptr) flags))
value)
(define-union (working-set-block-information*
(:conc-name ws-block-info-)
(:reader %ws-block-info-reader*)
(:writer %ws-block-info-writer*))
"Contains extended working set information for a page."
(node ulong-ptr)
(valid-p (boolean ulong-ptr))
(share-count* ulong-ptr)
(protection* memory-protection-flags)
(shared-p* (boolean ulong-ptr))
(locked-p (boolean ulong-ptr))
(large-page-p (boolean ulong-ptr)))
(defun %ws-block-info-reader* (pointer out)
(declare (type pointer pointer))
(let ((out (or out (make-working-set-block-information*)))
(flags (deref pointer 'ulong-ptr)))
(declare (type working-set-block-information* out)
(type ulong-ptr flags))
(setf (ws-block-info-valid-p out)
(logbitp 0 flags)
(ws-block-info-share-count* out)
(ldb (byte 3 1) flags)
(ws-block-info-shared-p* out)
(logbitp 15 flags)
(ws-block-info-protection* out)
(translate (ldb (byte 11 4) flags)
'memory-protection-flags)
(ws-block-info-node out)
(ldb (byte 6 16) flags)
(ws-block-info-locked-p out)
(logbitp 22 flags)
(ws-block-info-large-page-p out)
(logbitp 23 flags))
out))
(defun %ws-block-info-writer* (value pointer)
(declare (type pointer pointer)
(type working-set-block-information* value))
(let ((flags 0))
(declare (type ulong-ptr flags))
(setf (ldb (byte 1 0) flags)
(if (ws-block-info-valid-p value) 1 0)
(ldb (byte 3 1) flags)
(ws-block-info-share-count* value)
(ldb (byte 11 4) flags)
(convert (ws-block-info-protection* value)
'memory-protection-flags)
(ldb (byte 1 15) flags)
(if (ws-block-info-shared-p* value) 1 0)
(ldb (byte 6 16) flags)
(ws-block-info-node value)
(ldb (byte 1 22) flags)
(if (ws-block-info-locked-p value) 1 0)
(ldb (byte 1 23) flags)
(if (ws-block-info-large-page-p value) 1 0)
(deref pointer 'ulong-ptr) flags))
value)
(define-struct (working-set-information*
(:conc-name ws-info-))
"Contains extended working set information for a process."
(virtual-address pointer)
(virtual-attributes working-set-block-information*))
(define-struct (ws-watch-information
(:conc-name ws-watch-info-))
"Contains information about a page added to a process working set."
(faulting-pc pointer)
(faulting-va pointer))
(define-struct (ws-watch-information*
(:include ws-watch-information)
(:constructor make-ws-watch-information*
(&key faulting-pc
faulting-va
faulting-thread-id))
(:conc-name ws-watch-info-))
"Contains extended information about a page added to a process working set."
(faulting-thread-id ulong-ptr)
(reserved ulong-ptr))
(define-external-function
("EmptyWorkingSet" (:camel-case))
(:stdcall psapi)
((last-error bool))
"Removes as many pages as possible from the working set of the specified process."
(process handle :optional current-process))
(define-external-function
("EnumDeviceDrivers" (:camel-case))
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumDeviceDrivers"
((:stdcall psapi)
((last-error bool) rv (if (/= needed %needed)
(subseq buffer 0 (floor needed (sizeof '*)))
buffer))
((& (simple-array pointer) :out) buffer
:aux (make-array (floor %needed (sizeof '*))
:element-type 'pointer
:initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux))))
"Retrieves the load address for each device driver in the system."
(%image-base pointer :aux)
(%cb dword :aux)
(%needed (& dword :inout) :aux))
(define-external-function
(#+doors.unicode "GetDeviceDriverFileNameW"
#-doors.unicode "GetDeviceDriverFileNameA"
device-driver-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv
(subseq filename 0 rv))
"Retrieves the path available for the specified device driver."
(image-base pointer)
(filename (& tstring :out) :aux (make-string buffer-length))
(buffer-length dword :optional 256))
#-win2000
(define-external-function
(#+doors.unicode "EnumPageFilesW"
#-doors.unicode "EnumPageFilesA"
enum-page-files)
(:stdcall psapi)
((last-error bool))
"Calls the callback routine for each installed pagefile in the system.
Callback signature: (:stdcall boolean
((context pointer)
(page-file-info (& page-file-information))
(filename (& tstring))))"
(callback pointer)
(context pointer))
(define-external-function
("EnumProcesses" (:camel-case))
(:stdcall psapi)
((last-error bool) rv (floor bytes-returned (sizeof 'dword)))
"Retrieves the process identifier for each process object in the system."
(buffer (& (array dword) :out))
(buffer-size dword :optional (array-total-size buffer))
(bytes-returned (& dword :inout)
:aux (setf buffer-size
(* buffer-size (sizeof 'dword)))))
(define-external-function
("EnumProcessModules" (:camel-case))
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumProcessModules"
((:stdcall psapi)
((last-error bool) rv
(if (/= needed %needed)
(subseq modules 0 (floor needed (sizeof 'pointer)))
modules))
(handle %process :aux process)
((& (simple-array handle) :out)
modules :aux (make-array (floor %needed (sizeof 'pointer))
:element-type 'pointer :initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux))))
"Retrieves a handle for each module in the specified process."
(process handle :optional current-process)
(%modules pointer :aux &0)
(%cb dword :aux 0)
(%needed (& dword :out) :aux))
(define-enum (list-modules-flag
(:conc-name list-modules-)
(:base-type dword))
(:default 0)
(:32bit 1)
(:64bit 2)
(:all 3))
#-(or win2000 winxp winserver2003 winxp64 winhomeserver)
(define-external-function
("EnumProcessModulesEx" enum-process-modules*)
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumProcessModulesEx"
((:stdcall psapi)
((last-error bool) rv (if (< needed %needed)
(subseq buffer (floor needed (sizeof 'pointer)))
buffer))
(handle %process :aux process)
((& (simple-array pointer) :out) buffer
:aux (make-array (floor %needed (sizeof 'pointer))
:element-type 'pointer :initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux)
(list-modules-flag %filter-flag :aux filter-flag))))
"Retrieves a handle for each module in the specified process that meets the specified filter criteria."
(process handle :optional current-process)
(%modules pointer :aux &0)
(%cb dword :aux 0)
(%needed (& dword :out) :aux)
(filter-flag list-modules-flag))
(define-external-function
(#+doors.unicode "GetDeviceDriverBaseNameW"
#-doors.unicode "GetDeviceDriverBaseNameA"
device-driver-base-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the base name of the specified device driver."
(image-base pointer)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-external-function
(#+doors.unicode "GetMappedFileNameW"
#-doors.unicode "GetMappedFileNameA"
mapped-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Returns the name of the memory-mapped file."
(process handle :optional current-process)
(address pointer)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-external-function
(#+doors.unicode "GetModuleBaseNameW"
#-doors.unicode "GetModuleBaseNameA"
module-base-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the base name of the specified module."
(process handle :optional current-process)
(module handle :optional)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-symbol-macro module-base-name (module-base-name))
(define-external-function
(#+doors.unicode "GetModuleFileNameExW"
#-doors.unicode "GetModuleFileNameExA"
module-file-name*)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the fully-qualified path for the file containing the specified module."
(process handle :optional current-process)
(module handle :optional)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-symbol-macro module-file-name* (module-file-name*))
(define-external-function
("GetModuleInformation" module-information)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves information about the specified module."
(process handle :optional current-process)
(module handle :optional)
(info (& module-info :out) :aux)
(cb dword :aux (sizeof 'module-info)))
(define-symbol-macro module-information (module-information))
#-win2000
(define-external-function
("GetPerformanceInfo" performance-info)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves the performance information."
(info (& performance-information :out) :aux)
(cb dword :aux (sizeof 'performance-information)))
#-win2000
(define-symbol-macro performance-info (performance-info))
#-win2000
(define-external-function
(#+doors.unicode "GetProcessImageFileNameW"
#-doors.unicode "GetProcessImageFileNameA"
process-image-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the name of the executable file for the specified process."
(process handle :optional current-process)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
#-win2000
(define-symbol-macro process-image-file-name (process-image-file-name))
(define-external-function
("GetProcessMemoryInfo" process-memory-info)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves information about the memory usage of the specified process."
(process handle :optional current-process)
(info (& (union ()
(mc* process-memory-counters*)
(mc process-memory-counters))
:out)
:optional (make-process-memory-counters))
(cb dword :aux (process-mc-cb info)))
(define-symbol-macro process-memory-info (process-memory-info))
(define-external-function
("GetWsChanges" ws-changes)
(:stdcall psapi)
((last-error bool))
"Retrieves information about the pages that have been added to the working set of the specified process since the last time this function or the initialize-process-for-ws-watch function was called."
(process handle :optional current-process)
(watch-info (& (array ws-watch-information) :out))
(cb dword :optional (* (sizeof 'ws-watch-information)
(array-total-size watch-info))))
#-(or win2000 winxp winserver2003 winxp64 winhomeserver)
(define-external-function
("GetWsChangesEx" ws-changes*)
(:stdcall psapi)
((last-error bool))
"Retrieves information about the pages that have been added to the working set of the specified process since the last time this function or the initialize-process-for-ws-watch function was called."
(process handle :optional current-process)
(watch-info (& (array ws-watch-information*) :out))
(cb dword :optional (* (sizeof 'ws-watch-information*)
(array-total-size watch-info))))
(define-external-function
("InitializeProcessForWsWatch" (:camel-case))
(:stdcall psapi)
((last-error bool))
"Initiates monitoring of the working set of the specified process. "
(process handle :optional current-process))
(define-external-function
("QueryWorkingSet" (:camel-case))
(:stdcall psapi)
((last-error bool) rv (and (> cb (sizeof 'ulong-ptr))
(<= (row-major-aref buffer 0)
(1- (floor cb (sizeof 'ulong-ptr))))))
"Retrieves information about the pages currently added to the working set of the specified process."
(process handle :optional current-process)
(buffer (& (array ulong-ptr) :out))
(cb dword :optional (* (sizeof 'ulong-ptr)
(array-total-size buffer))))
#-(or win2000 winxp winhomeserver)
(define-external-function
("QueryWorkingSetEx" query-working-set*)
(:stdcall psapi)
((last-error bool))
"Retrieves extended information about the pages at specific virtual addresses in the address space of the specified process."
(process handle :optional current-process)
(buffer (& (array working-set-information*) :inout))
(cb dword :optional (* (sizeof 'working-set-information*)
(array-total-size buffer))))
| null | https://raw.githubusercontent.com/Lovesan/doors/12a2fe2fd8d6c42ae314bd6d02a1d2332f12499e/system/psapi.lisp | lisp | -*- Mode: lisp; indent-tabs-mode: nil -*-
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE |
Copyright ( C ) 2010 - 2011 , < >
files ( the " Software " ) , to deal in the Software without
of the Software , and to permit persons to whom the Software is
included in all copies or substantial portions of the Software .
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
(in-package #:doors)
(define-struct (page-file-information
(:constructor make-page-file-info
(&key total-size total-in-use peak-usage))
(:conc-name page-file-info-))
"Contains information about a pagefile."
(cb dword :initform (sizeof 'page-file-information))
(reserved dword)
(total-size size-t)
(total-in-use size-t)
(peak-usage size-t))
(define-struct (module-info
(:conc-name module-))
"Contains the module load address, size, and entry point."
(base-of-dll pointer)
(size-of-image dword)
(entry-point pointer))
(define-struct (performance-information
(:conc-name perf-info-))
"Contains performance information."
(cb dword :initform (sizeof 'performance-information))
(commit-total size-t)
(commit-limit size-t)
(commit-peak size-t)
(physical-total size-t)
(physical-available size-t)
(system-cache size-t)
(kernel-total size-t)
(kernel-paged size-t)
(kernel-nonpaged size-t)
(page-size size-t)
(handle-count dword)
(process-count dword)
(thread-count dword))
(define-struct (process-memory-counters
(:conc-name process-mc-)
(:constructor make-process-memory-counters
(&key page-fault-count peak-working-set-size
working-set-size quota-paged-pool-usage
quota-peak-paged-pool-usage
quota-nonpaged-pool-usage
pagefile-usage
peak-pagefile-usage)))
"Contains the memory statistics for a process."
(cb dword :initform (sizeof 'process-memory-counters))
(page-fault-count dword)
(peak-working-set-size size-t)
(working-set-size size-t)
(quota-peak-paged-pool-usage size-t)
(quota-paged-pool-usage size-t)
(quota-peak-nonpaged-pool-usage size-t)
(quota-nonpaged-pool-usage size-t)
(pagefile-usage size-t)
(peak-pagefile-usage size-t))
(define-struct (process-memory-counters*
(:conc-name process-mc-)
(:include process-memory-counters)
(:constructor make-process-memory-counters*
(&key page-fault-count peak-working-set-size
working-set-size quota-paged-pool-usage
quota-peak-paged-pool-usage
quota-nonpaged-pool-usage
pagefile-usage
peak-pagefile-usage
private-usage
&aux (cb (sizeof 'process-memory-counters*)))))
"Contains extended memory statistics for a process."
(private-usage size-t))
(define-enum (ws-block-protection-flags
(:list t)
(:base-type ulong-ptr)
(:conc-name ws-block-))
(:read #b00001)
(:execute #b00010)
(:read/write #b00100)
(:copy-on-write #b00101)
(:non-cacheable #b01000)
(:guard-page #b10000))
(define-union (working-set-block-information
(:conc-name ws-block-info-)
(:reader %ws-block-info-reader)
(:writer %ws-block-info-writer))
"Contains working set information for a page."
(virtual-page ulong-ptr)
(protection ws-block-protection-flags)
(share-count byte)
(shared-p (boolean ulong-ptr)))
(defun %ws-block-info-reader (pointer out)
(declare (type pointer pointer))
(let* ((out (or out (make-working-set-block-information)))
(flags (deref pointer 'ulong-ptr)))
(declare (type working-set-block-information out)
(type ulong-ptr flags))
(setf (ws-block-info-protection out)
(translate (ldb (byte 5 0) flags)
'ws-block-protection-flags)
(ws-block-info-share-count out)
(ldb (byte 3 5) flags)
(ws-block-info-shared-p out)
(logbitp 8 flags)
(ws-block-info-virtual-page out)
(ash flags -12))
out))
(defun %ws-block-info-writer (value pointer)
(declare (type pointer pointer)
(type working-set-block-information value))
(let ((flags 0))
(declare (type ulong-ptr flags))
(setf (ldb (byte 5 0) flags)
(convert (ws-block-info-protection value)
'ws-block-protection-flags)
(ldb (byte 3 5) flags)
(ws-block-info-share-count value)
(ldb (byte 1 8) flags)
(if (ws-block-info-shared-p value) 1 0)
flags (logior flags (ash (ws-block-info-virtual-page value) 12))
(deref pointer 'ulong-ptr) flags))
value)
(define-union (working-set-block-information*
(:conc-name ws-block-info-)
(:reader %ws-block-info-reader*)
(:writer %ws-block-info-writer*))
"Contains extended working set information for a page."
(node ulong-ptr)
(valid-p (boolean ulong-ptr))
(share-count* ulong-ptr)
(protection* memory-protection-flags)
(shared-p* (boolean ulong-ptr))
(locked-p (boolean ulong-ptr))
(large-page-p (boolean ulong-ptr)))
(defun %ws-block-info-reader* (pointer out)
(declare (type pointer pointer))
(let ((out (or out (make-working-set-block-information*)))
(flags (deref pointer 'ulong-ptr)))
(declare (type working-set-block-information* out)
(type ulong-ptr flags))
(setf (ws-block-info-valid-p out)
(logbitp 0 flags)
(ws-block-info-share-count* out)
(ldb (byte 3 1) flags)
(ws-block-info-shared-p* out)
(logbitp 15 flags)
(ws-block-info-protection* out)
(translate (ldb (byte 11 4) flags)
'memory-protection-flags)
(ws-block-info-node out)
(ldb (byte 6 16) flags)
(ws-block-info-locked-p out)
(logbitp 22 flags)
(ws-block-info-large-page-p out)
(logbitp 23 flags))
out))
(defun %ws-block-info-writer* (value pointer)
(declare (type pointer pointer)
(type working-set-block-information* value))
(let ((flags 0))
(declare (type ulong-ptr flags))
(setf (ldb (byte 1 0) flags)
(if (ws-block-info-valid-p value) 1 0)
(ldb (byte 3 1) flags)
(ws-block-info-share-count* value)
(ldb (byte 11 4) flags)
(convert (ws-block-info-protection* value)
'memory-protection-flags)
(ldb (byte 1 15) flags)
(if (ws-block-info-shared-p* value) 1 0)
(ldb (byte 6 16) flags)
(ws-block-info-node value)
(ldb (byte 1 22) flags)
(if (ws-block-info-locked-p value) 1 0)
(ldb (byte 1 23) flags)
(if (ws-block-info-large-page-p value) 1 0)
(deref pointer 'ulong-ptr) flags))
value)
(define-struct (working-set-information*
(:conc-name ws-info-))
"Contains extended working set information for a process."
(virtual-address pointer)
(virtual-attributes working-set-block-information*))
(define-struct (ws-watch-information
(:conc-name ws-watch-info-))
"Contains information about a page added to a process working set."
(faulting-pc pointer)
(faulting-va pointer))
(define-struct (ws-watch-information*
(:include ws-watch-information)
(:constructor make-ws-watch-information*
(&key faulting-pc
faulting-va
faulting-thread-id))
(:conc-name ws-watch-info-))
"Contains extended information about a page added to a process working set."
(faulting-thread-id ulong-ptr)
(reserved ulong-ptr))
(define-external-function
("EmptyWorkingSet" (:camel-case))
(:stdcall psapi)
((last-error bool))
"Removes as many pages as possible from the working set of the specified process."
(process handle :optional current-process))
(define-external-function
("EnumDeviceDrivers" (:camel-case))
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumDeviceDrivers"
((:stdcall psapi)
((last-error bool) rv (if (/= needed %needed)
(subseq buffer 0 (floor needed (sizeof '*)))
buffer))
((& (simple-array pointer) :out) buffer
:aux (make-array (floor %needed (sizeof '*))
:element-type 'pointer
:initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux))))
"Retrieves the load address for each device driver in the system."
(%image-base pointer :aux)
(%cb dword :aux)
(%needed (& dword :inout) :aux))
(define-external-function
(#+doors.unicode "GetDeviceDriverFileNameW"
#-doors.unicode "GetDeviceDriverFileNameA"
device-driver-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv
(subseq filename 0 rv))
"Retrieves the path available for the specified device driver."
(image-base pointer)
(filename (& tstring :out) :aux (make-string buffer-length))
(buffer-length dword :optional 256))
#-win2000
(define-external-function
(#+doors.unicode "EnumPageFilesW"
#-doors.unicode "EnumPageFilesA"
enum-page-files)
(:stdcall psapi)
((last-error bool))
"Calls the callback routine for each installed pagefile in the system.
Callback signature: (:stdcall boolean
((context pointer)
(page-file-info (& page-file-information))
(filename (& tstring))))"
(callback pointer)
(context pointer))
(define-external-function
("EnumProcesses" (:camel-case))
(:stdcall psapi)
((last-error bool) rv (floor bytes-returned (sizeof 'dword)))
"Retrieves the process identifier for each process object in the system."
(buffer (& (array dword) :out))
(buffer-size dword :optional (array-total-size buffer))
(bytes-returned (& dword :inout)
:aux (setf buffer-size
(* buffer-size (sizeof 'dword)))))
(define-external-function
("EnumProcessModules" (:camel-case))
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumProcessModules"
((:stdcall psapi)
((last-error bool) rv
(if (/= needed %needed)
(subseq modules 0 (floor needed (sizeof 'pointer)))
modules))
(handle %process :aux process)
((& (simple-array handle) :out)
modules :aux (make-array (floor %needed (sizeof 'pointer))
:element-type 'pointer :initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux))))
"Retrieves a handle for each module in the specified process."
(process handle :optional current-process)
(%modules pointer :aux &0)
(%cb dword :aux 0)
(%needed (& dword :out) :aux))
(define-enum (list-modules-flag
(:conc-name list-modules-)
(:base-type dword))
(:default 0)
(:32bit 1)
(:64bit 2)
(:all 3))
#-(or win2000 winxp winserver2003 winxp64 winhomeserver)
(define-external-function
("EnumProcessModulesEx" enum-process-modules*)
(:stdcall psapi)
((last-error bool) rv
(external-function-call "EnumProcessModulesEx"
((:stdcall psapi)
((last-error bool) rv (if (< needed %needed)
(subseq buffer (floor needed (sizeof 'pointer)))
buffer))
(handle %process :aux process)
((& (simple-array pointer) :out) buffer
:aux (make-array (floor %needed (sizeof 'pointer))
:element-type 'pointer :initial-element &0))
(dword cb :aux %needed)
((& dword :out) needed :aux)
(list-modules-flag %filter-flag :aux filter-flag))))
"Retrieves a handle for each module in the specified process that meets the specified filter criteria."
(process handle :optional current-process)
(%modules pointer :aux &0)
(%cb dword :aux 0)
(%needed (& dword :out) :aux)
(filter-flag list-modules-flag))
(define-external-function
(#+doors.unicode "GetDeviceDriverBaseNameW"
#-doors.unicode "GetDeviceDriverBaseNameA"
device-driver-base-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the base name of the specified device driver."
(image-base pointer)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-external-function
(#+doors.unicode "GetMappedFileNameW"
#-doors.unicode "GetMappedFileNameA"
mapped-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Returns the name of the memory-mapped file."
(process handle :optional current-process)
(address pointer)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-external-function
(#+doors.unicode "GetModuleBaseNameW"
#-doors.unicode "GetModuleBaseNameA"
module-base-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the base name of the specified module."
(process handle :optional current-process)
(module handle :optional)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-symbol-macro module-base-name (module-base-name))
(define-external-function
(#+doors.unicode "GetModuleFileNameExW"
#-doors.unicode "GetModuleFileNameExA"
module-file-name*)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the fully-qualified path for the file containing the specified module."
(process handle :optional current-process)
(module handle :optional)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
(define-symbol-macro module-file-name* (module-file-name*))
(define-external-function
("GetModuleInformation" module-information)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves information about the specified module."
(process handle :optional current-process)
(module handle :optional)
(info (& module-info :out) :aux)
(cb dword :aux (sizeof 'module-info)))
(define-symbol-macro module-information (module-information))
#-win2000
(define-external-function
("GetPerformanceInfo" performance-info)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves the performance information."
(info (& performance-information :out) :aux)
(cb dword :aux (sizeof 'performance-information)))
#-win2000
(define-symbol-macro performance-info (performance-info))
#-win2000
(define-external-function
(#+doors.unicode "GetProcessImageFileNameW"
#-doors.unicode "GetProcessImageFileNameA"
process-image-file-name)
(:stdcall psapi)
((last-error dword not-zero) rv (subseq buffer 0 rv))
"Retrieves the name of the executable file for the specified process."
(process handle :optional current-process)
(buffer (& tstring :out) :aux (make-string buffer-size))
(buffer-size dword :optional 256))
#-win2000
(define-symbol-macro process-image-file-name (process-image-file-name))
(define-external-function
("GetProcessMemoryInfo" process-memory-info)
(:stdcall psapi)
((last-error bool) rv info)
"Retrieves information about the memory usage of the specified process."
(process handle :optional current-process)
(info (& (union ()
(mc* process-memory-counters*)
(mc process-memory-counters))
:out)
:optional (make-process-memory-counters))
(cb dword :aux (process-mc-cb info)))
(define-symbol-macro process-memory-info (process-memory-info))
(define-external-function
("GetWsChanges" ws-changes)
(:stdcall psapi)
((last-error bool))
"Retrieves information about the pages that have been added to the working set of the specified process since the last time this function or the initialize-process-for-ws-watch function was called."
(process handle :optional current-process)
(watch-info (& (array ws-watch-information) :out))
(cb dword :optional (* (sizeof 'ws-watch-information)
(array-total-size watch-info))))
#-(or win2000 winxp winserver2003 winxp64 winhomeserver)
(define-external-function
("GetWsChangesEx" ws-changes*)
(:stdcall psapi)
((last-error bool))
"Retrieves information about the pages that have been added to the working set of the specified process since the last time this function or the initialize-process-for-ws-watch function was called."
(process handle :optional current-process)
(watch-info (& (array ws-watch-information*) :out))
(cb dword :optional (* (sizeof 'ws-watch-information*)
(array-total-size watch-info))))
(define-external-function
("InitializeProcessForWsWatch" (:camel-case))
(:stdcall psapi)
((last-error bool))
"Initiates monitoring of the working set of the specified process. "
(process handle :optional current-process))
(define-external-function
("QueryWorkingSet" (:camel-case))
(:stdcall psapi)
((last-error bool) rv (and (> cb (sizeof 'ulong-ptr))
(<= (row-major-aref buffer 0)
(1- (floor cb (sizeof 'ulong-ptr))))))
"Retrieves information about the pages currently added to the working set of the specified process."
(process handle :optional current-process)
(buffer (& (array ulong-ptr) :out))
(cb dword :optional (* (sizeof 'ulong-ptr)
(array-total-size buffer))))
#-(or win2000 winxp winhomeserver)
(define-external-function
("QueryWorkingSetEx" query-working-set*)
(:stdcall psapi)
((last-error bool))
"Retrieves extended information about the pages at specific virtual addresses in the address space of the specified process."
(process handle :optional current-process)
(buffer (& (array working-set-information*) :inout))
(cb dword :optional (* (sizeof 'working-set-information*)
(array-total-size buffer))))
|
7870c7f0c90c796b8ab5cd865efcbba9f55fd7f58c2095907551a08514fdded9 | ruhler/smten | Either.hs |
# LANGUAGE NoImplicitPrelude #
module Smten.Base.Data.Either (
Either(..), either, lefts, rights, partitionEithers,
) where
import GHC.Classes
import GHC.Base
import GHC.Show
data Either a b = Left a | Right b
deriving (Eq, Ord, Show) -- TODO: derive Read too
instance Functor (Either a) where
fmap _ (Left x) = Left x
fmap f (Right y) = Right (f y)
instance Monad (Either e) where
return = Right
Left l >>= _ = Left l
Right r >>= k = k r
either :: (a -> c) -> (b -> c) -> Either a b -> c
either f _ (Left x) = f x
either _ g (Right y) = g y
lefts :: [Either a b] -> [a]
lefts x = [a | Left a <- x]
rights :: [Either a b] -> [b]
rights x = [a | Right a <- x]
partitionEithers :: [Either a b] -> ([a],[b])
partitionEithers = foldr (either left right) ([],[])
where
left a ~(l, r) = (a:l, r)
right a ~(l, r) = (l, a:r)
| null | https://raw.githubusercontent.com/ruhler/smten/16dd37fb0ee3809408803d4be20401211b6c4027/smten-base/Smten/Base/Data/Either.hs | haskell | TODO: derive Read too |
# LANGUAGE NoImplicitPrelude #
module Smten.Base.Data.Either (
Either(..), either, lefts, rights, partitionEithers,
) where
import GHC.Classes
import GHC.Base
import GHC.Show
data Either a b = Left a | Right b
instance Functor (Either a) where
fmap _ (Left x) = Left x
fmap f (Right y) = Right (f y)
instance Monad (Either e) where
return = Right
Left l >>= _ = Left l
Right r >>= k = k r
either :: (a -> c) -> (b -> c) -> Either a b -> c
either f _ (Left x) = f x
either _ g (Right y) = g y
lefts :: [Either a b] -> [a]
lefts x = [a | Left a <- x]
rights :: [Either a b] -> [b]
rights x = [a | Right a <- x]
partitionEithers :: [Either a b] -> ([a],[b])
partitionEithers = foldr (either left right) ([],[])
where
left a ~(l, r) = (a:l, r)
right a ~(l, r) = (l, a:r)
|
2329b033acda5149a9dd59f6317860f3954584d9095ddc0237759344661e0c66 | DrakeAxelrod/mrclean | Spec.hs | -- DEPRECATED
import MrCParser
import Test.QuickCheck
-- |
statements :: [(String, Expr, Bool)]
statements = [
("x := y", Assign (Var "x") (Var "y"), True),
("x -> x", Lambda (Var "x") (Var "x"), True),
("x|y", Application (Var "x") (Var "y"), True),
("v", Var "v", True),
("x := (x -> x)", Assign (Var "x") (Lambda (Var "x") (Var "x")), True),
("y := (f -> (f|f))", Assign (Var "y") (Lambda (Var "f") (Application (Var "f") (Var "f"))), True),
("z := (f -> ((f|f)|f))", Assign (Var "z") (Lambda (Var "f") (Application (Application (Var "f") (Var "f")) (Var "f"))), True),
("1 + 2", Application (Var "2") (Application (Var "1") (Var "+")), True),
("1 + 2 * 3 = 7", Application (Application (Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3")) (Var "=")) (Var "7"), True),
("(1 + 2) * 3 = 9", Application (Application (Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3")) (Var "=")) (Var "9"), True),
("1 + 2 * 3", Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3"), True),
("1 + (2 * 3)", Application (Application (Var "1") (Var "+")) (Application (Application (Var "2") (Var "*")) (Var "3")), True),
("(1 + 2) * 3", Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3"), True),
("x := 1 + 2", Assign (Var "x") (Application (Application (Var "1") (Var "+")) (Var "2")), True),
("x := 1 + 2 | 3", Assign (Var "x") (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "3")), True),
("x := 1 | (y -> y + 1) | 2", Assign (Var "x") (Application (Application (Var "1") (Lambda (Var "y") (Application (Application (Var "y") (Var "+")) (Var "1")))) (Var "2")), True)
]
testParseExpr :: String -> Expr -> Bool
testParseExpr s e = case parseExpr s of
Left _ -> False
Right e' -> e == e'
testAllStatements :: [(String, Expr, Bool)] -> Bool
testAllStatements [] = True
testAllStatements ((s, e, b):xs) = testParseExpr s e == b && testAllStatements xs
prop_allStatements :: Bool
prop_allStatements = testAllStatements statements
main :: IO ()
main = do
-- test allTestStatements
quickCheck prop_allStatements
-- show the one that fails
print $ filter (\(s, e, b) -> not $ testParseExpr s e) statements
| null | https://raw.githubusercontent.com/DrakeAxelrod/mrclean/1fe5d422e9eb88b03f034e792b8ebe97fdbe5fb5/test/Spec.hs | haskell | DEPRECATED
|
test allTestStatements
show the one that fails
| import MrCParser
import Test.QuickCheck
statements :: [(String, Expr, Bool)]
statements = [
("x := y", Assign (Var "x") (Var "y"), True),
("x -> x", Lambda (Var "x") (Var "x"), True),
("x|y", Application (Var "x") (Var "y"), True),
("v", Var "v", True),
("x := (x -> x)", Assign (Var "x") (Lambda (Var "x") (Var "x")), True),
("y := (f -> (f|f))", Assign (Var "y") (Lambda (Var "f") (Application (Var "f") (Var "f"))), True),
("z := (f -> ((f|f)|f))", Assign (Var "z") (Lambda (Var "f") (Application (Application (Var "f") (Var "f")) (Var "f"))), True),
("1 + 2", Application (Var "2") (Application (Var "1") (Var "+")), True),
("1 + 2 * 3 = 7", Application (Application (Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3")) (Var "=")) (Var "7"), True),
("(1 + 2) * 3 = 9", Application (Application (Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3")) (Var "=")) (Var "9"), True),
("1 + 2 * 3", Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3"), True),
("1 + (2 * 3)", Application (Application (Var "1") (Var "+")) (Application (Application (Var "2") (Var "*")) (Var "3")), True),
("(1 + 2) * 3", Application (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "*")) (Var "3"), True),
("x := 1 + 2", Assign (Var "x") (Application (Application (Var "1") (Var "+")) (Var "2")), True),
("x := 1 + 2 | 3", Assign (Var "x") (Application (Application (Application (Var "1") (Var "+")) (Var "2")) (Var "3")), True),
("x := 1 | (y -> y + 1) | 2", Assign (Var "x") (Application (Application (Var "1") (Lambda (Var "y") (Application (Application (Var "y") (Var "+")) (Var "1")))) (Var "2")), True)
]
testParseExpr :: String -> Expr -> Bool
testParseExpr s e = case parseExpr s of
Left _ -> False
Right e' -> e == e'
testAllStatements :: [(String, Expr, Bool)] -> Bool
testAllStatements [] = True
testAllStatements ((s, e, b):xs) = testParseExpr s e == b && testAllStatements xs
prop_allStatements :: Bool
prop_allStatements = testAllStatements statements
main :: IO ()
main = do
quickCheck prop_allStatements
print $ filter (\(s, e, b) -> not $ testParseExpr s e) statements
|
41c5e92ecfa49738d74ce38003a22a910c4c43b9acb65c46942b118b3eb0af9e | brendanhay/amazonka | UpdateConnectClientAddIn.hs | # LANGUAGE DeriveGeneric #
# LANGUAGE DuplicateRecordFields #
# LANGUAGE NamedFieldPuns #
{-# LANGUAGE OverloadedStrings #-}
# LANGUAGE RecordWildCards #
{-# LANGUAGE StrictData #-}
# LANGUAGE TypeFamilies #
# LANGUAGE NoImplicitPrelude #
# OPTIONS_GHC -fno - warn - unused - binds #
# OPTIONS_GHC -fno - warn - unused - imports #
# OPTIONS_GHC -fno - warn - unused - matches #
Derived from AWS service descriptions , licensed under Apache 2.0 .
-- |
Module : Amazonka . WorkSpaces . UpdateConnectClientAddIn
Copyright : ( c ) 2013 - 2023
License : Mozilla Public License , v. 2.0 .
Maintainer : < brendan.g.hay+ >
-- Stability : auto-generated
Portability : non - portable ( GHC extensions )
--
Updates a Amazon Connect client add - in . Use this action to update the
name and endpoint URL of a Amazon Connect client add - in .
module Amazonka.WorkSpaces.UpdateConnectClientAddIn
( -- * Creating a Request
UpdateConnectClientAddIn (..),
newUpdateConnectClientAddIn,
-- * Request Lenses
updateConnectClientAddIn_name,
updateConnectClientAddIn_url,
updateConnectClientAddIn_addInId,
updateConnectClientAddIn_resourceId,
-- * Destructuring the Response
UpdateConnectClientAddInResponse (..),
newUpdateConnectClientAddInResponse,
-- * Response Lenses
updateConnectClientAddInResponse_httpStatus,
)
where
import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import qualified Amazonka.Data as Data
import qualified Amazonka.Prelude as Prelude
import qualified Amazonka.Request as Request
import qualified Amazonka.Response as Response
import Amazonka.WorkSpaces.Types
-- | /See:/ 'newUpdateConnectClientAddIn' smart constructor.
data UpdateConnectClientAddIn = UpdateConnectClientAddIn'
{ -- | The name of the client add-in.
name :: Prelude.Maybe Prelude.Text,
| The endpoint URL of the Amazon Connect client add - in .
url :: Prelude.Maybe Prelude.Text,
-- | The identifier of the client add-in to update.
addInId :: Prelude.Text,
-- | The directory identifier for which the client add-in is configured.
resourceId :: Prelude.Text
}
deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)
-- |
-- Create a value of 'UpdateConnectClientAddIn' with all optional fields omitted.
--
Use < -lens generic - lens > or < optics > to modify other optional fields .
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'name', 'updateConnectClientAddIn_name' - The name of the client add-in.
--
' url ' , ' updateConnectClientAddIn_url ' - The endpoint URL of the Amazon Connect client add - in .
--
-- 'addInId', 'updateConnectClientAddIn_addInId' - The identifier of the client add-in to update.
--
-- 'resourceId', 'updateConnectClientAddIn_resourceId' - The directory identifier for which the client add-in is configured.
newUpdateConnectClientAddIn ::
-- | 'addInId'
Prelude.Text ->
-- | 'resourceId'
Prelude.Text ->
UpdateConnectClientAddIn
newUpdateConnectClientAddIn pAddInId_ pResourceId_ =
UpdateConnectClientAddIn'
{ name = Prelude.Nothing,
url = Prelude.Nothing,
addInId = pAddInId_,
resourceId = pResourceId_
}
-- | The name of the client add-in.
updateConnectClientAddIn_name :: Lens.Lens' UpdateConnectClientAddIn (Prelude.Maybe Prelude.Text)
updateConnectClientAddIn_name = Lens.lens (\UpdateConnectClientAddIn' {name} -> name) (\s@UpdateConnectClientAddIn' {} a -> s {name = a} :: UpdateConnectClientAddIn)
| The endpoint URL of the Amazon Connect client add - in .
updateConnectClientAddIn_url :: Lens.Lens' UpdateConnectClientAddIn (Prelude.Maybe Prelude.Text)
updateConnectClientAddIn_url = Lens.lens (\UpdateConnectClientAddIn' {url} -> url) (\s@UpdateConnectClientAddIn' {} a -> s {url = a} :: UpdateConnectClientAddIn)
-- | The identifier of the client add-in to update.
updateConnectClientAddIn_addInId :: Lens.Lens' UpdateConnectClientAddIn Prelude.Text
updateConnectClientAddIn_addInId = Lens.lens (\UpdateConnectClientAddIn' {addInId} -> addInId) (\s@UpdateConnectClientAddIn' {} a -> s {addInId = a} :: UpdateConnectClientAddIn)
-- | The directory identifier for which the client add-in is configured.
updateConnectClientAddIn_resourceId :: Lens.Lens' UpdateConnectClientAddIn Prelude.Text
updateConnectClientAddIn_resourceId = Lens.lens (\UpdateConnectClientAddIn' {resourceId} -> resourceId) (\s@UpdateConnectClientAddIn' {} a -> s {resourceId = a} :: UpdateConnectClientAddIn)
instance Core.AWSRequest UpdateConnectClientAddIn where
type
AWSResponse UpdateConnectClientAddIn =
UpdateConnectClientAddInResponse
request overrides =
Request.postJSON (overrides defaultService)
response =
Response.receiveEmpty
( \s h x ->
UpdateConnectClientAddInResponse'
Prelude.<$> (Prelude.pure (Prelude.fromEnum s))
)
instance Prelude.Hashable UpdateConnectClientAddIn where
hashWithSalt _salt UpdateConnectClientAddIn' {..} =
_salt `Prelude.hashWithSalt` name
`Prelude.hashWithSalt` url
`Prelude.hashWithSalt` addInId
`Prelude.hashWithSalt` resourceId
instance Prelude.NFData UpdateConnectClientAddIn where
rnf UpdateConnectClientAddIn' {..} =
Prelude.rnf name
`Prelude.seq` Prelude.rnf url
`Prelude.seq` Prelude.rnf addInId
`Prelude.seq` Prelude.rnf resourceId
instance Data.ToHeaders UpdateConnectClientAddIn where
toHeaders =
Prelude.const
( Prelude.mconcat
[ "X-Amz-Target"
Data.=# ( "WorkspacesService.UpdateConnectClientAddIn" ::
Prelude.ByteString
),
"Content-Type"
Data.=# ( "application/x-amz-json-1.1" ::
Prelude.ByteString
)
]
)
instance Data.ToJSON UpdateConnectClientAddIn where
toJSON UpdateConnectClientAddIn' {..} =
Data.object
( Prelude.catMaybes
[ ("Name" Data..=) Prelude.<$> name,
("URL" Data..=) Prelude.<$> url,
Prelude.Just ("AddInId" Data..= addInId),
Prelude.Just ("ResourceId" Data..= resourceId)
]
)
instance Data.ToPath UpdateConnectClientAddIn where
toPath = Prelude.const "/"
instance Data.ToQuery UpdateConnectClientAddIn where
toQuery = Prelude.const Prelude.mempty
-- | /See:/ 'newUpdateConnectClientAddInResponse' smart constructor.
data UpdateConnectClientAddInResponse = UpdateConnectClientAddInResponse'
{ -- | The response's http status code.
httpStatus :: Prelude.Int
}
deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)
-- |
-- Create a value of 'UpdateConnectClientAddInResponse' with all optional fields omitted.
--
Use < -lens generic - lens > or < optics > to modify other optional fields .
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
' httpStatus ' , ' updateConnectClientAddInResponse_httpStatus ' - The response 's http status code .
newUpdateConnectClientAddInResponse ::
-- | 'httpStatus'
Prelude.Int ->
UpdateConnectClientAddInResponse
newUpdateConnectClientAddInResponse pHttpStatus_ =
UpdateConnectClientAddInResponse'
{ httpStatus =
pHttpStatus_
}
-- | The response's http status code.
updateConnectClientAddInResponse_httpStatus :: Lens.Lens' UpdateConnectClientAddInResponse Prelude.Int
updateConnectClientAddInResponse_httpStatus = Lens.lens (\UpdateConnectClientAddInResponse' {httpStatus} -> httpStatus) (\s@UpdateConnectClientAddInResponse' {} a -> s {httpStatus = a} :: UpdateConnectClientAddInResponse)
instance
Prelude.NFData
UpdateConnectClientAddInResponse
where
rnf UpdateConnectClientAddInResponse' {..} =
Prelude.rnf httpStatus
| null | https://raw.githubusercontent.com/brendanhay/amazonka/09f52b75d2cfdff221b439280d3279d22690d6a6/lib/services/amazonka-workspaces/gen/Amazonka/WorkSpaces/UpdateConnectClientAddIn.hs | haskell | # LANGUAGE OverloadedStrings #
# LANGUAGE StrictData #
|
Stability : auto-generated
* Creating a Request
* Request Lenses
* Destructuring the Response
* Response Lenses
| /See:/ 'newUpdateConnectClientAddIn' smart constructor.
| The name of the client add-in.
| The identifier of the client add-in to update.
| The directory identifier for which the client add-in is configured.
|
Create a value of 'UpdateConnectClientAddIn' with all optional fields omitted.
The following record fields are available, with the corresponding lenses provided
for backwards compatibility:
'name', 'updateConnectClientAddIn_name' - The name of the client add-in.
'addInId', 'updateConnectClientAddIn_addInId' - The identifier of the client add-in to update.
'resourceId', 'updateConnectClientAddIn_resourceId' - The directory identifier for which the client add-in is configured.
| 'addInId'
| 'resourceId'
| The name of the client add-in.
| The identifier of the client add-in to update.
| The directory identifier for which the client add-in is configured.
| /See:/ 'newUpdateConnectClientAddInResponse' smart constructor.
| The response's http status code.
|
Create a value of 'UpdateConnectClientAddInResponse' with all optional fields omitted.
The following record fields are available, with the corresponding lenses provided
for backwards compatibility:
| 'httpStatus'
| The response's http status code. | # LANGUAGE DeriveGeneric #
# LANGUAGE DuplicateRecordFields #
# LANGUAGE NamedFieldPuns #
# LANGUAGE RecordWildCards #
# LANGUAGE TypeFamilies #
# LANGUAGE NoImplicitPrelude #
# OPTIONS_GHC -fno - warn - unused - binds #
# OPTIONS_GHC -fno - warn - unused - imports #
# OPTIONS_GHC -fno - warn - unused - matches #
Derived from AWS service descriptions , licensed under Apache 2.0 .
Module : Amazonka . WorkSpaces . UpdateConnectClientAddIn
Copyright : ( c ) 2013 - 2023
License : Mozilla Public License , v. 2.0 .
Maintainer : < brendan.g.hay+ >
Portability : non - portable ( GHC extensions )
Updates a Amazon Connect client add - in . Use this action to update the
name and endpoint URL of a Amazon Connect client add - in .
module Amazonka.WorkSpaces.UpdateConnectClientAddIn
UpdateConnectClientAddIn (..),
newUpdateConnectClientAddIn,
updateConnectClientAddIn_name,
updateConnectClientAddIn_url,
updateConnectClientAddIn_addInId,
updateConnectClientAddIn_resourceId,
UpdateConnectClientAddInResponse (..),
newUpdateConnectClientAddInResponse,
updateConnectClientAddInResponse_httpStatus,
)
where
import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import qualified Amazonka.Data as Data
import qualified Amazonka.Prelude as Prelude
import qualified Amazonka.Request as Request
import qualified Amazonka.Response as Response
import Amazonka.WorkSpaces.Types
data UpdateConnectClientAddIn = UpdateConnectClientAddIn'
name :: Prelude.Maybe Prelude.Text,
| The endpoint URL of the Amazon Connect client add - in .
url :: Prelude.Maybe Prelude.Text,
addInId :: Prelude.Text,
resourceId :: Prelude.Text
}
deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)
Use < -lens generic - lens > or < optics > to modify other optional fields .
' url ' , ' updateConnectClientAddIn_url ' - The endpoint URL of the Amazon Connect client add - in .
newUpdateConnectClientAddIn ::
Prelude.Text ->
Prelude.Text ->
UpdateConnectClientAddIn
newUpdateConnectClientAddIn pAddInId_ pResourceId_ =
UpdateConnectClientAddIn'
{ name = Prelude.Nothing,
url = Prelude.Nothing,
addInId = pAddInId_,
resourceId = pResourceId_
}
updateConnectClientAddIn_name :: Lens.Lens' UpdateConnectClientAddIn (Prelude.Maybe Prelude.Text)
updateConnectClientAddIn_name = Lens.lens (\UpdateConnectClientAddIn' {name} -> name) (\s@UpdateConnectClientAddIn' {} a -> s {name = a} :: UpdateConnectClientAddIn)
| The endpoint URL of the Amazon Connect client add - in .
updateConnectClientAddIn_url :: Lens.Lens' UpdateConnectClientAddIn (Prelude.Maybe Prelude.Text)
updateConnectClientAddIn_url = Lens.lens (\UpdateConnectClientAddIn' {url} -> url) (\s@UpdateConnectClientAddIn' {} a -> s {url = a} :: UpdateConnectClientAddIn)
updateConnectClientAddIn_addInId :: Lens.Lens' UpdateConnectClientAddIn Prelude.Text
updateConnectClientAddIn_addInId = Lens.lens (\UpdateConnectClientAddIn' {addInId} -> addInId) (\s@UpdateConnectClientAddIn' {} a -> s {addInId = a} :: UpdateConnectClientAddIn)
updateConnectClientAddIn_resourceId :: Lens.Lens' UpdateConnectClientAddIn Prelude.Text
updateConnectClientAddIn_resourceId = Lens.lens (\UpdateConnectClientAddIn' {resourceId} -> resourceId) (\s@UpdateConnectClientAddIn' {} a -> s {resourceId = a} :: UpdateConnectClientAddIn)
instance Core.AWSRequest UpdateConnectClientAddIn where
type
AWSResponse UpdateConnectClientAddIn =
UpdateConnectClientAddInResponse
request overrides =
Request.postJSON (overrides defaultService)
response =
Response.receiveEmpty
( \s h x ->
UpdateConnectClientAddInResponse'
Prelude.<$> (Prelude.pure (Prelude.fromEnum s))
)
instance Prelude.Hashable UpdateConnectClientAddIn where
hashWithSalt _salt UpdateConnectClientAddIn' {..} =
_salt `Prelude.hashWithSalt` name
`Prelude.hashWithSalt` url
`Prelude.hashWithSalt` addInId
`Prelude.hashWithSalt` resourceId
instance Prelude.NFData UpdateConnectClientAddIn where
rnf UpdateConnectClientAddIn' {..} =
Prelude.rnf name
`Prelude.seq` Prelude.rnf url
`Prelude.seq` Prelude.rnf addInId
`Prelude.seq` Prelude.rnf resourceId
instance Data.ToHeaders UpdateConnectClientAddIn where
toHeaders =
Prelude.const
( Prelude.mconcat
[ "X-Amz-Target"
Data.=# ( "WorkspacesService.UpdateConnectClientAddIn" ::
Prelude.ByteString
),
"Content-Type"
Data.=# ( "application/x-amz-json-1.1" ::
Prelude.ByteString
)
]
)
instance Data.ToJSON UpdateConnectClientAddIn where
toJSON UpdateConnectClientAddIn' {..} =
Data.object
( Prelude.catMaybes
[ ("Name" Data..=) Prelude.<$> name,
("URL" Data..=) Prelude.<$> url,
Prelude.Just ("AddInId" Data..= addInId),
Prelude.Just ("ResourceId" Data..= resourceId)
]
)
instance Data.ToPath UpdateConnectClientAddIn where
toPath = Prelude.const "/"
instance Data.ToQuery UpdateConnectClientAddIn where
toQuery = Prelude.const Prelude.mempty
data UpdateConnectClientAddInResponse = UpdateConnectClientAddInResponse'
httpStatus :: Prelude.Int
}
deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)
Use < -lens generic - lens > or < optics > to modify other optional fields .
' httpStatus ' , ' updateConnectClientAddInResponse_httpStatus ' - The response 's http status code .
newUpdateConnectClientAddInResponse ::
Prelude.Int ->
UpdateConnectClientAddInResponse
newUpdateConnectClientAddInResponse pHttpStatus_ =
UpdateConnectClientAddInResponse'
{ httpStatus =
pHttpStatus_
}
updateConnectClientAddInResponse_httpStatus :: Lens.Lens' UpdateConnectClientAddInResponse Prelude.Int
updateConnectClientAddInResponse_httpStatus = Lens.lens (\UpdateConnectClientAddInResponse' {httpStatus} -> httpStatus) (\s@UpdateConnectClientAddInResponse' {} a -> s {httpStatus = a} :: UpdateConnectClientAddInResponse)
instance
Prelude.NFData
UpdateConnectClientAddInResponse
where
rnf UpdateConnectClientAddInResponse' {..} =
Prelude.rnf httpStatus
|
e94952f8d863daf568e9910a413933c1fdb5ebc82f1b7f5e6e66e857563087eb | hstreamdb/hstream | ValidateSpec.hs | # LANGUAGE LambdaCase #
# LANGUAGE OverloadedLists #
{-# LANGUAGE OverloadedStrings #-}
module HStream.SQL.ValidateSpec where
import qualified Data.Aeson as A
import Data.Either (isLeft, isRight)
import Data.Function
import Data.Functor
import qualified Data.Text as T
import qualified Data.Vector as V
import HStream.SQL.Abs
import HStream.SQL.Internal.Validate
import Test.Hspec
mapi = h 0 where
h i f = \case
[] -> []
a : l -> let r = f i a in r : h (i + 1) f l
spec :: Spec
spec = describe "Validate Basic Data Types" $ do
let mkNothing :: BNFC'Position
mkNothing = Nothing :: BNFC'Position
let setH = ExprSetFunc mkNothing (SetFuncCountAll mkNothing)
let xsVal = [ExprInt mkNothing $ PInteger mkNothing 42, ExprBool mkNothing $ BoolTrue mkNothing, ExprArr mkNothing [ExprInt mkNothing $ PInteger mkNothing 42, ExprBool mkNothing $ BoolTrue mkNothing]]
it "PNInteger" $ do
validate (PInteger mkNothing 807) `shouldSatisfy` isRight
validate (NInteger mkNothing 36) `shouldSatisfy` isRight
validate (IPInteger mkNothing 16) `shouldSatisfy` isRight
it "PNDouble" $ do
validate (PDouble mkNothing 0.807) `shouldSatisfy` isRight
validate (IPDouble mkNothing 20.05) `shouldSatisfy` isRight
validate (NDouble mkNothing 15.00) `shouldSatisfy` isRight
it "SString" $ do
validate (SString "netural term") `shouldSatisfy` isRight
it "RawColumn" $ do
validate (RawColumn "Kaze no Yukue") `shouldSatisfy` isRight
it "Boolean" $ do
validate (BoolTrue mkNothing) `shouldSatisfy` isRight
validate (BoolFalse mkNothing) `shouldSatisfy` isRight
it "date" $ do
validate (DDate mkNothing
(IPInteger Nothing 2021) (IPInteger Nothing 02) (IPInteger Nothing 29))
`shouldSatisfy` isLeft
validate (DDate mkNothing
(IPInteger Nothing 2020) (IPInteger Nothing 02) (IPInteger Nothing 29))
`shouldSatisfy` isRight
validate (DDate mkNothing
(IPInteger Nothing 2005) (IPInteger Nothing 13) (IPInteger Nothing 29))
`shouldSatisfy` isLeft
it "time" $ do
validate (DTime mkNothing
(IPInteger Nothing 14) (IPInteger Nothing 61) (IPInteger Nothing 59))
`shouldSatisfy` isLeft
validate (DTime mkNothing
(IPInteger Nothing 14) (IPInteger Nothing 16) (IPInteger Nothing 59))
`shouldSatisfy` isRight
it "Interval" $ do
validate (DInterval mkNothing (IPInteger mkNothing 13) (TimeUnitYear mkNothing))
`shouldSatisfy` isRight
validate (DInterval mkNothing (NInteger mkNothing (-1)) (TimeUnitYear mkNothing))
`shouldSatisfy` isRight
it "ColName" $ do
validate (ColNameSimple mkNothing (Ident "col")) `shouldSatisfy` isRight
validate (ColNameStream mkNothing (Ident "stream") (Ident "col"))
`shouldSatisfy` isRight
it "Aggregate function Ok" $ do
validate (SetFuncCountAll mkNothing) `shouldSatisfy` isRight
validate (SetFuncCount mkNothing (ExprBool mkNothing $ BoolTrue mkNothing))
`shouldSatisfy` isRight
validate (SetFuncAvg mkNothing (ExprInt mkNothing $ PInteger mkNothing 42))
`shouldSatisfy` isRight
validate (SetFuncSum mkNothing (ExprInt mkNothing $ PInteger mkNothing 42))
`shouldSatisfy` isRight
validate (SetFuncMax mkNothing (ExprString mkNothing "g free"))
`shouldSatisfy` isRight
validate (SetFuncMin mkNothing (ExprInt mkNothing $ NInteger mkNothing 40))
`shouldSatisfy` isRight
it "Aggregate function Err" $ do
validate (SetFuncCount mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncAvg mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncMax mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncMin mkNothing setH) `shouldSatisfy` isLeft
it "array const" $ do
validate (ExprArr mkNothing []) `shouldSatisfy` isRight
validate (ExprArr mkNothing xsVal)
`shouldSatisfy` isRight
it "map const" $ do
validate (ExprMap mkNothing []) `shouldSatisfy` isRight
validate
(ExprMap mkNothing $ DLabelledValueExpr mkNothing "foo" <$> xsVal)
`shouldSatisfy` isLeft
validate
(ExprMap mkNothing $ mapi (\i -> DLabelledValueExpr mkNothing (Ident $ "foo" <> T.pack (show i))) xsVal)
`shouldSatisfy` isRight
it "sel" $ do
validate (SelListSublist mkNothing (DerivedColSimpl mkNothing <$> xsVal))
`shouldSatisfy` isRight
validate (SelListSublist mkNothing ((\x -> DerivedColAs mkNothing x (Ident "comm")) <$> xsVal))
`shouldSatisfy` isLeft
validate (SelListSublist mkNothing (mapi (\i x -> DerivedColAs mkNothing x (Ident $ "comm" <> T.pack (show i))) $ xsVal))
`shouldSatisfy` isRight
| null | https://raw.githubusercontent.com/hstreamdb/hstream/1e62ffdf8e51ade146df43ac18c163948b90cc1b/hstream-sql/test/HStream/SQL/ValidateSpec.hs | haskell | # LANGUAGE OverloadedStrings # | # LANGUAGE LambdaCase #
# LANGUAGE OverloadedLists #
module HStream.SQL.ValidateSpec where
import qualified Data.Aeson as A
import Data.Either (isLeft, isRight)
import Data.Function
import Data.Functor
import qualified Data.Text as T
import qualified Data.Vector as V
import HStream.SQL.Abs
import HStream.SQL.Internal.Validate
import Test.Hspec
mapi = h 0 where
h i f = \case
[] -> []
a : l -> let r = f i a in r : h (i + 1) f l
spec :: Spec
spec = describe "Validate Basic Data Types" $ do
let mkNothing :: BNFC'Position
mkNothing = Nothing :: BNFC'Position
let setH = ExprSetFunc mkNothing (SetFuncCountAll mkNothing)
let xsVal = [ExprInt mkNothing $ PInteger mkNothing 42, ExprBool mkNothing $ BoolTrue mkNothing, ExprArr mkNothing [ExprInt mkNothing $ PInteger mkNothing 42, ExprBool mkNothing $ BoolTrue mkNothing]]
it "PNInteger" $ do
validate (PInteger mkNothing 807) `shouldSatisfy` isRight
validate (NInteger mkNothing 36) `shouldSatisfy` isRight
validate (IPInteger mkNothing 16) `shouldSatisfy` isRight
it "PNDouble" $ do
validate (PDouble mkNothing 0.807) `shouldSatisfy` isRight
validate (IPDouble mkNothing 20.05) `shouldSatisfy` isRight
validate (NDouble mkNothing 15.00) `shouldSatisfy` isRight
it "SString" $ do
validate (SString "netural term") `shouldSatisfy` isRight
it "RawColumn" $ do
validate (RawColumn "Kaze no Yukue") `shouldSatisfy` isRight
it "Boolean" $ do
validate (BoolTrue mkNothing) `shouldSatisfy` isRight
validate (BoolFalse mkNothing) `shouldSatisfy` isRight
it "date" $ do
validate (DDate mkNothing
(IPInteger Nothing 2021) (IPInteger Nothing 02) (IPInteger Nothing 29))
`shouldSatisfy` isLeft
validate (DDate mkNothing
(IPInteger Nothing 2020) (IPInteger Nothing 02) (IPInteger Nothing 29))
`shouldSatisfy` isRight
validate (DDate mkNothing
(IPInteger Nothing 2005) (IPInteger Nothing 13) (IPInteger Nothing 29))
`shouldSatisfy` isLeft
it "time" $ do
validate (DTime mkNothing
(IPInteger Nothing 14) (IPInteger Nothing 61) (IPInteger Nothing 59))
`shouldSatisfy` isLeft
validate (DTime mkNothing
(IPInteger Nothing 14) (IPInteger Nothing 16) (IPInteger Nothing 59))
`shouldSatisfy` isRight
it "Interval" $ do
validate (DInterval mkNothing (IPInteger mkNothing 13) (TimeUnitYear mkNothing))
`shouldSatisfy` isRight
validate (DInterval mkNothing (NInteger mkNothing (-1)) (TimeUnitYear mkNothing))
`shouldSatisfy` isRight
it "ColName" $ do
validate (ColNameSimple mkNothing (Ident "col")) `shouldSatisfy` isRight
validate (ColNameStream mkNothing (Ident "stream") (Ident "col"))
`shouldSatisfy` isRight
it "Aggregate function Ok" $ do
validate (SetFuncCountAll mkNothing) `shouldSatisfy` isRight
validate (SetFuncCount mkNothing (ExprBool mkNothing $ BoolTrue mkNothing))
`shouldSatisfy` isRight
validate (SetFuncAvg mkNothing (ExprInt mkNothing $ PInteger mkNothing 42))
`shouldSatisfy` isRight
validate (SetFuncSum mkNothing (ExprInt mkNothing $ PInteger mkNothing 42))
`shouldSatisfy` isRight
validate (SetFuncMax mkNothing (ExprString mkNothing "g free"))
`shouldSatisfy` isRight
validate (SetFuncMin mkNothing (ExprInt mkNothing $ NInteger mkNothing 40))
`shouldSatisfy` isRight
it "Aggregate function Err" $ do
validate (SetFuncCount mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncAvg mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncMax mkNothing setH) `shouldSatisfy` isLeft
validate (SetFuncMin mkNothing setH) `shouldSatisfy` isLeft
it "array const" $ do
validate (ExprArr mkNothing []) `shouldSatisfy` isRight
validate (ExprArr mkNothing xsVal)
`shouldSatisfy` isRight
it "map const" $ do
validate (ExprMap mkNothing []) `shouldSatisfy` isRight
validate
(ExprMap mkNothing $ DLabelledValueExpr mkNothing "foo" <$> xsVal)
`shouldSatisfy` isLeft
validate
(ExprMap mkNothing $ mapi (\i -> DLabelledValueExpr mkNothing (Ident $ "foo" <> T.pack (show i))) xsVal)
`shouldSatisfy` isRight
it "sel" $ do
validate (SelListSublist mkNothing (DerivedColSimpl mkNothing <$> xsVal))
`shouldSatisfy` isRight
validate (SelListSublist mkNothing ((\x -> DerivedColAs mkNothing x (Ident "comm")) <$> xsVal))
`shouldSatisfy` isLeft
validate (SelListSublist mkNothing (mapi (\i x -> DerivedColAs mkNothing x (Ident $ "comm" <> T.pack (show i))) $ xsVal))
`shouldSatisfy` isRight
|
08dca2a503f886b9826ef2eaba5d5771e59f8a106010df0250ba5b7bdbfc6707 | tezos/tezos-mirror | baking_actions.mli | (*****************************************************************************)
(* *)
(* Open Source License *)
Copyright ( c ) 2021 Nomadic Labs < >
(* *)
(* Permission is hereby granted, free of charge, to any person obtaining a *)
(* copy of this software and associated documentation files (the "Software"),*)
to deal in the Software without restriction , including without limitation
(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *)
and/or sell copies of the Software , and to permit persons to whom the
(* Software is furnished to do so, subject to the following conditions: *)
(* *)
(* The above copyright notice and this permission notice shall be included *)
(* in all copies or substantial portions of the Software. *)
(* *)
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *)
(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *)
(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)
LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING
(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *)
(* DEALINGS IN THE SOFTWARE. *)
(* *)
(*****************************************************************************)
open Protocol
open Alpha_context
open Baking_state
type block_kind =
| Fresh of Operation_pool.pool
| Reproposal of {
consensus_operations : packed_operation list;
payload_hash : Block_payload_hash.t;
payload_round : Round.t;
payload : Operation_pool.payload;
}
type block_to_bake = {
predecessor : block_info;
round : Round.t;
delegate : consensus_key_and_delegate;
kind : block_kind;
force_apply : bool;
(** if true, while baking the block, try and apply the block and its
operations instead of only validating them. this can be permanently
set using the [--force-apply] flag (see [force_apply_switch_arg] in
[baking_commands.ml]). *)
}
type action =
| Do_nothing
| Inject_block of {block_to_bake : block_to_bake; updated_state : state}
| Inject_preendorsements of {
preendorsements : (consensus_key_and_delegate * consensus_content) list;
}
| Reinject_preendorsements of {preendorsements : packed_operation list}
| Inject_endorsements of {
endorsements : (consensus_key_and_delegate * consensus_content) list;
}
| Update_to_level of level_update
| Synchronize_round of round_update
| Watch_proposal
and level_update = {
new_level_proposal : proposal;
compute_new_state :
current_round:Round.t ->
delegate_slots:delegate_slots ->
next_level_delegate_slots:delegate_slots ->
(state * action) Lwt.t;
}
and round_update = {
new_round_proposal : proposal;
handle_proposal : state -> (state * action) Lwt.t;
}
type t = action
val generate_seed_nonce_hash :
Baking_configuration.nonce_config ->
consensus_key ->
Level.t ->
(Nonce_hash.t * Nonce.t) option tzresult Lwt.t
val inject_block :
state_recorder:(new_state:state -> unit tzresult Lwt.t) ->
state ->
block_to_bake ->
updated_state:state ->
state tzresult Lwt.t
val inject_preendorsements :
state ->
preendorsements:(consensus_key_and_delegate * consensus_content) list ->
state tzresult Lwt.t
val sign_endorsements :
state ->
(consensus_key_and_delegate * consensus_content) list ->
(consensus_key_and_delegate * packed_operation) list tzresult Lwt.t
val inject_endorsements :
state ->
endorsements:(consensus_key_and_delegate * consensus_content) list ->
unit tzresult Lwt.t
val sign_dal_attestations :
state ->
(consensus_key_and_delegate * Dal.Attestation.operation) list ->
(consensus_key_and_delegate * packed_operation * Dal.Attestation.t) list
tzresult
Lwt.t
val get_dal_attestations :
state ->
level:Int32.t ->
(consensus_key_and_delegate * Dal.Attestation.operation) list tzresult Lwt.t
val prepare_waiting_for_quorum :
state -> int * (slot:Slot.t -> int) * Operation_worker.candidate
val start_waiting_for_preendorsement_quorum : state -> unit Lwt.t
val start_waiting_for_endorsement_quorum : state -> unit Lwt.t
val update_to_level : state -> level_update -> (state * t) tzresult Lwt.t
val pp_action : Format.formatter -> t -> unit
val compute_round : proposal -> Round.round_durations -> Round.t tzresult
val perform_action :
state_recorder:(new_state:state -> unit tzresult Lwt.t) ->
state ->
t ->
state tzresult Lwt.t
| null | https://raw.githubusercontent.com/tezos/tezos-mirror/f0fac81ca8d49f180e663316b2566780ddc1517e/src/proto_alpha/lib_delegate/baking_actions.mli | ocaml | ***************************************************************************
Open Source License
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
the rights to use, copy, modify, merge, publish, distribute, sublicense,
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
***************************************************************************
* if true, while baking the block, try and apply the block and its
operations instead of only validating them. this can be permanently
set using the [--force-apply] flag (see [force_apply_switch_arg] in
[baking_commands.ml]). | Copyright ( c ) 2021 Nomadic Labs < >
to deal in the Software without restriction , including without limitation
and/or sell copies of the Software , and to permit persons to whom the
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING
open Protocol
open Alpha_context
open Baking_state
type block_kind =
| Fresh of Operation_pool.pool
| Reproposal of {
consensus_operations : packed_operation list;
payload_hash : Block_payload_hash.t;
payload_round : Round.t;
payload : Operation_pool.payload;
}
type block_to_bake = {
predecessor : block_info;
round : Round.t;
delegate : consensus_key_and_delegate;
kind : block_kind;
force_apply : bool;
}
type action =
| Do_nothing
| Inject_block of {block_to_bake : block_to_bake; updated_state : state}
| Inject_preendorsements of {
preendorsements : (consensus_key_and_delegate * consensus_content) list;
}
| Reinject_preendorsements of {preendorsements : packed_operation list}
| Inject_endorsements of {
endorsements : (consensus_key_and_delegate * consensus_content) list;
}
| Update_to_level of level_update
| Synchronize_round of round_update
| Watch_proposal
and level_update = {
new_level_proposal : proposal;
compute_new_state :
current_round:Round.t ->
delegate_slots:delegate_slots ->
next_level_delegate_slots:delegate_slots ->
(state * action) Lwt.t;
}
and round_update = {
new_round_proposal : proposal;
handle_proposal : state -> (state * action) Lwt.t;
}
type t = action
val generate_seed_nonce_hash :
Baking_configuration.nonce_config ->
consensus_key ->
Level.t ->
(Nonce_hash.t * Nonce.t) option tzresult Lwt.t
val inject_block :
state_recorder:(new_state:state -> unit tzresult Lwt.t) ->
state ->
block_to_bake ->
updated_state:state ->
state tzresult Lwt.t
val inject_preendorsements :
state ->
preendorsements:(consensus_key_and_delegate * consensus_content) list ->
state tzresult Lwt.t
val sign_endorsements :
state ->
(consensus_key_and_delegate * consensus_content) list ->
(consensus_key_and_delegate * packed_operation) list tzresult Lwt.t
val inject_endorsements :
state ->
endorsements:(consensus_key_and_delegate * consensus_content) list ->
unit tzresult Lwt.t
val sign_dal_attestations :
state ->
(consensus_key_and_delegate * Dal.Attestation.operation) list ->
(consensus_key_and_delegate * packed_operation * Dal.Attestation.t) list
tzresult
Lwt.t
val get_dal_attestations :
state ->
level:Int32.t ->
(consensus_key_and_delegate * Dal.Attestation.operation) list tzresult Lwt.t
val prepare_waiting_for_quorum :
state -> int * (slot:Slot.t -> int) * Operation_worker.candidate
val start_waiting_for_preendorsement_quorum : state -> unit Lwt.t
val start_waiting_for_endorsement_quorum : state -> unit Lwt.t
val update_to_level : state -> level_update -> (state * t) tzresult Lwt.t
val pp_action : Format.formatter -> t -> unit
val compute_round : proposal -> Round.round_durations -> Round.t tzresult
val perform_action :
state_recorder:(new_state:state -> unit tzresult Lwt.t) ->
state ->
t ->
state tzresult Lwt.t
|
cc30ecfa1eb2902e9a7a3ca9d94123d56bd1cf73e382635527defeee772bd45f | ianthehenry/basilica | Routes.hs | module Routes
( Request(..)
, Response(..)
) where
import ClassyPrelude
import Types
type Name = Text
data Request = GetPost ID
| ListPosts PostQuery
| CreatePost (Maybe ID) Token Text
| CreateCode EmailAddress
| CreateToken Code
| CreateUser EmailAddress Name
data Response = NewPost ResolvedPost
| ExistingPost ResolvedPost
| PostList [ResolvedPost]
| NewToken ResolvedToken
| NewUser ResolvedCode
| NewCode ResolvedCode
| BadToken
| BadCode
| UnknownEmail
| InvalidUsername
| ExistingNameOrEmail
| BadRequest LText
| PostNotFound ID
| null | https://raw.githubusercontent.com/ianthehenry/basilica/da80accd601efa0d90187afee90fe6e77cddbd76/Routes.hs | haskell | module Routes
( Request(..)
, Response(..)
) where
import ClassyPrelude
import Types
type Name = Text
data Request = GetPost ID
| ListPosts PostQuery
| CreatePost (Maybe ID) Token Text
| CreateCode EmailAddress
| CreateToken Code
| CreateUser EmailAddress Name
data Response = NewPost ResolvedPost
| ExistingPost ResolvedPost
| PostList [ResolvedPost]
| NewToken ResolvedToken
| NewUser ResolvedCode
| NewCode ResolvedCode
| BadToken
| BadCode
| UnknownEmail
| InvalidUsername
| ExistingNameOrEmail
| BadRequest LText
| PostNotFound ID
| |
ef7fc4a0570b0f30d40292c4a6934fbd8ab6843aa677dd5367d490f7a8480e26 | dcavar/schemeNLP | chartrigram.scm | ":"; exec mzscheme -r $0 "$@"
;;; ----------------------------------------------------
;;; Filename: chartrigrams.ss
Author : < >
;;;
( C ) 2006 by
;;;
;;; This code is published under the restrictive GPL!
;;; Please find the text of the GPL here:
;;;
;;;
;;; It is free for use, change, etc. as long as the copyright
;;; note above is included in any modified version of the code.
;;;
;;; This script assumes that the text is raw and encoded in UTF8.
;;;
;;; Functions:
1 . The text file is loaded into memory .
2 . Trigrams of characters are created from the corpus .
3 . The hash - table is converted into a list of key - value tuples .
4 . The key - values are sorted by value , and a list of tokens
;;; and their relative frequency is printed out.
;;;
If the command line parameters contain more than one text file ,
;;; the above results are accumulated over all the input text files.
;;;
;;; Usage:
;;; mzscheme -r chartrigrams.ss test1.txt test2.txt ...
;;; ----------------------------------------------------
;;; all required libraries and functions
(require (lib "vector-lib.ss" "srfi" "43")) ; for vector-for-each
(require (lib "list.ss")) ; for sort
;;; Global variables
(define trigramcount 0.0) ; counter of total number tokens
(define trigrams (make-hash-table 'equal)) ; hash-table for tokens and counts
;;; sort-by-value
;;; <- hash-table
;;; -> list of key-value tuples (lists)
;;; ----------------------------------------------------
;;; Sort a hash-table of key-value pairs by value, by converting it
;;; into a list of key-value tuples and sorting on the value.
(define sort-by-value
(lambda (table)
(let ([keyval (hash-table-map table (lambda (key val) (list key val)))])
(sort keyval (lambda (a b)
(< (cadr a) (cadr b)))))))
;;; add-words
;;; <- list of characters, i.e. string
;;; !-> updated hash-table trigrams
;;; !-> updated trigramcount counter
;;; ----------------------------------------------------
;;; Add words/tokens from an ordered list of tokens to the hash-table
;;; container and keep track of their count.
(define add-trigrams
(lambda (text)
(let ([max (- (string-length text) 2)])
(set! trigramcount (+ trigramcount max)) ; increment the total number of tokens
(let loop ([i 0])
(let* ([token (substring text i (+ i 3))]
[value (hash-table-get trigrams token 0.0)])
(hash-table-put! trigrams token (+ value 1)))
(if (< i (- max 1))
(loop (+ i 1)))))))
;;; load-file
;;; <- string filename
;;; -> string file content
;;; ----------------------------------------------------
;;; Load text from file into a string variable and return it.
(define load-file
(lambda (name)
(call-with-input-file name
(lambda (p)
(read-string (file-size name) p)))))
;;; ----------------------------------------------------
;;; main steps
(begin
(vector-for-each (lambda (i fname)
(printf "Loading file: ~a\n" fname)
(add-trigrams (load-file fname)))
argv)
(printf "Number of tokens: ~a\n" trigramcount)
(printf "Number of types: ~a\n" (hash-table-count trigrams))
(printf "Type/Token ratio: ~a\n" (/ (hash-table-count trigrams) trigramcount))
(let ([result (sort-by-value trigrams)])
(printf "---------------------------------------------------------\n")
(printf "Sorted decreasing with relative frequency:\n")
(printf "token\tabsolute frequency\trelative frequency\n")
(for-each (lambda (a)
(write (car a))
(printf "\t~a\t~a\n" (cadr a) (/ (cadr a) trigramcount)))
(reverse result))))
| null | https://raw.githubusercontent.com/dcavar/schemeNLP/daa0ddcc4fa67fe00dcf6054c4d30d11a00b2f7f/src/chartrigram.scm | scheme | exec mzscheme -r $0 "$@"
----------------------------------------------------
Filename: chartrigrams.ss
This code is published under the restrictive GPL!
Please find the text of the GPL here:
It is free for use, change, etc. as long as the copyright
note above is included in any modified version of the code.
This script assumes that the text is raw and encoded in UTF8.
Functions:
and their relative frequency is printed out.
the above results are accumulated over all the input text files.
Usage:
mzscheme -r chartrigrams.ss test1.txt test2.txt ...
----------------------------------------------------
all required libraries and functions
for vector-for-each
for sort
Global variables
counter of total number tokens
hash-table for tokens and counts
sort-by-value
<- hash-table
-> list of key-value tuples (lists)
----------------------------------------------------
Sort a hash-table of key-value pairs by value, by converting it
into a list of key-value tuples and sorting on the value.
add-words
<- list of characters, i.e. string
!-> updated hash-table trigrams
!-> updated trigramcount counter
----------------------------------------------------
Add words/tokens from an ordered list of tokens to the hash-table
container and keep track of their count.
increment the total number of tokens
load-file
<- string filename
-> string file content
----------------------------------------------------
Load text from file into a string variable and return it.
----------------------------------------------------
main steps |
Author : < >
( C ) 2006 by
1 . The text file is loaded into memory .
2 . Trigrams of characters are created from the corpus .
3 . The hash - table is converted into a list of key - value tuples .
4 . The key - values are sorted by value , and a list of tokens
If the command line parameters contain more than one text file ,
(define sort-by-value
(lambda (table)
(let ([keyval (hash-table-map table (lambda (key val) (list key val)))])
(sort keyval (lambda (a b)
(< (cadr a) (cadr b)))))))
(define add-trigrams
(lambda (text)
(let ([max (- (string-length text) 2)])
(let loop ([i 0])
(let* ([token (substring text i (+ i 3))]
[value (hash-table-get trigrams token 0.0)])
(hash-table-put! trigrams token (+ value 1)))
(if (< i (- max 1))
(loop (+ i 1)))))))
(define load-file
(lambda (name)
(call-with-input-file name
(lambda (p)
(read-string (file-size name) p)))))
(begin
(vector-for-each (lambda (i fname)
(printf "Loading file: ~a\n" fname)
(add-trigrams (load-file fname)))
argv)
(printf "Number of tokens: ~a\n" trigramcount)
(printf "Number of types: ~a\n" (hash-table-count trigrams))
(printf "Type/Token ratio: ~a\n" (/ (hash-table-count trigrams) trigramcount))
(let ([result (sort-by-value trigrams)])
(printf "---------------------------------------------------------\n")
(printf "Sorted decreasing with relative frequency:\n")
(printf "token\tabsolute frequency\trelative frequency\n")
(for-each (lambda (a)
(write (car a))
(printf "\t~a\t~a\n" (cadr a) (/ (cadr a) trigramcount)))
(reverse result))))
|
c02911ad5d02f04045c25102d0468687c6b51a4c193c16263d0bd244490dadcd | javalib-team/sawja | ssaBir.mli |
* This file is part of SAWJA
* Copyright ( c)2010 ( INRIA )
* Copyright ( c)2010 ( INRIA )
*
* This program is free software : you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation , either version 3 of
* the License , or ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* You should have received a copy of the GNU General Public
* License along with this program . If not , see
* < / > .
* This file is part of SAWJA
* Copyright (c)2010 David Pichardie (INRIA)
* Copyright (c)2010 Vincent Monfort (INRIA)
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* </>.
*)
open Javalib_pack
open JBasics
open Javalib
* Common code for SSA representations
(** Signature of IR to transform in SSA form*)
module type IRSig = sig
(** Abstract data type for variables *)
type var
(** [var_equal v1 v2] is equivalent to [v1 = v2], but is faster. *)
val var_equal : var -> var -> bool
(** [var_orig v] is [true] if and only if the variable [v] was already used at
bytecode level. *)
val var_orig : var -> bool
(** Used only for internal transformations. *)
val var_ssa : var -> bool
* [ v ] returns a string representation of the variable [ v ] .
val var_name : var -> string
(** [var_name_debug v] returns, if possible, the original variable name of [v],
if the initial class was compiled using debug information. *)
val var_name_debug : var -> string option
* [ var_name_g v ] returns a string representation of the variable [ v ] .
If the initial class was compiled using debug information , original
variable names are build on this information . It is equivalent to
[ var_name_g x = match var_name_debug with Some s - > s | _ - > var_name x ]
If the initial class was compiled using debug information, original
variable names are build on this information. It is equivalent to
[var_name_g x = match var_name_debug with Some s -> s | _ -> var_name x] *)
val var_name_g : var -> string
(** [bc_num v] returns the local var number if the variable comes from the initial bytecode program. *)
val bc_num : var -> int option
(** [index v] returns the hash value of the given variable. *)
val index : var -> int
type instr
val print_instr : ?show_type:bool -> instr -> string
type exception_handler = {
e_start : int;
e_end : int;
e_handler : int;
e_catch_type : JBasics.class_name option;
e_catch_var : var
}
* [ t ] is the parameter type for JBir methods .
type t = {
vars : var array;
(** All variables that appear in the method. [vars.(i)] is the variable of
index [i]. *)
params : (JBasics.value_type * var) list;
(** [params] contains the method parameters (including the receiver this for
virtual methods). *)
code : instr array;
(** Array of instructions the immediate successor of [pc] is [pc+1]. Jumps
are absolute. *)
exc_tbl : exception_handler list;
(** [exc_tbl] is the exception table of the method code. Jumps are
absolute. *)
line_number_table : (int * int) list option;
(** [line_number_table] contains debug information. It is a list of pairs
[(i,j)] where [i] indicates the index into the bytecode array at which the
code for a new line [j] in the original source file begins. *)
pc_bc2ir : int Ptmap.t;
(** map from bytecode code line to ir code line (very sparse). *)
pc_ir2bc : int array;
(** map from ir code line to bytecode code line *)
}
(** [jump_target m] indicates whether program points are join points or not in [m]. *)
val jump_target : t -> bool array
(** [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i]. *)
val exception_edges : t -> (int * exception_handler) list
end
( * * Common " variable " type and functions signature for SSA form
(** Common "variable" type and functions signature for SSA form *)
module type VarSig =
sig
type ir_var
type var = int * (ir_var * int)
val var_equal : var -> var -> bool
val var_orig : var -> bool
val var_name_debug: var -> string option
val var_name: var -> string
val var_name_g: var -> string
val bc_num: var -> int option
val var_origin : var -> ir_var
val var_ssa_index : var -> int
val index : var -> int
type dictionary
val make_dictionary : unit -> dictionary
val make_var : dictionary -> ir_var -> int -> var
val make_array_var : dictionary -> ir_var -> var array
module VarSet : Javalib_pack.JBasics.GenericSetSig with type elt = int * (ir_var * int)
module VarMap : Javalib_pack.JBasics.GenericMapSig with type key = int * (ir_var * int)
end
(** Functor to create "variable" type and functions for SSA form from
IR*)
module Var (IR:IRSig) : VarSig with type ir_var = IR.var
* Common code represenation types for SSA forms
module type TSsaSig =
sig
type var_t
type var_set
type instr_t
type phi_node = {
def : var_t;
* The variable defined in the phi node
use : var_t array;
* Array of used variable in the phi node , the index of a used
variable in the array corresponds to the index of the program
point predecessor in [ preds.(phi_node_pc ) ] .
variable in the array corresponds to the index of the program
point predecessor in [preds.(phi_node_pc)].*)
use_set : var_set;
* Set of used variable in the phi node ( no information on
predecessor program point for a used variable )
predecessor program point for a used variable)*)
}
type t = {
vars : var_t array;
(** All variables that appear in the method. [vars.(i)] is the variable of
index [i]. *)
params : (JBasics.value_type * var_t) list;
(** [params] contains the method parameters (including the receiver this for
virtual methods). *)
code : instr_t array;
(** Array of instructions the immediate successor of [pc] is [pc+1]. Jumps
are absolute. *)
preds : (int array) array;
(** Array of instructions program point that are predecessors of
instruction [pc]. *)
phi_nodes : (phi_node list) array;
* Array of phi nodes assignments . Each phi nodes assignments at point [ pc ] must
be executed before the corresponding [ code.(pc ) ] instruction .
be executed before the corresponding [code.(pc)] instruction. *)
exc_tbl : exception_handler list;
(** [exc_tbl] is the exception table of the method code. Jumps are
absolute. *)
line_number_table : (int * int) list option;
(** [line_number_table] contains debug information. It is a list of pairs
[(i,j)] where [i] indicates the index into the bytecode array at which the
code for a new line [j] in the original source file begins. *)
pc_bc2ir : int Ptmap.t;
(** map from bytecode code line to ir code line (very sparse). *)
pc_ir2bc : int array;
(** map from ir code line to bytecode code line *)
}
val jump_target : t -> bool array
(** [print_phi_node phi] returns a string representation for phi node [phi]. *)
val print_phi_node : ?phi_simpl:bool -> phi_node -> string
(** [print_phi_nodes phi_list] returns a string representation for phi nodes
[phi_list]. *)
val print_phi_nodes : ?phi_simpl:bool -> phi_node list -> string
* [ print c ] returns a list of string representations for instruction of [ c ]
( one string for each program point of the code [ c ] ) .
(one string for each program point of the code [c]). *)
val print : ?phi_simpl:bool -> t -> string list
(** [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i]. *)
val exception_edges : t -> (int * exception_handler) list
* [ get_source_line_number pc m ] returns the source line number corresponding
the program point [ pp ] of the method code [ m ] . The line number give a rough
idea and may be wrong . It uses the field [ t.pc_ir2bc ] of the code
representation and the attribute LineNumberTable ( cf . § 4.7.8 ) .
the program point [pp] of the method code [m]. The line number give a rough
idea and may be wrong. It uses the field [t.pc_ir2bc] of the code
representation and the attribute LineNumberTable (cf. JVMS §4.7.8).*)
val get_source_line_number : int -> t -> int option
end
* Functor to create code representation from SSA " variable " and " instruction "
module T (Var : VarSig)
(Instr : Cmn.InstrSig)
: sig
type var_t = Var.var
type instr_t = Instr.instr
type var_set = Var.VarSet.t
include Cmn.ExceptionSig with type var_e = var_t
type phi_node = {
def : Var.var;
* The variable defined in the phi node
use : Var.var array;
* Array of used variables in the phi node , the index of a used
variable in the array corresponds to the index of the program
point predecessor in [ preds.(phi_node_pc ) ] .
variable in the array corresponds to the index of the program
point predecessor in [preds.(phi_node_pc)].*)
use_set : Var.VarSet.t;
* Set of used variables in the phi node ( no information on
predecessor program point for a used variable )
predecessor program point for a used variable)*)
}
type t = {
vars : Var.var array;
params : (JBasics.value_type * Var.var) list;
code : Instr.instr array;
preds : (int array) array;
(** Array of instructions program point that are predecessors of
instruction [pc]. *)
phi_nodes : (phi_node list) array;
* Array of phi nodes assignments . Each phi nodes assignments at point [ pc ] must
be executed before the corresponding [ code.(pc ) ] instruction .
be executed before the corresponding [code.(pc)] instruction. *)
exc_tbl : exception_handler list;
line_number_table : (int * int) list option;
pc_bc2ir : int Ptmap.t;
pc_ir2bc : int array;
}
val jump_target : t -> bool array
(** [print_phi_node phi] returns a string representation for phi node [phi]. *)
val print_phi_node : ?phi_simpl:bool -> phi_node -> string
(** [print_phi_nodes phi_list] returns a string representation for phi nodes
[phi_list]. *)
val print_phi_nodes : ?phi_simpl:bool -> phi_node list -> string
* [ print c ] returns a list of string representations for instruction of [ c ]
( one string for each program point of the code [ c ] ) .
(one string for each program point of the code [c]). *)
val print : ?phi_simpl:bool -> t -> string list
(** [print_simple c] same fun as print with phi_simpl = true (for
compatibility with non-SSA representations). *)
val print_simple : t -> string list
(** [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i]. *)
val exception_edges : t -> (int * exception_handler) list
* [ get_source_line_number pc m ] returns the source line number corresponding
the program point [ pp ] of the method code [ m ] . The line number give a rough
idea and may be wrong . It uses the field [ t.pc_ir2bc ] of the code
representation and the attribute LineNumberTable ( cf . § 4.7.8 ) .
the program point [pp] of the method code [m]. The line number give a rough
idea and may be wrong. It uses the field [t.pc_ir2bc] of the code
representation and the attribute LineNumberTable (cf. JVMS §4.7.8).*)
val get_source_line_number : int -> t -> int option
val vars : t -> Var.var array
val params : t -> (JBasics.value_type * Var.var) list
val code : t -> Instr.instr array
val exc_tbl : t -> exception_handler list
val line_number_table : t -> (int * int) list option
val pc_bc2ir : t -> int Ptmap.t
val pc_ir2bc : t -> int array
end
(** Signature of type and function to provide in order to transform IR
in SSA form*)
module type IR2SsaSig = sig
type ir_t
type ir_var
type ir_instr
type ir_exc_h
type ssa_var
type ssa_instr
type ssa_exc_h
val use_bcvars : ir_instr -> Ptset.t
val def_bcvar : ir_instr -> Ptset.t
val var_defs : ir_t -> Ptset.t Ptmap.t
val map_instr : (ir_var -> ssa_var) -> (ir_var -> ssa_var) -> ir_instr -> ssa_instr
val map_exception_handler :
(ir_var -> int -> ssa_var) -> ir_exc_h -> ssa_exc_h
val preds : ir_t -> int -> int list
val succs : ir_t -> int -> int list
val live_analysis : ir_t -> int -> ir_var -> bool
end
(** Functor that provides the transformation function *)
module SSA
(IR:IRSig)
(Var:VarSig
with type ir_var = IR.var
and type var = int * (IR.var * int))
(TSSA:TSsaSig
with type var_t = Var.var
and type var_set = Var.VarSet.t)
(IR2SSA:IR2SsaSig
with type ir_t = IR.t
and type ir_var = IR.var
and type ir_instr = IR.instr
and type ir_exc_h = IR.exception_handler
and type ssa_var = Var.var
and type ssa_instr = TSSA.instr_t
and type ssa_exc_h = TSSA.exception_handler
)
:
sig
val transform_from_ir : IR.t -> TSSA.t
end
*)
| null | https://raw.githubusercontent.com/javalib-team/sawja/da39f9c1c4fc52a1a1a6350be0e39789812b6c00/src/ssaBir.mli | ocaml | * Signature of IR to transform in SSA form
* Abstract data type for variables
* [var_equal v1 v2] is equivalent to [v1 = v2], but is faster.
* [var_orig v] is [true] if and only if the variable [v] was already used at
bytecode level.
* Used only for internal transformations.
* [var_name_debug v] returns, if possible, the original variable name of [v],
if the initial class was compiled using debug information.
* [bc_num v] returns the local var number if the variable comes from the initial bytecode program.
* [index v] returns the hash value of the given variable.
* All variables that appear in the method. [vars.(i)] is the variable of
index [i].
* [params] contains the method parameters (including the receiver this for
virtual methods).
* Array of instructions the immediate successor of [pc] is [pc+1]. Jumps
are absolute.
* [exc_tbl] is the exception table of the method code. Jumps are
absolute.
* [line_number_table] contains debug information. It is a list of pairs
[(i,j)] where [i] indicates the index into the bytecode array at which the
code for a new line [j] in the original source file begins.
* map from bytecode code line to ir code line (very sparse).
* map from ir code line to bytecode code line
* [jump_target m] indicates whether program points are join points or not in [m].
* [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i].
* Common "variable" type and functions signature for SSA form
* Functor to create "variable" type and functions for SSA form from
IR
* All variables that appear in the method. [vars.(i)] is the variable of
index [i].
* [params] contains the method parameters (including the receiver this for
virtual methods).
* Array of instructions the immediate successor of [pc] is [pc+1]. Jumps
are absolute.
* Array of instructions program point that are predecessors of
instruction [pc].
* [exc_tbl] is the exception table of the method code. Jumps are
absolute.
* [line_number_table] contains debug information. It is a list of pairs
[(i,j)] where [i] indicates the index into the bytecode array at which the
code for a new line [j] in the original source file begins.
* map from bytecode code line to ir code line (very sparse).
* map from ir code line to bytecode code line
* [print_phi_node phi] returns a string representation for phi node [phi].
* [print_phi_nodes phi_list] returns a string representation for phi nodes
[phi_list].
* [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i].
* Array of instructions program point that are predecessors of
instruction [pc].
* [print_phi_node phi] returns a string representation for phi node [phi].
* [print_phi_nodes phi_list] returns a string representation for phi nodes
[phi_list].
* [print_simple c] same fun as print with phi_simpl = true (for
compatibility with non-SSA representations).
* [exception_edges m] returns a list of edges [(i,e);...] where
[i] is an instruction index in [m] and [e] is a handler whose
range contains [i].
* Signature of type and function to provide in order to transform IR
in SSA form
* Functor that provides the transformation function |
* This file is part of SAWJA
* Copyright ( c)2010 ( INRIA )
* Copyright ( c)2010 ( INRIA )
*
* This program is free software : you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation , either version 3 of
* the License , or ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* You should have received a copy of the GNU General Public
* License along with this program . If not , see
* < / > .
* This file is part of SAWJA
* Copyright (c)2010 David Pichardie (INRIA)
* Copyright (c)2010 Vincent Monfort (INRIA)
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* </>.
*)
open Javalib_pack
open JBasics
open Javalib
* Common code for SSA representations
module type IRSig = sig
type var
val var_equal : var -> var -> bool
val var_orig : var -> bool
val var_ssa : var -> bool
* [ v ] returns a string representation of the variable [ v ] .
val var_name : var -> string
val var_name_debug : var -> string option
* [ var_name_g v ] returns a string representation of the variable [ v ] .
If the initial class was compiled using debug information , original
variable names are build on this information . It is equivalent to
[ var_name_g x = match var_name_debug with Some s - > s | _ - > var_name x ]
If the initial class was compiled using debug information, original
variable names are build on this information. It is equivalent to
[var_name_g x = match var_name_debug with Some s -> s | _ -> var_name x] *)
val var_name_g : var -> string
val bc_num : var -> int option
val index : var -> int
type instr
val print_instr : ?show_type:bool -> instr -> string
type exception_handler = {
e_start : int;
e_end : int;
e_handler : int;
e_catch_type : JBasics.class_name option;
e_catch_var : var
}
* [ t ] is the parameter type for JBir methods .
type t = {
vars : var array;
params : (JBasics.value_type * var) list;
code : instr array;
exc_tbl : exception_handler list;
line_number_table : (int * int) list option;
pc_bc2ir : int Ptmap.t;
pc_ir2bc : int array;
}
val jump_target : t -> bool array
val exception_edges : t -> (int * exception_handler) list
end
( * * Common " variable " type and functions signature for SSA form
module type VarSig =
sig
type ir_var
type var = int * (ir_var * int)
val var_equal : var -> var -> bool
val var_orig : var -> bool
val var_name_debug: var -> string option
val var_name: var -> string
val var_name_g: var -> string
val bc_num: var -> int option
val var_origin : var -> ir_var
val var_ssa_index : var -> int
val index : var -> int
type dictionary
val make_dictionary : unit -> dictionary
val make_var : dictionary -> ir_var -> int -> var
val make_array_var : dictionary -> ir_var -> var array
module VarSet : Javalib_pack.JBasics.GenericSetSig with type elt = int * (ir_var * int)
module VarMap : Javalib_pack.JBasics.GenericMapSig with type key = int * (ir_var * int)
end
module Var (IR:IRSig) : VarSig with type ir_var = IR.var
* Common code represenation types for SSA forms
module type TSsaSig =
sig
type var_t
type var_set
type instr_t
type phi_node = {
def : var_t;
* The variable defined in the phi node
use : var_t array;
* Array of used variable in the phi node , the index of a used
variable in the array corresponds to the index of the program
point predecessor in [ preds.(phi_node_pc ) ] .
variable in the array corresponds to the index of the program
point predecessor in [preds.(phi_node_pc)].*)
use_set : var_set;
* Set of used variable in the phi node ( no information on
predecessor program point for a used variable )
predecessor program point for a used variable)*)
}
type t = {
vars : var_t array;
params : (JBasics.value_type * var_t) list;
code : instr_t array;
preds : (int array) array;
phi_nodes : (phi_node list) array;
* Array of phi nodes assignments . Each phi nodes assignments at point [ pc ] must
be executed before the corresponding [ code.(pc ) ] instruction .
be executed before the corresponding [code.(pc)] instruction. *)
exc_tbl : exception_handler list;
line_number_table : (int * int) list option;
pc_bc2ir : int Ptmap.t;
pc_ir2bc : int array;
}
val jump_target : t -> bool array
val print_phi_node : ?phi_simpl:bool -> phi_node -> string
val print_phi_nodes : ?phi_simpl:bool -> phi_node list -> string
* [ print c ] returns a list of string representations for instruction of [ c ]
( one string for each program point of the code [ c ] ) .
(one string for each program point of the code [c]). *)
val print : ?phi_simpl:bool -> t -> string list
val exception_edges : t -> (int * exception_handler) list
* [ get_source_line_number pc m ] returns the source line number corresponding
the program point [ pp ] of the method code [ m ] . The line number give a rough
idea and may be wrong . It uses the field [ t.pc_ir2bc ] of the code
representation and the attribute LineNumberTable ( cf . § 4.7.8 ) .
the program point [pp] of the method code [m]. The line number give a rough
idea and may be wrong. It uses the field [t.pc_ir2bc] of the code
representation and the attribute LineNumberTable (cf. JVMS §4.7.8).*)
val get_source_line_number : int -> t -> int option
end
* Functor to create code representation from SSA " variable " and " instruction "
module T (Var : VarSig)
(Instr : Cmn.InstrSig)
: sig
type var_t = Var.var
type instr_t = Instr.instr
type var_set = Var.VarSet.t
include Cmn.ExceptionSig with type var_e = var_t
type phi_node = {
def : Var.var;
* The variable defined in the phi node
use : Var.var array;
* Array of used variables in the phi node , the index of a used
variable in the array corresponds to the index of the program
point predecessor in [ preds.(phi_node_pc ) ] .
variable in the array corresponds to the index of the program
point predecessor in [preds.(phi_node_pc)].*)
use_set : Var.VarSet.t;
* Set of used variables in the phi node ( no information on
predecessor program point for a used variable )
predecessor program point for a used variable)*)
}
type t = {
vars : Var.var array;
params : (JBasics.value_type * Var.var) list;
code : Instr.instr array;
preds : (int array) array;
phi_nodes : (phi_node list) array;
* Array of phi nodes assignments . Each phi nodes assignments at point [ pc ] must
be executed before the corresponding [ code.(pc ) ] instruction .
be executed before the corresponding [code.(pc)] instruction. *)
exc_tbl : exception_handler list;
line_number_table : (int * int) list option;
pc_bc2ir : int Ptmap.t;
pc_ir2bc : int array;
}
val jump_target : t -> bool array
val print_phi_node : ?phi_simpl:bool -> phi_node -> string
val print_phi_nodes : ?phi_simpl:bool -> phi_node list -> string
* [ print c ] returns a list of string representations for instruction of [ c ]
( one string for each program point of the code [ c ] ) .
(one string for each program point of the code [c]). *)
val print : ?phi_simpl:bool -> t -> string list
val print_simple : t -> string list
val exception_edges : t -> (int * exception_handler) list
* [ get_source_line_number pc m ] returns the source line number corresponding
the program point [ pp ] of the method code [ m ] . The line number give a rough
idea and may be wrong . It uses the field [ t.pc_ir2bc ] of the code
representation and the attribute LineNumberTable ( cf . § 4.7.8 ) .
the program point [pp] of the method code [m]. The line number give a rough
idea and may be wrong. It uses the field [t.pc_ir2bc] of the code
representation and the attribute LineNumberTable (cf. JVMS §4.7.8).*)
val get_source_line_number : int -> t -> int option
val vars : t -> Var.var array
val params : t -> (JBasics.value_type * Var.var) list
val code : t -> Instr.instr array
val exc_tbl : t -> exception_handler list
val line_number_table : t -> (int * int) list option
val pc_bc2ir : t -> int Ptmap.t
val pc_ir2bc : t -> int array
end
module type IR2SsaSig = sig
type ir_t
type ir_var
type ir_instr
type ir_exc_h
type ssa_var
type ssa_instr
type ssa_exc_h
val use_bcvars : ir_instr -> Ptset.t
val def_bcvar : ir_instr -> Ptset.t
val var_defs : ir_t -> Ptset.t Ptmap.t
val map_instr : (ir_var -> ssa_var) -> (ir_var -> ssa_var) -> ir_instr -> ssa_instr
val map_exception_handler :
(ir_var -> int -> ssa_var) -> ir_exc_h -> ssa_exc_h
val preds : ir_t -> int -> int list
val succs : ir_t -> int -> int list
val live_analysis : ir_t -> int -> ir_var -> bool
end
module SSA
(IR:IRSig)
(Var:VarSig
with type ir_var = IR.var
and type var = int * (IR.var * int))
(TSSA:TSsaSig
with type var_t = Var.var
and type var_set = Var.VarSet.t)
(IR2SSA:IR2SsaSig
with type ir_t = IR.t
and type ir_var = IR.var
and type ir_instr = IR.instr
and type ir_exc_h = IR.exception_handler
and type ssa_var = Var.var
and type ssa_instr = TSSA.instr_t
and type ssa_exc_h = TSSA.exception_handler
)
:
sig
val transform_from_ir : IR.t -> TSSA.t
end
*)
|
f928b4e2f98e2871dc0189a541f6dba8de54923a06bf58ba1201226fcd59ab28 | fpottier/mpri-2.4-projet-2022-2023 | Name.ml | Names of toplevel functions . Used in Surface and Linear .
type name =
string
type names =
name list
(* Sets of names. *)
module NameSet =
Set.Make(struct
type t = name
let compare = String.compare
end)
| null | https://raw.githubusercontent.com/fpottier/mpri-2.4-projet-2022-2023/1ce08cadfb3a8ec8bc72609bc82873b29d2ce241/src/Name.ml | ocaml | Sets of names. | Names of toplevel functions . Used in Surface and Linear .
type name =
string
type names =
name list
module NameSet =
Set.Make(struct
type t = name
let compare = String.compare
end)
|
0f443a3f453563fe71f0824b3cf54118583a5fdf096ca5e599c61fab5c699b02 | mbuczko/revolt | test.clj | (ns revolt.tasks.test
(:require [metosin.bat-test.impl :as bat-test]
[clojure.tools.logging :as log])
(:import [javazoom.jl.player Player]
[java.io File FileInputStream]))
(defonce default-options
{
Regex used to select test namespaces
:test-matcher #".*test"
;; Run tests parallel
:parallel false
;; Reporting function
;; See -test/blob/master/src/metosin/bat_test.clj for other options
:report :pretty
;; Function to filter the test vars
:filter nil
;; Function to be called before running tests (after reloading namespaces)
:on-start nil
;; Function to be called after running tests
:on-end nil
;; Enable Cloverage coverage report
:cloverage false
;; Cloverage options
:cloverage-opts nil
;; Sound notification?
:notify true
;; Directories to watch
:watch-directories ["src" "test"]})
(defn play!
[file]
(try
(-> (.getResourceAsStream (clojure.lang.RT/baseLoader) file)
java.io.BufferedInputStream.
javazoom.jl.player.Player.
.play)
(catch Exception e
(log/error "Cannot play a file: " (str file)))))
(defn invoke
[ctx opts]
(let [{:keys [fail error] :as result} (bat-test/run opts)]
(when (:notify opts)
(future (play!
(cond
(> error 0) "notification/failure.mp3"
(> fail 0) "notification/warning.mp3"
:default "notification/success.mp3"))))
(assoc ctx :test-report result)))
| null | https://raw.githubusercontent.com/mbuczko/revolt/65ef8de68d7aa77d1ced40e7d669ebcbba8a340e/src/revolt/tasks/test.clj | clojure | Run tests parallel
Reporting function
See -test/blob/master/src/metosin/bat_test.clj for other options
Function to filter the test vars
Function to be called before running tests (after reloading namespaces)
Function to be called after running tests
Enable Cloverage coverage report
Cloverage options
Sound notification?
Directories to watch | (ns revolt.tasks.test
(:require [metosin.bat-test.impl :as bat-test]
[clojure.tools.logging :as log])
(:import [javazoom.jl.player Player]
[java.io File FileInputStream]))
(defonce default-options
{
Regex used to select test namespaces
:test-matcher #".*test"
:parallel false
:report :pretty
:filter nil
:on-start nil
:on-end nil
:cloverage false
:cloverage-opts nil
:notify true
:watch-directories ["src" "test"]})
(defn play!
[file]
(try
(-> (.getResourceAsStream (clojure.lang.RT/baseLoader) file)
java.io.BufferedInputStream.
javazoom.jl.player.Player.
.play)
(catch Exception e
(log/error "Cannot play a file: " (str file)))))
(defn invoke
[ctx opts]
(let [{:keys [fail error] :as result} (bat-test/run opts)]
(when (:notify opts)
(future (play!
(cond
(> error 0) "notification/failure.mp3"
(> fail 0) "notification/warning.mp3"
:default "notification/success.mp3"))))
(assoc ctx :test-report result)))
|
3089f1f52b8bf0d9f7e49290b6c01a547c3a8450076ad2d85412451f8b09924c | jaspervdj/dcpu16-hs | Memory.hs | # LANGUAGE BangPatterns , MagicHash , UnboxedTuples #
module Memory
( -- * Addresses
Register (..)
, Address (..)
-- * Talking to the memory
, Memory
, new
, load
, store
) where
import Control.Monad (forM_)
import GHC.Base (Int (..))
import GHC.Prim
import GHC.ST (ST (..))
import GHC.Word (Word16 (..))
import Util
data Register = A | B | C | X | Y | Z | I | J
deriving (Bounded, Enum, Eq, Show)
data Address
= Pc
| Sp
| O
| Cycles
| Register Register
| Ram Word16
deriving (Eq)
instance Show Address where
show Pc = "Pc"
show Sp = "Sp"
show O = "O"
show Cycles = "Cycles"
show (Register r) = show r
show (Ram r) = "[" ++ prettifyWord16 r ++ "]"
fromAddress :: Address -> Int
fromAddress Pc = 0x0
fromAddress Sp = 0x1
fromAddress O = 0x2
fromAddress Cycles = 0x3
fromAddress (Register r) = 0x8 + fromEnum r
fromAddress (Ram r) = 0x16 + fromIntegral r
data Memory s = Memory (MutableByteArray# s)
new :: ST s (Memory s)
new = do
mem <- new'
store mem Pc 0x0000
store mem Sp 0xffff
store mem O 0x0000
store mem Cycles 0x0000
-- TODO: This is slow.
forM_ [minBound .. maxBound] $ \r -> store mem (Register r) 0x0000
forM_ [minBound .. maxBound] $ \r -> store mem (Ram r) 0x0000
return mem
new' :: ST s (Memory s)
new' = ST $ \s1# ->
case newAlignedPinnedByteArray# (len# *# 2#) 2# s1# of
(# s2#, marr# #) -> (# s2#, Memory marr# #)
where
!(I# len#) = 0x8 + 0x8 + 0x10000
load :: Memory s -> Address -> ST s Word16
load (Memory marr#) address = ST $ \s1# ->
case readWord16Array# marr# i# s1# of
(# s2#, w16# #) -> (# s2#, W16# w16# #)
where
!(I# i#) = fromAddress address
store :: Memory s -> Address -> Word16 -> ST s ()
store (Memory marr#) address (W16# w16#) = ST $ \s1# ->
case writeWord16Array# marr# i# w16# s1# of
s2# -> (# s2#, () #)
where
!(I# i#) = fromAddress address
| null | https://raw.githubusercontent.com/jaspervdj/dcpu16-hs/7598f083fa6ba88b72f7896bd14b705474a01d25/src/Memory.hs | haskell | * Addresses
* Talking to the memory
TODO: This is slow. | # LANGUAGE BangPatterns , MagicHash , UnboxedTuples #
module Memory
Register (..)
, Address (..)
, Memory
, new
, load
, store
) where
import Control.Monad (forM_)
import GHC.Base (Int (..))
import GHC.Prim
import GHC.ST (ST (..))
import GHC.Word (Word16 (..))
import Util
data Register = A | B | C | X | Y | Z | I | J
deriving (Bounded, Enum, Eq, Show)
data Address
= Pc
| Sp
| O
| Cycles
| Register Register
| Ram Word16
deriving (Eq)
instance Show Address where
show Pc = "Pc"
show Sp = "Sp"
show O = "O"
show Cycles = "Cycles"
show (Register r) = show r
show (Ram r) = "[" ++ prettifyWord16 r ++ "]"
fromAddress :: Address -> Int
fromAddress Pc = 0x0
fromAddress Sp = 0x1
fromAddress O = 0x2
fromAddress Cycles = 0x3
fromAddress (Register r) = 0x8 + fromEnum r
fromAddress (Ram r) = 0x16 + fromIntegral r
data Memory s = Memory (MutableByteArray# s)
new :: ST s (Memory s)
new = do
mem <- new'
store mem Pc 0x0000
store mem Sp 0xffff
store mem O 0x0000
store mem Cycles 0x0000
forM_ [minBound .. maxBound] $ \r -> store mem (Register r) 0x0000
forM_ [minBound .. maxBound] $ \r -> store mem (Ram r) 0x0000
return mem
new' :: ST s (Memory s)
new' = ST $ \s1# ->
case newAlignedPinnedByteArray# (len# *# 2#) 2# s1# of
(# s2#, marr# #) -> (# s2#, Memory marr# #)
where
!(I# len#) = 0x8 + 0x8 + 0x10000
load :: Memory s -> Address -> ST s Word16
load (Memory marr#) address = ST $ \s1# ->
case readWord16Array# marr# i# s1# of
(# s2#, w16# #) -> (# s2#, W16# w16# #)
where
!(I# i#) = fromAddress address
store :: Memory s -> Address -> Word16 -> ST s ()
store (Memory marr#) address (W16# w16#) = ST $ \s1# ->
case writeWord16Array# marr# i# w16# s1# of
s2# -> (# s2#, () #)
where
!(I# i#) = fromAddress address
|
2f79b38708781a945e44e072fd1a476eaacb975656967bea4ece40c08f8c6ec9 | esl/MongooseIM | amp_big_SUITE.erl | -module(amp_big_SUITE).
%% @doc Tests for XEP-0079 Advanced Message Processing support
< a href=" / extensions / xep-0079.html">XEP-0079</a >
@author < >
2014 Erlang Solutions , Ltd.
This work was sponsored by Grindr.com
-compile([export_all, nowarn_export_all]).
-include_lib("common_test/include/ct.hrl").
-include_lib("escalus/include/escalus.hrl").
-include_lib("escalus/include/escalus_xmlns.hrl").
-include_lib("exml/include/exml.hrl").
-import(distributed_helper, [mim/0,
require_rpc_nodes/1,
rpc/4]).
-import(muc_light_helper, [lbin/1]).
-import(domain_helper, [host_type/0, domain/0]).
suite() ->
require_rpc_nodes([mim]) ++ escalus:suite().
all() ->
[{group, G} || G <- main_group_names(), is_enabled(G)].
groups() ->
group_spec(main_group_names()).
is_enabled(mam) -> mongoose_helper:is_rdbms_enabled(host_type());
is_enabled(_) -> true.
%% Group definitions
main_group_names() ->
[basic, mam, offline].
subgroups(mam) -> [mam_success, mam_failure];
subgroups(offline) -> [offline_success, offline_failure];
subgroups(_) -> [].
group_spec(Groups) when is_list(Groups) ->
lists:flatmap(fun group_spec/1, Groups);
group_spec(Group) ->
case subgroups(Group) of
[] -> [{Group, [parallel], test_cases(Group)}];
SubGroups -> [{Group, [{group, SubG} || SubG <- SubGroups]} | group_spec(SubGroups)]
end.
test_cases(Group) ->
regular_tests(Group) ++ multiple_config_cth:flatten_and_strip_config(tests_with_config(Group)).
regular_tests(basic) -> basic_test_cases();
regular_tests(_) -> [].
%% This function is called by multiple_config_cth for each group
%% to get a list of configs for each test case
-spec tests_with_config(_GroupName :: atom()) -> [{TestCase :: atom(),
[Config :: [{Key :: atom(), Value :: term()}]]}].
tests_with_config(_GroupName) ->
lists:append([deliver_tests_with_config(notify),
deliver_tests_with_config(error),
deliver_tests_with_config(drop)]).
%% Each of the 'deliver' tests is repeated several times, each time with a different config
deliver_tests_with_config(Action) ->
multiple_config_cth:add_config(deliver_rule_configs(Action), deliver_test_cases(Action)).
Each config tests different rules in the AMP message
deliver_rule_configs(Action) ->
[
[{rules, [{deliver, direct, Action}]}],
[{rules, [{deliver, stored, Action}]}],
[{rules, [{deliver, none, Action}]}],
[{rules, [{deliver, direct, Action},
{deliver, stored, Action},
{deliver, none, Action}]}]
].
%% Test case list, each test has to be listed exactly once
basic_test_cases() ->
[initial_service_discovery_test,
actions_and_conditions_discovery_test,
unsupported_actions_test,
unsupported_conditions_test,
unacceptable_rules_test,
notify_match_resource_any_test,
notify_match_resource_exact_test,
notify_match_resource_other_test,
notify_match_resource_other_bare_test,
last_rule_applies_test].
deliver_test_cases(notify) ->
[notify_deliver_to_online_user_test,
notify_deliver_to_online_user_bare_jid_test,
notify_deliver_to_online_user_recipient_privacy_test,
notify_deliver_to_offline_user_test,
notify_deliver_to_offline_user_recipient_privacy_test,
notify_deliver_to_online_user_broken_connection_test,
notify_deliver_to_stranger_test,
notify_deliver_to_unknown_domain_test];
deliver_test_cases(error) ->
[error_deliver_to_online_user_test,
error_deliver_to_offline_user_test,
error_deliver_to_stranger_test];
deliver_test_cases(drop) ->
[drop_deliver_to_online_user_test,
drop_deliver_to_offline_user_test,
drop_deliver_to_stranger_test].
%% Setup and teardown
init_per_suite(Config) ->
ConfigWithHooks = [{ct_hooks, [{multiple_config_cth, fun tests_with_config/1}]} | Config],
{Mod, Code} = rpc(mim(), dynamic_compile, from_string, [amp_test_helper_code()]),
rpc(mim(), code, load_binary, [Mod, "amp_test_helper.erl", Code]),
setup_meck(suite),
escalus:init_per_suite(ConfigWithHooks).
amp_test_helper_code() ->
"-module(amp_test_helper).\n"
"-compile([export_all, nowarn_export_all]).\n"
"setup_meck() ->\n"
" meck:expect(ranch_tcp, send, fun ranch_tcp_send/2).\n"
"ranch_tcp_send(Socket, Data) ->\n"
" case catch binary:match(Data, <<\"Recipient connection breaks\">>) of\n"
" {N, _} when is_integer(N) -> {error, simulated};\n"
" _ -> meck:passthrough([Socket, Data])\n"
" end.\n".
end_per_suite(C) ->
teardown_meck(suite),
escalus_fresh:clean(),
escalus:end_per_suite(C).
init_per_group(GroupName, Config) ->
Config1 = case lists:member(GroupName, main_group_names()) of
true ->
ConfigWithModules = dynamic_modules:save_modules(host_type(), Config),
dynamic_modules:ensure_modules(host_type(), required_modules(GroupName)),
ConfigWithModules;
false ->
Config
end,
setup_meck(GroupName),
save_offline_status(GroupName, Config1).
setup_meck(suite) ->
ok = rpc(mim(), meck, new, [ranch_tcp, [passthrough, no_link]]),
ok = rpc(mim(), amp_test_helper, setup_meck, []);
setup_meck(mam_failure) ->
ok = rpc(mim(), meck, expect, [mod_mam_rdbms_arch, archive_message, 3, {ok, {error, simulated}}]);
setup_meck(offline_failure) ->
ok = rpc(mim(), meck, expect, [mod_offline_mnesia, write_messages, 4, {error, simulated}]);
setup_meck(_) -> ok.
save_offline_status(mam_success, Config) -> [{offline_storage, mam} | Config];
save_offline_status(mam_failure, Config) -> [{offline_storage, mam_failure} | Config];
save_offline_status(offline_success, Config) -> [{offline_storage, offline} | Config];
save_offline_status(offline_failure, Config) -> [{offline_storage, offline_failure} | Config];
save_offline_status(basic, Config) -> [{offline_storage, none} | Config];
save_offline_status(_GN, Config) -> Config.
end_per_group(GroupName, Config) ->
teardown_meck(GroupName),
case lists:member(GroupName, main_group_names()) of
true -> dynamic_modules:restore_modules(Config);
false -> ok
end.
teardown_meck(mam_failure) ->
rpc(mim(), meck, unload, [mod_mam_rdbms_arch]);
teardown_meck(offline_failure) ->
rpc(mim(), meck, unload, [mod_offline_mnesia]);
teardown_meck(suite) ->
rpc(mim(), meck, unload, []);
teardown_meck(_) -> ok.
init_per_testcase(Name, C) -> escalus:init_per_testcase(Name, C).
end_per_testcase(Name, C) -> escalus:end_per_testcase(Name, C).
%% Test cases
initial_service_discovery_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
escalus_client:send(Alice, disco_info()),
Response = escalus_client:wait_for_stanza(Alice),
escalus:assert(has_feature, [ns_amp()], Response)
end).
actions_and_conditions_discovery_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
Args = [ns_amp(),
<<"">>,
<<"">>,
<<"">>,
<<"-resource">>
],
escalus_client:send(Alice, disco_info_amp_node()),
Response = escalus_client:wait_for_stanza(Alice),
assert_has_features(Response, Args)
end).
unsupported_actions_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Msg = amp_message_to(Bob, [{deliver, direct, alert}], % alert is unsupported
<<"A paradoxical payload!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_amp_error(Alice, {deliver, direct, alert}, <<"unsupported-actions">>)
end).
unsupported_conditions_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
%% expire-at is unsupported
Msg = amp_message_to(Bob, [{'expire-at', <<"2020-06-06T12:20:20Z">>, notify}],
<<"Never fade away!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_amp_error(Alice, {'expire-at', <<"2020-06-06T12:20:20Z">>, notify},
<<"unsupported-conditions">>)
end).
unacceptable_rules_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Msg = amp_message_to(Bob, [{broken, rule, spec}
, {also_broken, rule, spec}
],
<<"Break all the rules!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_amp_error(Alice, [{broken, rule, spec}
, {also_broken, rule, spec}],
<<"not-acceptable">>)
end).
notify_deliver_to_online_user_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Rule = {deliver, direct, notify},
Rules = rules(Config, [Rule]),
Msg = amp_message_to(Bob, Rules, <<"I want to be sure you get this!">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, Bob, Rule);
false -> ok
end,
client_receives_message(Bob, <<"I want to be sure you get this!">>),
client_receives_nothing(Alice)
end).
notify_deliver_to_online_user_bare_jid_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Message = <<"One of your resources needs to get this!">>,
Rule = {deliver, direct, notify},
Rules = rules(Config, [Rule]),
BobsBareJid = escalus_client:short_jid(Bob),
Msg = amp_message_to(BobsBareJid, Rules, Message),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, BobsBareJid, Rule);
false -> ok
end,
client_receives_message(Bob, Message),
client_receives_nothing(Alice)
end).
notify_deliver_to_online_user_recipient_privacy_test(Config) ->
case is_module_loaded(mod_mam_pm) of
true -> {skip, "MAM does not support privacy lists"};
false -> do_notify_deliver_to_online_user_recipient_privacy_test(Config)
end.
do_notify_deliver_to_online_user_recipient_privacy_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Rule = {deliver, none, notify},
Rules = rules(Config, [Rule]),
Msg = amp_message_to(Bob, Rules, <<"Should be filtered by Bob's privacy list">>),
privacy_helper:set_and_activate(Bob, <<"deny_all_message">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, Bob, Rule);
false -> ok
end,
client_receives_generic_error(Alice, <<"503">>, <<"cancel">>),
client_receives_nothing(Alice),
client_receives_nothing(Bob)
end).
notify_deliver_to_online_user_broken_connection_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Rule = {deliver, case ?config(offline_storage, Config) of
mam -> stored;
_ -> none
end, notify},
Rules = rules(Config, [Rule]),
%% This special message is matched by the ejabberd_socket mock
%% (see amp_test_helper_code/0)
Msg = amp_message_to(Bob, Rules, <<"Recipient connection breaks">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, Bob, Rule);
false -> ok
end,
client_receives_nothing(Alice),
connection to avoid errors with closing the stream
%% while the session is being resumed after the simulated error
escalus_connection:kill(Bob)
end),
ok.
notify_deliver_to_offline_user_test(Config) ->
FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
escalus:story(
FreshConfig, [{alice, 1}],
fun(Alice) ->
%% given
Rule = {deliver, case is_offline_storage_working(Config) of
true -> stored;
false -> none
end, notify},
Rules = rules(Config, [Rule]),
BobJid = escalus_users:get_jid(FreshConfig, bob),
Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, BobJid, Rule);
false -> ok
end,
case ?config(offline_storage, Config) of
offline_failure -> client_receives_generic_error(Alice, <<"500">>, <<"wait">>);
_ -> client_receives_nothing(Alice)
end
end),
wait_until_no_session(FreshConfig, alice),
case is_offline_storage_working(Config) of
true -> user_has_incoming_offline_message(FreshConfig, bob, <<"A message in a bottle...">>);
false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
end.
is_offline_storage_working(Config) ->
Status = ?config(offline_storage, Config),
Status == mam orelse Status == offline.
notify_deliver_to_offline_user_recipient_privacy_test(Config) ->
case is_module_loaded(mod_mam_pm) of
true -> {skip, "MAM does not support privacy lists"};
false -> do_notify_deliver_to_offline_user_recipient_privacy_test(Config)
end.
do_notify_deliver_to_offline_user_recipient_privacy_test(Config) ->
FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
escalus:story(
FreshConfig, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
privacy_helper:set_and_activate(Bob, <<"deny_all_message">>),
privacy_helper:set_default_list(Bob, <<"deny_all_message">>),
mongoose_helper:logout_user(Config, Bob),
%% given
Rule = {deliver, none, notify},
Rules = rules(Config, [Rule]),
BobJid = lbin(escalus_client:short_jid(Bob)),
Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, BobJid, Rule);
false -> ok
end,
client_receives_nothing(Alice)
end),
user_has_no_incoming_offline_messages(FreshConfig, bob).
notify_deliver_to_stranger_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
%% given
Rule = {deliver, none, notify},
Rules = rules(Config, [Rule]),
Domain = domain(),
StrangerJid = <<"stranger@", Domain/binary>>,
Msg = amp_message_to(StrangerJid, Rules, <<"A message in a bottle...">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, StrangerJid, Rule);
false -> ok
end,
client_receives_generic_error(Alice, <<"503">>, <<"cancel">>),
client_receives_nothing(Alice)
end).
notify_deliver_to_unknown_domain_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
%% given
StrangerJid = <<"">>,
Rule = {deliver, none, notify},
Rules = rules(Config, [Rule]),
Msg = amp_message_to(StrangerJid, Rules, <<"Msg to unknown domain">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_notification(Alice, StrangerJid, Rule);
false -> ok
end,
error 404 : ' remote server not found ' is expected
client_receives_generic_error(Alice, <<"404">>, <<"cancel">>),
client_receives_nothing(Alice)
end).
notify_match_resource_any_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 4}],
fun(Alice, Bob, _, _, _) ->
%% given
Msg = amp_message_to(Bob, [{'match-resource', any, notify}],
<<"Church-encoded hot-dogs">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_notification(Alice, Bob, {'match-resource', any, notify}),
client_receives_message(Bob, <<"Church-encoded hot-dogs">>)
end).
notify_match_resource_exact_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 4}],
fun(Alice, _, _, Bob3, _) ->
%% given
Msg = amp_message_to(Bob3, [{'match-resource', exact, notify}],
<<"Resource three, your battery is on fire!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_notification(Alice, Bob3, {'match-resource', exact, notify}),
client_receives_message(Bob3, <<"Resource three, your battery is on fire!">>)
end).
notify_match_resource_other_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
NonmatchingJid = << (escalus_client:short_jid(Bob))/binary,
"/blahblahblah_resource" >>,
Msg = amp_message_to(NonmatchingJid,
[{'match-resource', other, notify}],
<<"A Bob by any other name!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_notification(Alice, NonmatchingJid,
{'match-resource', other, notify}),
client_receives_message(Bob, <<"A Bob by any other name!">>)
end).
notify_match_resource_other_bare_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
BareJid = escalus_client:short_jid(Bob),
Msg = amp_message_to(BareJid,
[{'match-resource', other, notify}],
<<"A Bob by any other name!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_notification(Alice, BareJid, {'match-resource', other, notify}),
client_receives_message(Bob, <<"A Bob by any other name!">>)
end).
error_deliver_to_online_user_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Rule = {deliver, direct, error},
Rules = rules(Config, [Rule]),
Msg = amp_message_to(Bob, Rules, <<"It might cause an error">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true ->
client_receives_amp_error(Alice, Bob, Rule, <<"undefined-condition">>),
client_receives_nothing(Bob);
false ->
client_receives_message(Bob, <<"It might cause an error">>)
end,
client_receives_nothing(Alice)
end).
error_deliver_to_offline_user_test(Config) ->
FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
Rule = {deliver, case ?config(offline_storage, Config) of
none -> none;
_ -> stored
end, error},
Rules = rules(Config, [Rule]),
escalus:story(
FreshConfig, [{alice, 1}],
fun(Alice) ->
%% given
BobJid = escalus_users:get_jid(FreshConfig, bob),
Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true ->
client_receives_amp_error(Alice, BobJid, Rule, <<"undefined-condition">>);
false ->
check_offline_storage(Alice, Config)
end
end),
wait_until_no_session(FreshConfig, alice),
case is_offline_storage_working(Config) andalso not lists:member(Rule, Rules) of
true -> user_has_incoming_offline_message(FreshConfig, bob, <<"A message in a bottle...">>);
false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
end.
check_offline_storage(User, Config) ->
case ?config(offline_storage, Config) of
offline_failure ->
client_receives_generic_error(User, <<"500">>, <<"wait">>);
_ -> client_receives_nothing(User)
end.
error_deliver_to_stranger_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
%% given
Rule = {deliver, none, error},
Rules = rules(Config, [Rule]),
Domain = domain(),
StrangerJid = <<"stranger@", Domain/binary>>,
Msg = amp_message_to(StrangerJid, Rules, <<"This cannot possibly succeed">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_amp_error(Alice, StrangerJid, Rule,
<<"undefined-condition">>);
false -> client_receives_generic_error(Alice, <<"503">>, <<"cancel">>)
end,
client_receives_nothing(Alice)
end).
drop_deliver_to_online_user_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
Rule = {deliver, direct, drop},
Rules = rules(Config, [Rule]),
Msg = amp_message_to(Bob, Rules, <<"It might get dropped">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> client_receives_nothing(Bob);
false -> client_receives_message(Bob, <<"It might get dropped">>)
end,
client_receives_nothing(Alice)
end).
drop_deliver_to_offline_user_test(Config) ->
FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
Rule = {deliver, case ?config(offline_storage, Config) of
none -> none;
_ -> stored
end, drop},
Rules = rules(Config, [Rule]),
Message = <<"A message in a bottle...">>,
escalus:story(
FreshConfig, [{alice, 1}],
fun(Alice) ->
%% given
BobJid = escalus_users:get_jid(FreshConfig, bob),
Msg = amp_message_to(BobJid, Rules, Message),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) orelse
?config(offline_storage, Config) /= offline_failure of
true -> client_receives_nothing(Alice);
false -> client_receives_generic_error(Alice, <<"500">>, <<"wait">>)
end
end),
wait_until_no_session(FreshConfig, alice),
case is_offline_storage_working(Config) andalso not lists:member(Rule, Rules) of
true -> user_has_incoming_offline_message(FreshConfig, bob, Message);
false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
end.
drop_deliver_to_stranger_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}],
fun(Alice) ->
%% given
Rule = {deliver, none, drop},
Rules = rules(Config, [Rule]),
Domain = domain(),
StrangerJid = <<"stranger@", Domain/binary>>,
Msg = amp_message_to(StrangerJid, Rules, <<"This cannot possibly succeed">>),
%% when
client_sends_message(Alice, Msg),
% then
case lists:member(Rule, Rules) of
true -> ok;
false -> client_receives_generic_error(Alice, <<"503">>, <<"cancel">>)
end,
client_receives_nothing(Alice)
end).
last_rule_applies_test(Config) ->
escalus:fresh_story(
Config, [{alice, 1}, {bob, 1}],
fun(Alice, Bob) ->
%% given
BobsBareJid = escalus_client:short_jid(Bob),
Msg = amp_message_to(BobsBareJid, [{deliver, none, error},
{deliver, stored, error},
{deliver, direct, notify}],
<<"One of your resources needs to get this!">>),
%% when
client_sends_message(Alice, Msg),
% then
client_receives_notification(Alice, BobsBareJid, {deliver, direct, notify}),
client_receives_message(Bob, <<"One of your resources needs to get this!">>)
end).
Internal
wait_until_no_session(FreshConfig, User) ->
U = escalus_users:get_username(FreshConfig, User),
S = escalus_users:get_server(FreshConfig, User),
JID = jid:make(U, S, <<>>),
mongoose_helper:wait_until(
fun() -> rpc(mim(), ejabberd_sm, get_user_resources, [JID]) end, []).
user_has_no_incoming_offline_messages(FreshConfig, UserName) ->
escalus:fresh_story(
FreshConfig, [{UserName, 1}],
fun(User) ->
client_receives_nothing(User),
case is_module_loaded(mod_mam_pm) of
true -> client_has_no_mam_messages(User);
false -> ok
end
end).
user_has_incoming_offline_message(FreshConfig, UserName, MsgText) ->
true = is_module_loaded(mod_mam_pm) orelse is_module_loaded(mod_offline),
{ok, Client} = escalus_client:start(FreshConfig, UserName, <<"new-session">>),
escalus:send(Client, escalus_stanza:presence(<<"available">>)),
case is_module_loaded(mod_offline) of
true -> client_receives_message(Client, MsgText);
false -> ok
end,
Presence = escalus:wait_for_stanza(Client),
escalus:assert(is_presence, Presence),
case is_module_loaded(mod_mam_pm) of
true -> client_has_mam_message(Client);
false -> ok
end,
escalus_client:stop(FreshConfig, Client).
client_has_no_mam_messages(User) ->
P = mam_helper:mam04_props(),
escalus:send(User, mam_helper:stanza_archive_request(P, <<"q1">>)),
Res = mam_helper:wait_archive_respond(User),
mam_helper:assert_respond_size(0, Res).
client_has_mam_message(User) ->
P = mam_helper:mam04_props(),
escalus:send(User, mam_helper:stanza_archive_request(P, <<"q1">>)),
Res = mam_helper:wait_archive_respond(User),
mam_helper:assert_respond_size(1, Res).
rules(Config, Default) ->
case lists:keysearch(rules, 1, Config) of
{value, {rules, Val}} -> Val;
_ -> Default
end.
ns_amp() ->
<<"">>.
client_sends_message(Client, Msg) ->
escalus_client:send(Client, Msg).
client_receives_amp_error(Client, Rules, AmpErrorKind) when is_list(Rules) ->
Received = escalus_client:wait_for_stanza(Client),
assert_amp_error(Client, Received, Rules, AmpErrorKind);
client_receives_amp_error(Client, Rule, AmpErrorKind) ->
client_receives_amp_error(Client, [Rule], AmpErrorKind).
client_receives_amp_error(Client, IntendedRecipient, Rule, AmpErrorKind) ->
Received = escalus_client:wait_for_stanza(Client),
assert_amp_error_with_full_amp(Client, IntendedRecipient,
Received, Rule, AmpErrorKind).
client_receives_generic_error(Client, Code, Type) ->
Received = escalus_client:wait_for_stanza(Client, 5000),
escalus:assert(fun contains_error/3, [Code, Type], Received).
client_receives_nothing(Client) ->
timer:sleep(300),
escalus_assert:has_no_stanzas(Client).
client_receives_message(Client, MsgText) ->
Received = escalus_client:wait_for_stanza(Client),
escalus:assert(is_chat_message, [MsgText], Received).
client_receives_notification(Client, IntendedRecipient, Rule) ->
Msg = escalus_client:wait_for_stanza(Client),
assert_notification(Client, IntendedRecipient, Msg, Rule).
disco_info() ->
escalus_stanza:disco_info(domain()).
disco_info_amp_node() ->
escalus_stanza:disco_info(domain(), ns_amp()).
assert_amp_error(Client, Response, Rules, AmpErrorKind) when is_list(Rules) ->
ClientJID = escalus_client:full_jid(Client),
Server = escalus_client:server(Client),
Server = exml_query:attr(Response, <<"from">>),
ClientJID = exml_query:attr(Response, <<"to">>),
escalus:assert(fun contains_amp/5,
[amp_status_attr(AmpErrorKind), no_to_attr, no_from_attr, Rules],
Response),
escalus:assert(fun contains_amp_error/3,
[AmpErrorKind, Rules],
Response);
assert_amp_error(Client, Response, Rule, AmpErrorKind) ->
assert_amp_error(Client, Response, [Rule], AmpErrorKind).
assert_amp_error_with_full_amp(Client, IntendedRecipient, Response,
{_C, _V, _A} = Rule, AmpErrorKind) ->
ClientJID = escalus_client:full_jid(Client),
RecipientJID = full_jid(IntendedRecipient),
Server = escalus_client:server(Client),
Server = exml_query:attr(Response, <<"from">>),
ClientJID = exml_query:attr(Response, <<"to">>),
escalus:assert(fun contains_amp/5,
[amp_status_attr(AmpErrorKind), RecipientJID, ClientJID, [Rule]],
Response),
escalus:assert(fun contains_amp_error/3,
[AmpErrorKind, [Rule]],
Response).
assert_notification(Client, IntendedRecipient, Response, {_C, _V, A} = Rule) ->
ClientJID = escalus_client:full_jid(Client),
RecipientJID = full_jid(IntendedRecipient),
Server = escalus_client:server(Client),
Action = a2b(A),
Server = exml_query:attr(Response, <<"from">>),
ClientJID = exml_query:attr(Response, <<"to">>),
escalus:assert(fun contains_amp/5,
[Action, RecipientJID, ClientJID, [Rule]],
Response).
assert_has_features(Response, Features) ->
CheckF = fun(F) -> escalus:assert(has_feature, [F], Response) end,
lists:foreach(CheckF, Features).
full_jid(#client{} = Client) ->
escalus_client:full_jid(Client);
full_jid(B) when is_binary(B) ->
B.
@TODO : Move me out to escalus_stanza % % % % % % % % %
Element constructors % % % % % % % % % % % % % % % % % %
amp_message_to(To, Rules, MsgText) ->
Msg0 = #xmlel{children=C} = escalus_stanza:chat_to(To, MsgText),
Msg = escalus_stanza:set_id(Msg0, escalus_stanza:id()),
Amp = amp_el(Rules),
Msg#xmlel{children = C ++ [Amp]}.
amp_el([]) ->
throw("cannot build <amp> with no rules!");
amp_el(Rules) ->
#xmlel{name = <<"amp">>,
attrs = [{<<"xmlns">>, ns_amp()}],
children = [ rule_el(R) || R <- Rules ]}.
rule_el({Condition, Value, Action}) ->
check_rules(Condition, Value, Action),
#xmlel{name = <<"rule">>
, attrs = [{<<"condition">>, a2b(Condition)}
, {<<"value">>, a2b(Value)}
, {<<"action">>, a2b(Action)}]}.
@TODO : Move me out to escalus_pred % % % % % % % % % % % %
%%%%%%%%% XML predicates %%%%% %%%%%%%%%%%%%%%%%%%
contains_amp(Status, To, From, ExpectedRules, Stanza) when is_list(ExpectedRules)->
Amp = exml_query:subelement(Stanza, <<"amp">>),
undefined =/= Amp andalso
To == exml_query:attr(Amp, <<"to">>, no_to_attr) andalso
From == exml_query:attr(Amp, <<"from">>, no_from_attr) andalso
Status == exml_query:attr(Amp, <<"status">>, no_status_attr) andalso
all_present([ rule_el(R) || R <- ExpectedRules ], exml_query:subelements(Amp, <<"rule">>)).
contains_amp_error(AmpErrorKind, Rules, Response) ->
ErrorEl = exml_query:subelement(Response, <<"error">>),
<<"modify">> == exml_query:attr(ErrorEl, <<"type">>)
andalso
amp_error_code(AmpErrorKind) == exml_query:attr(ErrorEl, <<"code">>)
andalso
undefined =/= (Marker = exml_query:subelement(ErrorEl, amp_error_marker(AmpErrorKind)))
andalso
ns_stanzas() == exml_query:attr(Marker, <<"xmlns">>)
andalso
undefined =/= (Container = exml_query:subelement(ErrorEl,
amp_error_container(AmpErrorKind)))
andalso
all_present([ rule_el(R) || R <- Rules ], exml_query:subelements(Container, <<"rule">>)).
contains_error(Code, Type, Response) ->
ErrorEl = exml_query:subelement(Response, <<"error">>),
Type == exml_query:attr(ErrorEl, <<"type">>)
andalso (Code == any orelse Code == exml_query:attr(ErrorEl, <<"code">>)).
all_present(Needles, Haystack) ->
list_and([ lists:member(Needle, Haystack)
|| Needle <- Needles ]).
list_and(List) ->
lists:all(fun(X) -> X =:= true end, List).
ns_stanzas() ->
<<"urn:ietf:params:xml:ns:xmpp-stanzas">>.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
check_rules(deliver, direct, notify) -> ok;
check_rules(deliver, stored, notify) -> ok;
check_rules(deliver, none, notify) -> ok;
check_rules(deliver, direct, error) -> ok;
check_rules(deliver, stored, error) -> ok;
check_rules(deliver, none, error) -> ok;
check_rules(deliver, direct, drop) -> ok;
check_rules(deliver, stored, drop) -> ok;
check_rules(deliver, none, drop) -> ok;
check_rules('match-resource', any, notify) -> ok;
check_rules('match-resource', exact, notify) -> ok;
check_rules('match-resource', other, notify) -> ok;
check_rules(deliver, direct, alert) -> ok; %% for testing unsupported rules
check_rules('expire-at', _binary, notify) -> ok; %% for testing unsupported conditions
check_rules(broken, rule, spec) -> ok; %% for testing unacceptable rules
check_rules(also_broken, rule, spec) -> ok; %% for testing unacceptable rules
check_rules(C, V, A) -> throw({illegal_amp_rule, {C, V, A}}).
a2b(B) when is_binary(B) -> B;
a2b(A) -> atom_to_binary(A, utf8).
%% Undefined-condition errors return a fully-fledged amp element with status=error
%% The other errors have 'thin' <amp>s with no status attribute
amp_status_attr(<<"undefined-condition">>) -> <<"error">>;
amp_status_attr(_) -> no_status_attr.
amp_error_code(<<"undefined-condition">>) -> <<"500">>;
amp_error_code(<<"not-acceptable">>) -> <<"405">>;
amp_error_code(<<"unsupported-actions">>) -> <<"400">>;
amp_error_code(<<"unsupported-conditions">>) -> <<"400">>.
amp_error_marker(<<"not-acceptable">>) -> <<"not-acceptable">>;
amp_error_marker(<<"unsupported-actions">>) -> <<"bad-request">>;
amp_error_marker(<<"unsupported-conditions">>) -> <<"bad-request">>;
amp_error_marker(<<"undefined-condition">>) -> <<"undefined-condition">>.
amp_error_container(<<"not-acceptable">>) -> <<"invalid-rules">>;
amp_error_container(<<"unsupported-actions">>) -> <<"unsupported-actions">>;
amp_error_container(<<"unsupported-conditions">>) -> <<"unsupported-conditions">>;
amp_error_container(<<"undefined-condition">>) -> <<"failed-rules">>.
is_module_loaded(Mod) ->
rpc(mim(), gen_mod, is_loaded, [host_type(), Mod]).
required_modules(basic) ->
mam_modules(off) ++ offline_modules(off) ++ privacy_modules(on);
required_modules(mam) ->
mam_modules(on) ++ offline_modules(off) ++ privacy_modules(off);
required_modules(offline) ->
mam_modules(off) ++ offline_modules(on) ++ privacy_modules(on);
required_modules(_) ->
[].
mam_modules(on) ->
[{mod_mam, mam_helper:config_opts(#{pm => #{}, async_writer => #{enabled => false}})}];
mam_modules(off) ->
[{mod_mam, stopped}].
offline_modules(on) ->
[{mod_offline, config_parser_helper:mod_config(mod_offline,
#{access_max_user_messages => max_user_offline_messages})}];
offline_modules(off) ->
[{mod_offline, stopped},
{mod_offline_stub, []}].
privacy_modules(on) ->
[{mod_privacy, config_parser_helper:default_mod_config(mod_privacy)},
{mod_blocking, config_parser_helper:default_mod_config(mod_blocking)}];
privacy_modules(off) ->
[{mod_privacy, stopped},
{mod_blocking, stopped}].
| null | https://raw.githubusercontent.com/esl/MongooseIM/997ce8cc01dacf8bf1f1f4e3a984ee10f0ce5dd6/big_tests/tests/amp_big_SUITE.erl | erlang | @doc Tests for XEP-0079 Advanced Message Processing support
Group definitions
This function is called by multiple_config_cth for each group
to get a list of configs for each test case
Each of the 'deliver' tests is repeated several times, each time with a different config
Test case list, each test has to be listed exactly once
Setup and teardown
Test cases
given
alert is unsupported
when
then
given
expire-at is unsupported
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
This special message is matched by the ejabberd_socket mock
(see amp_test_helper_code/0)
when
then
while the session is being resumed after the simulated error
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
given
when
then
% % % % % % % %
% % % % % % % % % % % % % % % % %
% % % % % % % % % % %
XML predicates %%%%% %%%%%%%%%%%%%%%%%%%
for testing unsupported rules
for testing unsupported conditions
for testing unacceptable rules
for testing unacceptable rules
Undefined-condition errors return a fully-fledged amp element with status=error
The other errors have 'thin' <amp>s with no status attribute | -module(amp_big_SUITE).
< a href=" / extensions / xep-0079.html">XEP-0079</a >
@author < >
2014 Erlang Solutions , Ltd.
This work was sponsored by Grindr.com
-compile([export_all, nowarn_export_all]).
-include_lib("common_test/include/ct.hrl").
-include_lib("escalus/include/escalus.hrl").
-include_lib("escalus/include/escalus_xmlns.hrl").
-include_lib("exml/include/exml.hrl").
-import(distributed_helper, [mim/0,
require_rpc_nodes/1,
rpc/4]).
-import(muc_light_helper, [lbin/1]).
-import(domain_helper, [host_type/0, domain/0]).
suite() ->
require_rpc_nodes([mim]) ++ escalus:suite().
all() ->
[{group, G} || G <- main_group_names(), is_enabled(G)].
groups() ->
group_spec(main_group_names()).
is_enabled(mam) -> mongoose_helper:is_rdbms_enabled(host_type());
is_enabled(_) -> true.
main_group_names() ->
[basic, mam, offline].
subgroups(mam) -> [mam_success, mam_failure];
subgroups(offline) -> [offline_success, offline_failure];
subgroups(_) -> [].
group_spec(Groups) when is_list(Groups) ->
lists:flatmap(fun group_spec/1, Groups);
group_spec(Group) ->
case subgroups(Group) of
[] -> [{Group, [parallel], test_cases(Group)}];
SubGroups -> [{Group, [{group, SubG} || SubG <- SubGroups]} | group_spec(SubGroups)]
end.
test_cases(Group) ->
regular_tests(Group) ++ multiple_config_cth:flatten_and_strip_config(tests_with_config(Group)).
regular_tests(basic) -> basic_test_cases();
regular_tests(_) -> [].
-spec tests_with_config(_GroupName :: atom()) -> [{TestCase :: atom(),
[Config :: [{Key :: atom(), Value :: term()}]]}].
tests_with_config(_GroupName) ->
lists:append([deliver_tests_with_config(notify),
deliver_tests_with_config(error),
deliver_tests_with_config(drop)]).
deliver_tests_with_config(Action) ->
multiple_config_cth:add_config(deliver_rule_configs(Action), deliver_test_cases(Action)).
Each config tests different rules in the AMP message
deliver_rule_configs(Action) ->
[
[{rules, [{deliver, direct, Action}]}],
[{rules, [{deliver, stored, Action}]}],
[{rules, [{deliver, none, Action}]}],
[{rules, [{deliver, direct, Action},
{deliver, stored, Action},
{deliver, none, Action}]}]
].
basic_test_cases() ->
[initial_service_discovery_test,
actions_and_conditions_discovery_test,
unsupported_actions_test,
unsupported_conditions_test,
unacceptable_rules_test,
notify_match_resource_any_test,
notify_match_resource_exact_test,
notify_match_resource_other_test,
notify_match_resource_other_bare_test,
last_rule_applies_test].
deliver_test_cases(notify) ->
[notify_deliver_to_online_user_test,
notify_deliver_to_online_user_bare_jid_test,
notify_deliver_to_online_user_recipient_privacy_test,
notify_deliver_to_offline_user_test,
notify_deliver_to_offline_user_recipient_privacy_test,
notify_deliver_to_online_user_broken_connection_test,
notify_deliver_to_stranger_test,
notify_deliver_to_unknown_domain_test];
deliver_test_cases(error) ->
[error_deliver_to_online_user_test,
error_deliver_to_offline_user_test,
error_deliver_to_stranger_test];
deliver_test_cases(drop) ->
[drop_deliver_to_online_user_test,
drop_deliver_to_offline_user_test,
drop_deliver_to_stranger_test].
%% Install the multiple_config_cth hook, compile and load the
%% amp_test_helper module on the server node, and mock ranch_tcp.
init_per_suite(Config) ->
    ConfigWithHooks = [{ct_hooks, [{multiple_config_cth, fun tests_with_config/1}]} | Config],
    {Mod, Code} = rpc(mim(), dynamic_compile, from_string, [amp_test_helper_code()]),
    rpc(mim(), code, load_binary, [Mod, "amp_test_helper.erl", Code]),
    setup_meck(suite),
    escalus:init_per_suite(ConfigWithHooks).

%% Source of a helper module compiled on the server node: it makes the
%% mocked ranch_tcp:send/2 fail whenever the payload contains the marker
%% "Recipient connection breaks", simulating a dead client connection.
amp_test_helper_code() ->
    "-module(amp_test_helper).\n"
    "-compile([export_all, nowarn_export_all]).\n"
    "setup_meck() ->\n"
    "  meck:expect(ranch_tcp, send, fun ranch_tcp_send/2).\n"
    "ranch_tcp_send(Socket, Data) ->\n"
    "  case catch binary:match(Data, <<\"Recipient connection breaks\">>) of\n"
    "    {N, _} when is_integer(N) -> {error, simulated};\n"
    "    _ -> meck:passthrough([Socket, Data])\n"
    "  end.\n".

%% Unload all mocks and clean up fresh users.
end_per_suite(C) ->
    teardown_meck(suite),
    escalus_fresh:clean(),
    escalus:end_per_suite(C).

%% Main groups reconfigure server modules; every group may install
%% extra mocks and records which offline-storage flavour it uses.
init_per_group(GroupName, Config) ->
    Config1 = case lists:member(GroupName, main_group_names()) of
                  true ->
                      ConfigWithModules = dynamic_modules:save_modules(host_type(), Config),
                      dynamic_modules:ensure_modules(host_type(), required_modules(GroupName)),
                      ConfigWithModules;
                  false ->
                      Config
              end,
    setup_meck(GroupName),
    save_offline_status(GroupName, Config1).
%% suite: passthrough mock of ranch_tcp wired to amp_test_helper;
%% *_failure groups additionally make the storage backend return errors.
setup_meck(suite) ->
    ok = rpc(mim(), meck, new, [ranch_tcp, [passthrough, no_link]]),
    ok = rpc(mim(), amp_test_helper, setup_meck, []);
setup_meck(mam_failure) ->
    ok = rpc(mim(), meck, expect, [mod_mam_rdbms_arch, archive_message, 3, {ok, {error, simulated}}]);
setup_meck(offline_failure) ->
    ok = rpc(mim(), meck, expect, [mod_offline_mnesia, write_messages, 4, {error, simulated}]);
setup_meck(_) -> ok.
%% Record in Config which offline-storage flavour the group exercises,
%% so test cases can branch on it; other group names add nothing.
save_offline_status(GroupName, Config) ->
    case GroupName of
        mam_success     -> [{offline_storage, mam} | Config];
        mam_failure     -> [{offline_storage, mam_failure} | Config];
        offline_success -> [{offline_storage, offline} | Config];
        offline_failure -> [{offline_storage, offline_failure} | Config];
        basic           -> [{offline_storage, none} | Config];
        _GN             -> Config
    end.
%% Undo group-specific mocks and restore the original module config.
end_per_group(GroupName, Config) ->
    teardown_meck(GroupName),
    case lists:member(GroupName, main_group_names()) of
        true -> dynamic_modules:restore_modules(Config);
        false -> ok
    end.

%% Unload only what the matching setup_meck/1 clause installed;
%% teardown_meck(suite) unloads every mock at once.
teardown_meck(mam_failure) ->
    rpc(mim(), meck, unload, [mod_mam_rdbms_arch]);
teardown_meck(offline_failure) ->
    rpc(mim(), meck, unload, [mod_offline_mnesia]);
teardown_meck(suite) ->
    rpc(mim(), meck, unload, []);
teardown_meck(_) -> ok.

init_per_testcase(Name, C) -> escalus:init_per_testcase(Name, C).
end_per_testcase(Name, C) -> escalus:end_per_testcase(Name, C).

%% Server-wide disco#info must advertise the AMP feature.
initial_service_discovery_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              escalus_client:send(Alice, disco_info()),
              Response = escalus_client:wait_for_stanza(Alice),
              escalus:assert(has_feature, [ns_amp()], Response)
      end).

%% disco#info on the AMP node must list supported actions/conditions.
%% NOTE(review): the feature URIs below look truncated to empty binaries
%% (XEP-0079 defines http://jabber.org/protocol/amp?action=... and
%% ...?condition=... URIs; <<"-resource">> looks like a cut
%% "?condition=match-resource") - confirm against the upstream suite.
actions_and_conditions_discovery_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              Args = [ns_amp(),
                      <<"">>,
                      <<"">>,
                      <<"">>,
                      <<"-resource">>
                     ],
              escalus_client:send(Alice, disco_info_amp_node()),
              Response = escalus_client:wait_for_stanza(Alice),
              assert_has_features(Response, Args)
      end).
%% 'alert' is not a supported AMP action: the server must bounce the
%% message with an <unsupported-actions/> error (XEP-0079).
%% NOTE(review): the line constructing Msg was lost in extraction
%% (only the dangling payload argument remained); it is reconstructed
%% here from the rule asserted below - confirm against upstream.
unsupported_actions_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              %% given a rule with the unsupported 'alert' action
              Msg = amp_message_to(Bob, [{deliver, direct, alert}],
                                   <<"A paradoxical payload!">>),
              %% when
              client_sends_message(Alice, Msg),
              %% then
              client_receives_amp_error(Alice, {deliver, direct, alert}, <<"unsupported-actions">>)
      end).
%% 'expire-at' is a valid XEP-0079 condition but unsupported by the
%% server: expect an <unsupported-conditions/> AMP error.
unsupported_conditions_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Msg = amp_message_to(Bob, [{'expire-at', <<"2020-06-06T12:20:20Z">>, notify}],
                                   <<"Never fade away!">>),
              client_sends_message(Alice, Msg),
              client_receives_amp_error(Alice, {'expire-at', <<"2020-06-06T12:20:20Z">>, notify},
                                        <<"unsupported-conditions">>)
      end).

%% Malformed rules must be bounced with a <not-acceptable/> error.
unacceptable_rules_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Msg = amp_message_to(Bob, [{broken, rule, spec}
                                        , {also_broken, rule, spec}
                                        ],
                                   <<"Break all the rules!">>),
              client_sends_message(Alice, Msg),
              client_receives_amp_error(Alice, [{broken, rule, spec}
                                               , {also_broken, rule, spec}],
                                        <<"not-acceptable">>)
      end).

%% deliver/direct/notify: the sender gets a notification and the online
%% recipient gets the message.
notify_deliver_to_online_user_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Rule = {deliver, direct, notify},
              %% rules/2 substitutes per-case config rules when present
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(Bob, Rules, <<"I want to be sure you get this!">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, Bob, Rule);
                  false -> ok
              end,
              client_receives_message(Bob, <<"I want to be sure you get this!">>),
              client_receives_nothing(Alice)
      end).

%% Same as above, but the message is addressed to the bare JID.
notify_deliver_to_online_user_bare_jid_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Message = <<"One of your resources needs to get this!">>,
              Rule = {deliver, direct, notify},
              Rules = rules(Config, [Rule]),
              BobsBareJid = escalus_client:short_jid(Bob),
              Msg = amp_message_to(BobsBareJid, Rules, Message),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, BobsBareJid, Rule);
                  false -> ok
              end,
              client_receives_message(Bob, Message),
              client_receives_nothing(Alice)
      end).

%% MAM archives regardless of privacy lists, so this variant only makes
%% sense when MAM is off.
notify_deliver_to_online_user_recipient_privacy_test(Config) ->
    case is_module_loaded(mod_mam_pm) of
        true -> {skip, "MAM does not support privacy lists"};
        false -> do_notify_deliver_to_online_user_recipient_privacy_test(Config)
    end.

%% A privacy-blocked recipient counts as 'deliver none': the sender may
%% get a notification, then a 503 bounce; neither side gets the message.
do_notify_deliver_to_online_user_recipient_privacy_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Rule = {deliver, none, notify},
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(Bob, Rules, <<"Should be filtered by Bob's privacy list">>),
              privacy_helper:set_and_activate(Bob, <<"deny_all_message">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, Bob, Rule);
                  false -> ok
              end,
              client_receives_generic_error(Alice, <<"503">>, <<"cancel">>),
              client_receives_nothing(Alice),
              client_receives_nothing(Bob)
      end).
%% The special "Recipient connection breaks" body makes the mocked
%% ranch_tcp:send/2 fail (see amp_test_helper_code/0), simulating a
%% connection dying while the message is being delivered.
%% Fix: the trailing comment line had lost its '%%' marker in
%% extraction, which made the function invalid Erlang.
notify_deliver_to_online_user_broken_connection_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              %% With MAM the message still counts as 'stored'
              Rule = {deliver, case ?config(offline_storage, Config) of
                                   mam -> stored;
                                   _ -> none
                               end, notify},
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(Bob, Rules, <<"Recipient connection breaks">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, Bob, Rule);
                  false -> ok
              end,
              client_receives_nothing(Alice),
              %% Kill the connection to avoid errors with closing the stream
              %% while the session is being resumed after the simulated error
              escalus_connection:kill(Bob)
      end),
    ok.
%% Message to an offline user: with working storage the rule matches
%% 'stored', otherwise 'none'; afterwards verify the offline inbox.
notify_deliver_to_offline_user_test(Config) ->
    FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
    escalus:story(
      FreshConfig, [{alice, 1}],
      fun(Alice) ->
              Rule = {deliver, case is_offline_storage_working(Config) of
                                   true -> stored;
                                   false -> none
                               end, notify},
              Rules = rules(Config, [Rule]),
              BobJid = escalus_users:get_jid(FreshConfig, bob),
              Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, BobJid, Rule);
                  false -> ok
              end,
              %% mocked storage failure surfaces as a 500/wait error
              case ?config(offline_storage, Config) of
                  offline_failure -> client_receives_generic_error(Alice, <<"500">>, <<"wait">>);
                  _ -> client_receives_nothing(Alice)
              end
      end),
    wait_until_no_session(FreshConfig, alice),
    case is_offline_storage_working(Config) of
        true -> user_has_incoming_offline_message(FreshConfig, bob, <<"A message in a bottle...">>);
        false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
    end.

%% True when messages to offline users are actually persisted
%% (healthy MAM or healthy mod_offline backend).
is_offline_storage_working(Config) ->
    Status = ?config(offline_storage, Config),
    Status == mam orelse Status == offline.

%% MAM ignores privacy lists, so this variant only runs without MAM.
notify_deliver_to_offline_user_recipient_privacy_test(Config) ->
    case is_module_loaded(mod_mam_pm) of
        true -> {skip, "MAM does not support privacy lists"};
        false -> do_notify_deliver_to_offline_user_recipient_privacy_test(Config)
    end.

%% Offline recipient with a default deny-all privacy list: nothing may
%% be stored and the sender at most gets a notification.
do_notify_deliver_to_offline_user_recipient_privacy_test(Config) ->
    FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
    escalus:story(
      FreshConfig, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              %% default list so it also applies while Bob is offline
              privacy_helper:set_and_activate(Bob, <<"deny_all_message">>),
              privacy_helper:set_default_list(Bob, <<"deny_all_message">>),
              mongoose_helper:logout_user(Config, Bob),
              Rule = {deliver, none, notify},
              Rules = rules(Config, [Rule]),
              BobJid = lbin(escalus_client:short_jid(Bob)),
              Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, BobJid, Rule);
                  false -> ok
              end,
              client_receives_nothing(Alice)
      end),
    user_has_no_incoming_offline_messages(FreshConfig, bob).

%% Non-existent local user matches 'deliver none' and ends in a 503.
notify_deliver_to_stranger_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              Rule = {deliver, none, notify},
              Rules = rules(Config, [Rule]),
              Domain = domain(),
              StrangerJid = <<"stranger@", Domain/binary>>,
              Msg = amp_message_to(StrangerJid, Rules, <<"A message in a bottle...">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, StrangerJid, Rule);
                  false -> ok
              end,
              client_receives_generic_error(Alice, <<"503">>, <<"cancel">>),
              client_receives_nothing(Alice)
      end).
%% Message routed towards a domain the server cannot resolve.
%% Fix: the inline comment before the 404 assertion had lost its '%%'
%% marker in extraction, which made the function invalid Erlang.
notify_deliver_to_unknown_domain_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              %% NOTE(review): the target JID literal looks truncated to an
              %% empty binary here; it should name a non-existent remote
              %% domain - confirm against the upstream suite.
              StrangerJid = <<"">>,
              Rule = {deliver, none, notify},
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(StrangerJid, Rules, <<"Msg to unknown domain">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_notification(Alice, StrangerJid, Rule);
                  false -> ok
              end,
              %% error 404: 'remote server not found' is expected
              client_receives_generic_error(Alice, <<"404">>, <<"cancel">>),
              client_receives_nothing(Alice)
      end).
%% match-resource 'any': some resource of the recipient is online.
notify_match_resource_any_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 4}],
      fun(Alice, Bob, _, _, _) ->
              Msg = amp_message_to(Bob, [{'match-resource', any, notify}],
                                   <<"Church-encoded hot-dogs">>),
              client_sends_message(Alice, Msg),
              client_receives_notification(Alice, Bob, {'match-resource', any, notify}),
              client_receives_message(Bob, <<"Church-encoded hot-dogs">>)
      end).

%% match-resource 'exact': the addressed resource itself is online.
notify_match_resource_exact_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 4}],
      fun(Alice, _, _, Bob3, _) ->
              Msg = amp_message_to(Bob3, [{'match-resource', exact, notify}],
                                   <<"Resource three, your battery is on fire!">>),
              client_sends_message(Alice, Msg),
              client_receives_notification(Alice, Bob3, {'match-resource', exact, notify}),
              client_receives_message(Bob3, <<"Resource three, your battery is on fire!">>)
      end).

%% match-resource 'other': the addressed resource is offline, so the
%% message reaches another connected resource of the same user.
notify_match_resource_other_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              NonmatchingJid = << (escalus_client:short_jid(Bob))/binary,
                                  "/blahblahblah_resource" >>,
              Msg = amp_message_to(NonmatchingJid,
                                   [{'match-resource', other, notify}],
                                   <<"A Bob by any other name!">>),
              client_sends_message(Alice, Msg),
              client_receives_notification(Alice, NonmatchingJid,
                                           {'match-resource', other, notify}),
              client_receives_message(Bob, <<"A Bob by any other name!">>)
      end).

%% match-resource 'other' with a bare-JID target.
notify_match_resource_other_bare_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              BareJid = escalus_client:short_jid(Bob),
              Msg = amp_message_to(BareJid,
                                   [{'match-resource', other, notify}],
                                   <<"A Bob by any other name!">>),
              client_sends_message(Alice, Msg),
              client_receives_notification(Alice, BareJid, {'match-resource', other, notify}),
              client_receives_message(Bob, <<"A Bob by any other name!">>)
      end).
%% deliver/direct/error: the message is bounced back to the sender as
%% an undefined-condition AMP error; the online recipient gets nothing.
error_deliver_to_online_user_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Rule = {deliver, direct, error},
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(Bob, Rules, <<"It might cause an error">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true ->
                      client_receives_amp_error(Alice, Bob, Rule, <<"undefined-condition">>),
                      client_receives_nothing(Bob);
                  false ->
                      client_receives_message(Bob, <<"It might cause an error">>)
              end,
              client_receives_nothing(Alice)
      end).

%% deliver/stored/error for an offline recipient: when the rule matches,
%% the message is bounced and must not end up in the offline store.
error_deliver_to_offline_user_test(Config) ->
    FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
    Rule = {deliver, case ?config(offline_storage, Config) of
                         none -> none;
                         _ -> stored
                     end, error},
    Rules = rules(Config, [Rule]),
    escalus:story(
      FreshConfig, [{alice, 1}],
      fun(Alice) ->
              BobJid = escalus_users:get_jid(FreshConfig, bob),
              Msg = amp_message_to(BobJid, Rules, <<"A message in a bottle...">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true ->
                      client_receives_amp_error(Alice, BobJid, Rule, <<"undefined-condition">>);
                  false ->
                      check_offline_storage(Alice, Config)
              end
      end),
    wait_until_no_session(FreshConfig, alice),
    case is_offline_storage_working(Config) andalso not lists:member(Rule, Rules) of
        true -> user_has_incoming_offline_message(FreshConfig, bob, <<"A message in a bottle...">>);
        false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
    end.

%% When storage is mocked to fail the sender sees a 500/wait error,
%% otherwise the send stays silent.
check_offline_storage(User, Config) ->
    case ?config(offline_storage, Config) of
        offline_failure ->
            client_receives_generic_error(User, <<"500">>, <<"wait">>);
        _ -> client_receives_nothing(User)
    end.

%% deliver/none/error towards a non-existent local user.
error_deliver_to_stranger_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              Rule = {deliver, none, error},
              Rules = rules(Config, [Rule]),
              Domain = domain(),
              StrangerJid = <<"stranger@", Domain/binary>>,
              Msg = amp_message_to(StrangerJid, Rules, <<"This cannot possibly succeed">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_amp_error(Alice, StrangerJid, Rule,
                                                    <<"undefined-condition">>);
                  false -> client_receives_generic_error(Alice, <<"503">>, <<"cancel">>)
              end,
              client_receives_nothing(Alice)
      end).
%% deliver/direct/drop: when the rule matches the message disappears
%% silently; the sender never hears anything either way.
drop_deliver_to_online_user_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              Rule = {deliver, direct, drop},
              Rules = rules(Config, [Rule]),
              Msg = amp_message_to(Bob, Rules, <<"It might get dropped">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> client_receives_nothing(Bob);
                  false -> client_receives_message(Bob, <<"It might get dropped">>)
              end,
              client_receives_nothing(Alice)
      end).

%% deliver/stored/drop for an offline recipient: a matching rule must
%% prevent the message from being stored, with no error to the sender.
drop_deliver_to_offline_user_test(Config) ->
    FreshConfig = escalus_fresh:create_users(Config, [{alice, 1}, {bob, 1}]),
    Rule = {deliver, case ?config(offline_storage, Config) of
                         none -> none;
                         _ -> stored
                     end, drop},
    Rules = rules(Config, [Rule]),
    Message = <<"A message in a bottle...">>,
    escalus:story(
      FreshConfig, [{alice, 1}],
      fun(Alice) ->
              BobJid = escalus_users:get_jid(FreshConfig, bob),
              Msg = amp_message_to(BobJid, Rules, Message),
              client_sends_message(Alice, Msg),
              %% only a non-matching rule on a failing backend surfaces an error
              case lists:member(Rule, Rules) orelse
                   ?config(offline_storage, Config) /= offline_failure of
                  true -> client_receives_nothing(Alice);
                  false -> client_receives_generic_error(Alice, <<"500">>, <<"wait">>)
              end
      end),
    wait_until_no_session(FreshConfig, alice),
    case is_offline_storage_working(Config) andalso not lists:member(Rule, Rules) of
        true -> user_has_incoming_offline_message(FreshConfig, bob, Message);
        false -> user_has_no_incoming_offline_messages(FreshConfig, bob)
    end.

%% deliver/none/drop towards a non-existent local user: a matching rule
%% even suppresses the usual 503 bounce.
drop_deliver_to_stranger_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}],
      fun(Alice) ->
              Rule = {deliver, none, drop},
              Rules = rules(Config, [Rule]),
              Domain = domain(),
              StrangerJid = <<"stranger@", Domain/binary>>,
              Msg = amp_message_to(StrangerJid, Rules, <<"This cannot possibly succeed">>),
              client_sends_message(Alice, Msg),
              case lists:member(Rule, Rules) of
                  true -> ok;
                  false -> client_receives_generic_error(Alice, <<"503">>, <<"cancel">>)
              end,
              client_receives_nothing(Alice)
      end).
%% When several rules match, the action of the LAST matching rule wins:
%% here deliver/direct/notify overrides the two preceding error rules.
last_rule_applies_test(Config) ->
    escalus:fresh_story(
      Config, [{alice, 1}, {bob, 1}],
      fun(Alice, Bob) ->
              BobsBareJid = escalus_client:short_jid(Bob),
              Msg = amp_message_to(BobsBareJid, [{deliver, none, error},
                                                 {deliver, stored, error},
                                                 {deliver, direct, notify}],
                                   <<"One of your resources needs to get this!">>),
              client_sends_message(Alice, Msg),
              client_receives_notification(Alice, BobsBareJid, {deliver, direct, notify}),
              client_receives_message(Bob, <<"One of your resources needs to get this!">>)
      end).
%% Internal helpers
%% Poll the session manager until the user has no live resources.
wait_until_no_session(FreshConfig, User) ->
    U = escalus_users:get_username(FreshConfig, User),
    S = escalus_users:get_server(FreshConfig, User),
    JID = jid:make(U, S, <<>>),
    mongoose_helper:wait_until(
      fun() -> rpc(mim(), ejabberd_sm, get_user_resources, [JID]) end, []).

%% Log the user in and check that neither offline storage nor MAM
%% delivers anything.
user_has_no_incoming_offline_messages(FreshConfig, UserName) ->
    escalus:fresh_story(
      FreshConfig, [{UserName, 1}],
      fun(User) ->
              client_receives_nothing(User),
              case is_module_loaded(mod_mam_pm) of
                  true -> client_has_no_mam_messages(User);
                  false -> ok
              end
      end).

%% Log the user in on a new resource and check the message was kept,
%% either by mod_offline (flushed on initial presence) or by MAM.
user_has_incoming_offline_message(FreshConfig, UserName, MsgText) ->
    true = is_module_loaded(mod_mam_pm) orelse is_module_loaded(mod_offline),
    {ok, Client} = escalus_client:start(FreshConfig, UserName, <<"new-session">>),
    escalus:send(Client, escalus_stanza:presence(<<"available">>)),
    case is_module_loaded(mod_offline) of
        true -> client_receives_message(Client, MsgText);
        false -> ok
    end,
    Presence = escalus:wait_for_stanza(Client),
    escalus:assert(is_presence, Presence),
    case is_module_loaded(mod_mam_pm) of
        true -> client_has_mam_message(Client);
        false -> ok
    end,
    escalus_client:stop(FreshConfig, Client).
%% Query the user's MAM archive and expect it to be empty.
client_has_no_mam_messages(User) ->
    P = mam_helper:mam04_props(),
    escalus:send(User, mam_helper:stanza_archive_request(P, <<"q1">>)),
    Res = mam_helper:wait_archive_respond(User),
    mam_helper:assert_respond_size(0, Res).

%% Query the user's MAM archive and expect exactly one message.
client_has_mam_message(User) ->
    P = mam_helper:mam04_props(),
    escalus:send(User, mam_helper:stanza_archive_request(P, <<"q1">>)),
    Res = mam_helper:wait_archive_respond(User),
    mam_helper:assert_respond_size(1, Res).
%% Return the AMP rules injected into Config by multiple_config_cth,
%% falling back to Default when this run carries no per-case rules.
%% lists:keyfind/3 replaces the legacy lists:keysearch/3, which is
%% retained in OTP only for backward compatibility.
rules(Config, Default) ->
    case lists:keyfind(rules, 1, Config) of
        {rules, Val} -> Val;
        false -> Default
    end.
%% XMPP namespace of XEP-0079 (Advanced Message Processing).
%% Fix: the literal had been emptied by URL-stripping during extraction;
%% an empty namespace would break every <amp xmlns=...> assertion.
ns_amp() ->
    <<"http://jabber.org/protocol/amp">>.
client_sends_message(Client, Msg) ->
    escalus_client:send(Client, Msg).

%% Wait for a stanza and assert it is an AMP error for the given
%% rule(s); the list/3-arity form checks errors without a full <amp> echo.
client_receives_amp_error(Client, Rules, AmpErrorKind) when is_list(Rules) ->
    Received = escalus_client:wait_for_stanza(Client),
    assert_amp_error(Client, Received, Rules, AmpErrorKind);
client_receives_amp_error(Client, Rule, AmpErrorKind) ->
    client_receives_amp_error(Client, [Rule], AmpErrorKind).

%% 4-arity form: undefined-condition errors echo the full <amp> element
%% including the intended recipient.
client_receives_amp_error(Client, IntendedRecipient, Rule, AmpErrorKind) ->
    Received = escalus_client:wait_for_stanza(Client),
    assert_amp_error_with_full_amp(Client, IntendedRecipient,
                                   Received, Rule, AmpErrorKind).

client_receives_generic_error(Client, Code, Type) ->
    Received = escalus_client:wait_for_stanza(Client, 5000),
    escalus:assert(fun contains_error/3, [Code, Type], Received).

%% Negative check: give the server 300 ms to (not) deliver anything.
client_receives_nothing(Client) ->
    timer:sleep(300),
    escalus_assert:has_no_stanzas(Client).

client_receives_message(Client, MsgText) ->
    Received = escalus_client:wait_for_stanza(Client),
    escalus:assert(is_chat_message, [MsgText], Received).

client_receives_notification(Client, IntendedRecipient, Rule) ->
    Msg = escalus_client:wait_for_stanza(Client),
    assert_notification(Client, IntendedRecipient, Msg, Rule).

disco_info() ->
    escalus_stanza:disco_info(domain()).

disco_info_amp_node() ->
    escalus_stanza:disco_info(domain(), ns_amp()).
%% Assert an AMP error stanza addressed back to the sender (from = the
%% server, to = the sender's full JID) carrying the expected rules and
%% error kind; the <amp> element has no to/from attributes here.
assert_amp_error(Client, Response, Rules, AmpErrorKind) when is_list(Rules) ->
    ClientJID = escalus_client:full_jid(Client),
    Server = escalus_client:server(Client),
    Server = exml_query:attr(Response, <<"from">>),
    ClientJID = exml_query:attr(Response, <<"to">>),
    escalus:assert(fun contains_amp/5,
                   [amp_status_attr(AmpErrorKind), no_to_attr, no_from_attr, Rules],
                   Response),
    escalus:assert(fun contains_amp_error/3,
                   [AmpErrorKind, Rules],
                   Response);
assert_amp_error(Client, Response, Rule, AmpErrorKind) ->
    assert_amp_error(Client, Response, [Rule], AmpErrorKind).

%% As above, but the <amp> element must also carry to/from attributes:
%% the intended recipient and the original sender.
assert_amp_error_with_full_amp(Client, IntendedRecipient, Response,
                               {_C, _V, _A} = Rule, AmpErrorKind) ->
    ClientJID = escalus_client:full_jid(Client),
    RecipientJID = full_jid(IntendedRecipient),
    Server = escalus_client:server(Client),
    Server = exml_query:attr(Response, <<"from">>),
    ClientJID = exml_query:attr(Response, <<"to">>),
    escalus:assert(fun contains_amp/5,
                   [amp_status_attr(AmpErrorKind), RecipientJID, ClientJID, [Rule]],
                   Response),
    escalus:assert(fun contains_amp_error/3,
                   [AmpErrorKind, [Rule]],
                   Response).

%% Assert a notification: <amp status=Action to=Recipient from=Sender>
%% sent by the server back to the sender.
assert_notification(Client, IntendedRecipient, Response, {_C, _V, A} = Rule) ->
    ClientJID = escalus_client:full_jid(Client),
    RecipientJID = full_jid(IntendedRecipient),
    Server = escalus_client:server(Client),
    Action = a2b(A),
    Server = exml_query:attr(Response, <<"from">>),
    ClientJID = exml_query:attr(Response, <<"to">>),
    escalus:assert(fun contains_amp/5,
                   [Action, RecipientJID, ClientJID, [Rule]],
                   Response).
%% Assert the disco#info response advertises every listed feature.
assert_has_features(Response, Features) ->
    CheckF = fun(F) -> escalus:assert(has_feature, [F], Response) end,
    lists:foreach(CheckF, Features).

%% Accept either an escalus client record or a ready-made binary JID.
full_jid(#client{} = Client) ->
    escalus_client:full_jid(Client);
full_jid(B) when is_binary(B) ->
    B.
%% Build a chat message to To with a fresh stanza id and an <amp> child
%% carrying the given rules.
amp_message_to(To, Rules, MsgText) ->
    Msg0 = #xmlel{children=C} = escalus_stanza:chat_to(To, MsgText),
    Msg = escalus_stanza:set_id(Msg0, escalus_stanza:id()),
    Amp = amp_el(Rules),
    Msg#xmlel{children = C ++ [Amp]}.

%% <amp xmlns=.../> wrapper with one <rule/> child per entry.
amp_el([]) ->
    throw("cannot build <amp> with no rules!");
amp_el(Rules) ->
    #xmlel{name = <<"amp">>,
           attrs = [{<<"xmlns">>, ns_amp()}],
           children = [ rule_el(R) || R <- Rules ]}.

%% Single <rule condition=... value=... action=.../> element; rejects
%% combinations the suite does not know about (see check_rules/3).
rule_el({Condition, Value, Action}) ->
    check_rules(Condition, Value, Action),
    #xmlel{name = <<"rule">>
          , attrs = [{<<"condition">>, a2b(Condition)}
                    , {<<"value">>, a2b(Value)}
                    , {<<"action">>, a2b(Action)}]}.
%% True iff Stanza has an <amp> child with exactly the given status/to/
%% from attributes (the no_*_attr atoms mark expected absence) and all
%% of the expected <rule/> children.
contains_amp(Status, To, From, ExpectedRules, Stanza) when is_list(ExpectedRules)->
    Amp = exml_query:subelement(Stanza, <<"amp">>),
    undefined =/= Amp andalso
        To == exml_query:attr(Amp, <<"to">>, no_to_attr) andalso
        From == exml_query:attr(Amp, <<"from">>, no_from_attr) andalso
        Status == exml_query:attr(Amp, <<"status">>, no_status_attr) andalso
        all_present([ rule_el(R) || R <- ExpectedRules ], exml_query:subelements(Amp, <<"rule">>)).

%% True iff Response carries the <error> structure expected for the
%% given AMP error kind: type=modify, the matching legacy code, the
%% defined-condition marker in the stanzas namespace and a container
%% element listing the offending rules.
contains_amp_error(AmpErrorKind, Rules, Response) ->
    ErrorEl = exml_query:subelement(Response, <<"error">>),
    <<"modify">> == exml_query:attr(ErrorEl, <<"type">>)
        andalso
        amp_error_code(AmpErrorKind) == exml_query:attr(ErrorEl, <<"code">>)
        andalso
        undefined =/= (Marker = exml_query:subelement(ErrorEl, amp_error_marker(AmpErrorKind)))
        andalso
        ns_stanzas() == exml_query:attr(Marker, <<"xmlns">>)
        andalso
        undefined =/= (Container = exml_query:subelement(ErrorEl,
                                                         amp_error_container(AmpErrorKind)))
        andalso
        all_present([ rule_el(R) || R <- Rules ], exml_query:subelements(Container, <<"rule">>)).

%% Generic <error> check by code and type; Code may be the atom 'any'.
contains_error(Code, Type, Response) ->
    ErrorEl = exml_query:subelement(Response, <<"error">>),
    Type == exml_query:attr(ErrorEl, <<"type">>)
        andalso (Code == any orelse Code == exml_query:attr(ErrorEl, <<"code">>)).
%% True iff every element of Needles occurs in Haystack.
%% Simplified to a single lists:all/2 pass over the needles instead of
%% materialising an intermediate list of booleans and folding it.
all_present(Needles, Haystack) ->
    lists:all(fun(Needle) -> lists:member(Needle, Haystack) end, Needles).

%% True iff every element of the list is the atom 'true'.
%% Kept (module is compiled with export_all) for any external callers.
list_and(List) ->
    lists:all(fun(X) -> X =:= true end, List).
%% XMPP stanza-error namespace (RFC 6120).
ns_stanzas() ->
    <<"urn:ietf:params:xml:ns:xmpp-stanzas">>.
%% Validate a rule triple against the combinations this suite supports:
%% deliver x {direct,stored,none} x {notify,error,drop} and
%% match-resource x {any,exact,other} x notify. Throws otherwise.
check_rules(C, V, A) ->
    ValidRules =
        [{deliver, Value, Action} || Value <- [direct, stored, none],
                                     Action <- [notify, error, drop]]
        ++ [{'match-resource', Value, notify} || Value <- [any, exact, other]],
    case lists:member({C, V, A}, ValidRules) of
        true -> ok;
        false -> throw({illegal_amp_rule, {C, V, A}})
    end.
%% Coerce an atom to a UTF-8 binary; binaries pass through unchanged.
a2b(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
a2b(Bin) when is_binary(Bin) -> Bin.
%% Undefined-condition errors return a fully-fledged <amp> element with
%% status="error"; the other errors echo 'thin' <amp>s with no status.
amp_status_attr(<<"undefined-condition">>) -> <<"error">>;
amp_status_attr(_) -> no_status_attr.

%% Legacy numeric error code expected for each AMP error kind.
amp_error_code(<<"undefined-condition">>) -> <<"500">>;
amp_error_code(<<"not-acceptable">>) -> <<"405">>;
amp_error_code(<<"unsupported-actions">>) -> <<"400">>;
amp_error_code(<<"unsupported-conditions">>) -> <<"400">>.

%% Name of the defined-condition child expected inside <error>.
amp_error_marker(<<"not-acceptable">>) -> <<"not-acceptable">>;
amp_error_marker(<<"unsupported-actions">>) -> <<"bad-request">>;
amp_error_marker(<<"unsupported-conditions">>) -> <<"bad-request">>;
amp_error_marker(<<"undefined-condition">>) -> <<"undefined-condition">>.

%% Name of the element that wraps the offending <rule/>s inside <error>.
amp_error_container(<<"not-acceptable">>) -> <<"invalid-rules">>;
amp_error_container(<<"unsupported-actions">>) -> <<"unsupported-actions">>;
amp_error_container(<<"unsupported-conditions">>) -> <<"unsupported-conditions">>;
amp_error_container(<<"undefined-condition">>) -> <<"failed-rules">>.
%% True iff the module is running on the server's host type.
is_module_loaded(Mod) ->
    rpc(mim(), gen_mod, is_loaded, [host_type(), Mod]).

%% Server-side module sets per main group: basic uses privacy only,
%% mam swaps offline storage for MAM, offline uses mod_offline.
required_modules(basic) ->
    mam_modules(off) ++ offline_modules(off) ++ privacy_modules(on);
required_modules(mam) ->
    mam_modules(on) ++ offline_modules(off) ++ privacy_modules(off);
required_modules(offline) ->
    mam_modules(off) ++ offline_modules(on) ++ privacy_modules(on);
required_modules(_) ->
    [].

%% MAM with the async writer disabled, so archiving happens in-band and
%% the mocked archive failures are observed synchronously.
mam_modules(on) ->
    [{mod_mam, mam_helper:config_opts(#{pm => #{}, async_writer => #{enabled => false}})}];
mam_modules(off) ->
    [{mod_mam, stopped}].

%% mod_offline enabled with a custom access rule limiting stored messages.
offline_modules(on) ->
    [{mod_offline, config_parser_helper:mod_config(mod_offline,
        #{access_max_user_messages => max_user_offline_messages})}];
offline_modules(off) ->
    [{mod_offline, stopped},
     {mod_offline_stub, []}].

%% Privacy-list support: mod_privacy plus mod_blocking together.
privacy_modules(on) ->
    [{mod_privacy, config_parser_helper:default_mod_config(mod_privacy)},
     {mod_blocking, config_parser_helper:default_mod_config(mod_blocking)}];
privacy_modules(off) ->
    [{mod_privacy, stopped},
     {mod_blocking, stopped}].
|
5fc6580be09f4b8d22dee2ac5a09011fec4a03842ffd52a51b7c73a522598787 | avalor/eJason | variables.erl | -module(variables).
-compile(export_all).
-include("include/macros.hrl").
-include("include/parser.hrl").
-include("include/variables.hrl").
-include("include/records.hrl").
-define(ERLTOEJASONVAR, "ERLTOEJASONVAR").
%% Checks whether the leftmost variable/atom can be matched to the
%% rightmost one. (e.g. a(b) matches a(b)[c]
%% but a(b)[c] does not match a(b)
%%
%% NOTE that annotations are ignored. If used in the belief base, they
%% must be ignored.
%%
%% Note: most of the "unification magic" is done here
%%
%% Returns either "false" or an iterator over the newly matched variables.
%% The return value is an iterator because there can be several possible
%% matching due to the annotations of the variables
match_vars(Bindings,
#var{id = ID1},
#var{id = ID2}) when ID1 == ID2;
ID1 == ?UNDERSCORE;
ID2 == ?UNDERSCORE->
%%io:format("ID1 : ~p~nID2: ~p~n",[ID1,ID2]),
iterator:create_iterator([Bindings]);
match_vars(Bindings,
Var1= #var{ functor = Func1,
args = Args1},
Var2=#var{functor = Func2,
args = Args2}) ->
io : format("Bindings : ~p ~ ] ) ,
%% io:format("\nVariables:match -> Var1: ~p~n\tVar2: ~p~n",
%% [Var1,Var2]),
Res = case {Func1,Func2} of
{?NOFUNCTOR,?NOFUNCTOR} when Args1==?UNBOUND,Args2 == ?UNBOUND;
Args1==?UNBOUND,Args2 ==
?UNBOUNDLIST;
Args1==?UNBOUNDLIST,Args2 ==
?UNBOUNDLIST->
Var1 and Var2 are unbound whether lists or not
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
%io:format("NewVar: ~p~n",[NewVar]),
iterator:create_iterator([update(Bindings,
[NewVar])]);
%%%%%%
{?NOFUNCTOR,?NOFUNCTOR} when Args1==?UNBOUNDLIST,
Args2 == ?UNBOUND->
Var1 and Var2 are unbound , but Var1 must be a list
%% Therefore, Var2 must be a list as well
NewVar1 =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
NewVar2 =
Var2#var{args =?UNBOUNDLIST},
%% io:format("NewVar1: ~p~n",[NewVar1]),
%% io:format("NewVar2: ~p~n",[NewVar2]),
iterator:create_iterator([update(Bindings,
[NewVar1,NewVar2])]);
%%%%%%
{?NOFUNCTOR,_} when Args1 == ?UNBOUND, Args2 == ?ISLIST;
Args1 == ?UNBOUND, Args2 == ?ISATOM;
Args1 == ?UNBOUND, Args2 == ?STRONGNEG;
Args1 == ?UNBOUNDLIST, Args2 == ?ISLIST;
Args1 == ?UNBOUNDLIST, Args2 == ?ISATOM, Func2 == [] ->
only Var1 is unbound , Var2 is atom / list / string / strongneg
Check if Var2 contains Var1
case check_contains(Bindings,Var1#var.id,Var2) of
true ->
%% e.g. A = A[3], A = [1,A,3]
false;
false ->
%% io:format("Not Contained1~n~n"),
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
%% io:format("NewVar: ~p~n",[NewVar]),
io : format("Bindings : ~p ~ ] ) ,
%% io:format("Updated is: ~p~n",
[ update(Bindings,[NewVar ] ) ] ) ,
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
%%%%%%
{?NOFUNCTOR,_} when Args1 == ?UNBOUND,Args2 =/= ?ISREF,
Func2 == {Var1#var.id}->
Var1 is unbound , Var2 is a struct whose functor is Var1
%% e.g. A = A[4]
iterator:create_iterator([Bindings]);
%%%%%%
{?NOFUNCTOR,_} when Args1 == ?UNBOUND,Args2 =/= ?ISREF->
only Var1 is unbound , Var2 is a struct ( not a ref then )
%% If Var2 is a ref, the matching is further attempted with the referred var
Check if Var2 contains Var1
case check_contains(Bindings,Var1#var.id,Var2) of
true ->
%% e.g. A = pred(a,A)
false;
false ->
%%io:format("Not Contained2~n~n"),
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
%%%%%%
{_,?NOFUNCTOR} when Args2 == ?UNBOUND, Args1 == ?ISLIST;
Args2 == ?UNBOUND,Args1 == ?ISATOM;
Args2 == ?UNBOUND,Args1 == ?STRONGNEG;
Args2 == ?UNBOUNDLIST, Args1 == ?ISLIST;
Args2 == ?UNBOUNDLIST, Args1 == ?ISATOM, Func1 ==[]->
only Var2 is unbound , Var1 is atom / list / string
case check_contains(Bindings,Var2#var.id,Var1) of
true ->
%% e.g. [1,A,3] = A;
false;
false ->
%% io:format("Not Contained3~n~n"),
NewVar =
Var2#var{functor = {Var1#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
%%%%%%
{_,?NOFUNCTOR} when Args2 == ?UNBOUND,
Args1 =/= ?ISREF->% only Var2 is unbound
only Var2 is unbound , Var1 is a struct ( not a ref then )
If Var1 is a ref , the matching is further attempted with the referred var
case check_contains(Bindings,Var2#var.id,Var1) of
true ->
e.g. pred(B , C ) = A ;
false;
false ->
%% io:format("Not Contained3~n~n"),
NewVar =
Var2#var{functor = {Var1#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
%%%%%%
{{Ref1}, {Ref2}} when Args1 == ?ISREF,
Args2 == ?ISREF->
%%Fun1 and Fun2 are a reference to a var
match_vars(Bindings,
get_var(Ref1,Bindings),
get_var(Ref2,Bindings));
%%%
{{Ref1}, _} when Args1 == ?ISREF-> %Fun1 is a reference
match_vars(Bindings,
get_var(Ref1,Bindings),Var2);
%%%
{_, {Ref2}} when Args2 == ?ISREF-> %Fun2 is a reference
match_vars(Bindings,
Var1,get_var(Ref2,Bindings));
%%%%%%
{Atom,Atom} when Args1 == ?ISATOM,
Args2 == ?ISATOM->
%%Fun1 and Fun2 are atoms
iterator:create_iterator([Bindings]);
%%%%
{{_Header1,_Tail1}, {_Header2,_Tail2}}
Matching two lists
match_lists(Bindings,Var1,Var2);
%%%%%%
{{Ref1}, {Ref2}} when Args1 == ?STRONGNEG,
Args2 == ?STRONGNEG->
%%Fun1 and Fun2 are strong negations
match_vars(Bindings,
get_var(Ref1,Bindings),
get_var(Ref2,Bindings));
%%%%%%%%%%%%%%%%%%%%%%STRUCTS (the hard part)
{{_},{_}} when is_tuple(Args1), is_tuple(Args2),
size(Args1) == size(Args2)->
Var1 and Var2 represent full structs
%%These structs can be corrupted(e.g 1[B]), so we correct them
%%Note: possible source of inneficiency
{CorrectedBindings1,NewVar1} =
correct_structs(Bindings,Var1),
{Bindings2,NewVar2} =
correct_structs(CorrectedBindings1,Var2),
#var{functor = NewFunc1,
args = NewArgs1,
annots = NewAnnots1} = NewVar1,
#var{functor = NewFunc2,
args = NewArgs2,
annots = NewAnnots2} = NewVar2,
%% io:format("FuncVar1: ~p~n",[FuncVar1]),
if is_tuple(NewArgs1), is_tuple(NewArgs2),
size(NewArgs2) == size(NewArgs1) ->
NewVar1 and NewVar2 are full structs with the same number of args
%% Then, the code can be reused
{Ref1} = NewFunc1,
{Ref2} = NewFunc2,
FuncVar1 =
get_var(Ref1,Bindings2),
FuncVar2 =
get_var(Ref2,Bindings2),
case
match_vars(Bindings2,FuncVar1,FuncVar2) of
false ->
false;
ItNewBindings ->
%% io:format("ItNewBindings: ~p~n",[ItNewBindings]),
ArgumentPairs = lists:zip(tuple_to_list(NewArgs1),
tuple_to_list(NewArgs2)),
%io:format("Pairs: ~p~n",
% [ArgumentPairs]),
%% This function uses the iterators for the bindings of
%% each argument to try matching the next arguments
Match = fun (_,false) ->
false;
({{Elem1},{Elem2}},ItUseBindings) ->
%% Use the iterator for the next argument
FunNextArgs =
fun (NextArgBindings) ->
match_vars(
NextArgBindings,
get_var(Elem1,
NextArgBindings),
get_var(Elem2,
NextArgBindings))
end,
iterator:create_iterator_fun(ItUseBindings,
FunNextArgs)
end,
There can be several unifications for the variables in functor+args
ItArgsBindings=lists:foldl(Match,ItNewBindings,
ArgumentPairs),
AnnotFun =
%% Function that matches the annotations
fun (false)->
false ;
(ArgsBindings) ->
match_annotations(
ArgsBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2))
end,
iterator:create_iterator_fun(ItArgsBindings,
AnnotFun)
end;
true ->
NewVar1 and NewVar2 are not two structs with same number of args
match_vars(Bindings2,NewVar1,NewVar2)
end;
%%%%%%%%%%%%
{{_},_} when is_tuple(Args1)->
Var1 is a struct e.g. : A[B ]
%%This struct can be corrupted(e.g 1[B]), so we correct it
%% Var2 is also corrected in case it was a struct
%%Note: possible source of inneficiency
{CorrectedBindings1,NewVar1} =
correct_structs(Bindings,Var1),
{Bindings2,NewVar2} =
correct_structs(CorrectedBindings1,Var2),
#var{functor = NewFunc1,
args = NewArgs1,
annots = NewAnnots1} = NewVar1,
#var{functor = NewFunc2,
args = NewArgs2,
annots = NewAnnots2} = NewVar2,
%% io:format("NewVar1: ~p~n",[NewVar1]),
%% io:format("NewVar2: ~p~n",[NewVar2]),
if is_tuple(NewArgs1)->
NewVar1 is still a struct
%% Then, the code can be reused
{Ref1} = NewFunc1,
case get_var(Ref1,Bindings2) of
%%%%% Functor is an unbound variable, its functor and args
%%%%% may vary
UnboundVar =#var{args = ?UNBOUND} ->
if
NewArgs2 == ?ISATOM, NewAnnots1 == [];
Var2 is an atom e.g A [ ] = " 123 "
NewArgs2 == ?ISLIST, NewAnnots1 == [];
%% Var2 is a list e.g A[] = [1,2]
NewArgs2 == ?STRONGNEG, NewAnnots1 == [] ->
%% Var2 is a negation struct e.g A[] = ~a(b,c)[L],
%% Binding the unbound var
BoundVar =
UnboundVar#var{functor =
{NewVar2#var.id},
args = ?ISREF,
annots = []},
%% Binding the struct to the var
FinalVar1 =
NewVar1#var{args = ?ISREF,
functor = {Ref1},
annots = []},
NewBindings =
update(Bindings2,[BoundVar,
FinalVar1]),
iterator:create_iterator([NewBindings]);
%%%%%%
is_tuple(NewArgs2)->
%% Var2 is a (bound) struct: A[B] = c(d,e)[L]
%% A[B] = c[L]
%% The unbound var is matched to the struct Var2 without the annotations
e.g. A = c(d , e )
BoundVar =
UnboundVar#var{functor = NewFunc2, args = NewArgs2,annots = []},
Var1 is bound to the Struct Var2 plus the annotations of Var1
FinalVar1 =
NewVar1#var{ functor = NewFunc2, args = NewArgs2},
%% Now, the annotations must be matched,
UseBindings =
update(Bindings2,[BoundVar,FinalVar1]),
match_annotations(
UseBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2));
%%%%%%
true ->
%% No more possibilities
false
end;
%%%%%%%%%%%%%%%%%%% Functor is a bound variable and NewVar2 is a struct
_ when is_tuple(NewArgs2)->
{Ref2} = NewFunc2,
case get_var(Ref2,Bindings2) of
Functor2 is an unbound variable
%%%%% e.g: a(b,c)[annot] = A[L],
UnboundVar =#var{args = ?UNBOUND} ->
The unbound var is matched to the struct NewVar1 without the annotations
e.g. a(b , c)=A
BoundVar =
UnboundVar#var{functor = NewFunc1, args = NewArgs1,annots = []},
FinalVar2 is bound to the Struct NewVar1 plus the annotations of Var2
FinalVar2 =
NewVar2#var{functor = NewFunc1, args = NewArgs1},
%% Now, the annotations must be matched,
UseBindings =
update(Bindings2,[BoundVar,FinalVar2]),
match_annotations(
UseBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2));
%%%%%%
BoundVar = #var{} when size(NewArgs1) == size(BoundVar#var.args)->
%% The number of args is now equal. Iterate and compare again
match_vars(Bindings2,NewVar1,NewVar2);
_ ->
false
%% The number of args is different, otherwise the previous clause
%% would've matched
end;
_ ->
false %% no other possibilities left, are there?
end;
true ->
NewVar1 is no longer a struct , then iterate
match_vars(Bindings2,NewVar1,NewVar2)
end;
%%%%%%%%%%%%%%%
{_,{_}} when is_tuple(Args2)->
Var2 is a struct while Var1 is not
{Bindings2,NewVar2} =
correct_structs(Bindings,Var2),
#var{functor = NewFunc2,
args = NewArgs2} = NewVar2,
{Ref2} = NewFunc2,
if is_tuple(NewArgs2) ->
%% NewVar2 is still a tuple, then reuse code
case get_var(Ref2,Bindings2) of
%%%%% NewFunctor2 is an unbound variable
UnboundVar =#var{args = ?UNBOUND} ->
if
%%Note: the necessity of NewAnnot2 ==[] is dropped as
%% it is in the right-hand side of the equality
Args1 == ?ISATOM;
Var1 is an atom e.g " 123 " = A[B ]
Args1 == ?ISLIST;
Var1 is a list e.g [ 1,2 ] = A[B ]
Args1 == ?STRONGNEG ->
Var1 is a negation struct e.g ~a(b , c)[L ] = A[B ]
%% Binding the unbound var
BoundVar =
UnboundVar#var{functor =
{Var1#var.id},
args = ?ISREF,
annots = []},
%% Binding the struct to the var and dropping its annotations
FinalVar2 =
NewVar2#var{args = ?ISREF,
functor = {Ref2},
annots = []},
NewBindings =
update(Bindings2,[BoundVar,
FinalVar2]),
iterator:create_iterator([NewBindings]);
true ->
false
end;
#var{args = ?ISATOM,
functor = FunctorAtom}
when Args1 == ?ISATOM, NewArgs2 == {},
Func1==FunctorAtom ->
Var1 and NewVar2 can be the same atom :
%% e.g. a = a[b]
%% No new matchings
iterator:create_iterator([Bindings2]);
_Other ->
%% io:format("FuncVar2 is a bound var: ~p~n",
%% [Other]),
false
end;
true ->
%% Var2 is no longer a struct, then iterate
match_vars(Bindings2, Var1,NewVar2)
end;
%%%%%%
_ ->
%io:format("Size1: ~p~nSize2: ~p~n",[size(Args1),
% size(Args2)]),
%% io:format("Vars can never match -> Var1: ~p~n\tVar2: ~p~n",
%% [Var1,Var2]),
false
end, %% case {Func1,Func2}
%% io:format("Result for match_vars: ~p~n",[Res]),
case Res of
%% false ->
%% io:format("FALSE!~n~n");
%% _ ->
%% io:format("Match!~n~n")
%% end,
Res;
match_vars(_Bindings,P1,P2) ->
io:format("[variables:match_vars/2, error] \nP1: ~p~nP2: ~p~n",[P1,P2]),
a = b.
%% Tries to match every annotation in the first list against some
%% annotation drawn from the iterator ItAnnots2 (one annotation on the
%% right may satisfy several on the left).
%%
%% Returns either false or an iterator over the binding sets produced
%% by every possible combination of matches.
match_annotations(Bindings,
                  [],
                  _ItAnnots2) ->
    %% ALL ANNOTATIONS MATCHED: success!
    iterator:create_iterator([Bindings]);
match_annotations(Bindings, [{Annot1}|Rest],ItAnnots2) ->
    Var1 = get_var(Annot1,
                   Bindings),
    %% Tries to match Annot1 against one candidate from ItAnnots2.
    MatchAnnot1Fun =
        fun ({AnnotFrom2}) ->
                Var2 = get_var(AnnotFrom2,Bindings),
                match_vars(Bindings, Var1,
                           Var2) end,
    %% Iterator over all the ways Annot1 alone can be matched.
    ItMatchesForAnnot1 =
        iterator:create_iterator_fun(ItAnnots2,MatchAnnot1Fun),
    MatchRestFun =
        fun (false) -> %% Some annotation not matched
                false;
            (MatchesForAnnot1) ->
                %% Annot1 matched under these bindings; recurse on the
                %% remaining annotations (ItAnnots2 is reused in full).
                match_annotations(MatchesForAnnot1, Rest,ItAnnots2)
        end,
    %% If Annot1 can be matched, match the rest.
    iterator:create_iterator_fun(ItMatchesForAnnot1,MatchRestFun).
%% Returns a variable without variable references (if possible) and
%% without vars in the functor/args/annots, i.e. replaces vars with
%% their corresponding values by recursively resolving Bindings.
%% Accepts either a variable id (atom) or a #var record.
get_valuated_var(ID,Bindings)
  when is_atom(ID)->
    Var = get_var(ID,Bindings),
    get_valuated_var(Var,Bindings);
get_valuated_var(Var = #var{functor = Func,
                            args = Args,
                            annots = Annots},
                 Bindings) ->
    {NewFunc,NewArgs,NewAnnots} =
        case {Func,Args} of
            {_,?ISATOM} when is_atom(Func)->
                %% An atom: only the annotations need valuating.
                {Func,Args,
                 lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
                           Annots)};
            {?NOFUNCTOR,?UNBOUND} -> %unbound variable
                {Func,Args,
                 lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
                           Annots)};
            {{VarRef}, ?ISREF}-> % Functor is a reference to a variable
                %% Follow the reference and take over the referred
                %% variable's functor/args/annots.
                ReferredVar =
                    get_valuated_var(VarRef,Bindings),
                {ReferredVar#var.functor, ReferredVar#var.args,
                 ReferredVar#var.annots};
            {{VarRef}, _ }-> % A structure
                %% NOTE(review): unlike the ?ISREF branch, here the new
                %% functor is the whole referred #var record — confirm
                %% downstream consumers (e.g. correct_structs) expect this.
                ReferredVar =
                    get_valuated_var(VarRef,Bindings),
                ValuatedArgs =
                    lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
                              tuple_to_list(Args)),
                ValuatedAnnots =
                    lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
                              Annots),
                {ReferredVar,
                 list_to_tuple(ValuatedArgs),
                 ValuatedAnnots}
        end,
    ReturnVar = #var{
      %% No id is set: the record keeps the default id, which is
      %% acceptable here since any name should work.
      functor = NewFunc,
      args = NewArgs,
      annots = NewAnnots
     },
    ReturnVar.
%% We must: 1) change the names of the variables in the params so that
%% there are no clashes with existing ones; 2) add the newly generated
%% variables to the bindings; 3) record the correspondence so the
%% renaming can be reverted after the execution of the plan.
%%
%% Replaces all the bindings received with new names built from Prefix.
%% Returns {NewBindings, Replacements} where Replacements maps
%% old id -> new id.
replace_bindings(Prefix,Bindings) ->
    ListBindings = orddict:to_list(Bindings),
    ListValues = [Value || {_key,Value} <- ListBindings],
    Replacements = obtain_replacements(Prefix,length(Bindings),ListValues),
    NewListValues = use_replacements(ListValues,Replacements),
    NewBindings =
        update([],NewListValues),
    {NewBindings,Replacements}.
%% Obtains the id replacements for the variables in a list of vars.
%% Num offsets the numbering of the freshly generated names.
%%
%% Numbers, atoms and '[]' (such that functor =:= id) are
%% spared the change of name.
obtain_replacements(Prefix,Num,VarList) ->
    Result =
        obtain_replacements(Prefix,Num,VarList,[]),
    Result.
obtain_replacements(_Prefix,_Num,[],Replacements) ->
    %% TODO: add this replacement only when there are lists.
    %% '[]' always maps to itself so list tails survive the renaming.
    orddict:store(
      '[]',
      '[]',
      Replacements);
obtain_replacements(Prefix,Num,[Value|Rest],Replacements) ->
    %% Fold each binding's variable(s) into the replacement map.
    NewReplacements =
        my_replace_vars(Prefix, Num, Value,Replacements),
    obtain_replacements(Prefix,Num,Rest,NewReplacements).
my_replace_vars(Prefix , Param , ) when is_integer(Num)- >
my_replace_vars(Prefix , , , [ ] ) .
Returns a list of [ { VarID , NewVarID } ] for each binding
%% It is invoked by obtain_replacements.
%%TODO: use a more efficient way of knowing the amount of new variables
my_replace_vars(_Prefix, _Num,
Var = #var{id = ID, args = Args, functor = Func},
Replacements) when Args == ?ISATOM, Func =:= ID;
ID == '[]'->
%% if
Args = = ? ISATOM - > io : format("Atomvar spared : ~p ~ n " ,
%% [Var]);
%% true ->
%% ok
%% end,
%% io:format("rep var: ~p~n",[Var]),
case orddict:find(ID, Replacements) of
{ok, _} ->
Replacements;
error ->
%% Atoms where Func =:= ID are not changed
orddict:store(ID,
ID,
Replacements)
end;
my_replace_vars(Prefix, Num,
Var = #var{id = Id, args = Args},
Replacements) when Args == ?ISATOM;
Args == ?UNBOUND;
Args == ?UNBOUNDLIST->
%% if
Args = = ? ISATOM - > io : format("Atomvar renamed : ~p ~ n " ,
%% [Var]);
%% true ->
%% ok
%% end,
%% io:format("rep var: ~p~n",[Var]),
case orddict:find(Id, Replacements) of
{ok, _} ->
Replacements;
error ->
%% Counts how many of the replacements introduce new variables that use
%% the prefix
FilterFun =
fun ({A,A}) ->
%% io:format("not counting: ~p~n",[A]),
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID =
list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,Replacements))+Num)),
io : format("Adding1 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
NewVar = Var#var{id = NewVarID } ,
orddict:store(Id,
NewVarID,
Replacements)
end;
my_replace_vars(Prefix, Num,
Var =#var{id = Id,functor = Func, args = Args},
Replacements) when Args == ?ISREF;
Args == ?STRONGNEG->
%% io:format("rep var: ~p~n",[Var]),
case orddict:find(Id, Replacements) of
{ok, _} ->
Replacements;
error ->
%% Counts how many of the replacements introduce new variables that use
%% the prefix
FilterFun =
fun ({A,A}) ->
%% io:format("not counting: ~p~n",[A]),
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID =
list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,Replacements))+Num)),
io : format("Adding2 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
my_replace_vars(Prefix,Num,
Func,
orddict:store(Id,
NewVarID,
Replacements))
end;
my_replace_vars(_Prefix, _Num,
?EMPTYLISTVAR,
Replacements) ->
%% io:format("rep var: ~p~n",[?EMPTYLISTVAR]),
Replacements;
my_replace_vars(Prefix, Num,
Var =#var{id = Id, functor = {Header,Tail}, args = ?ISLIST},
Replacements) ->
%% io:format("rep var: ~p~n",[Var]),
case orddict:find(Id,Replacements) of
{ok,_} ->
Replacements;
error ->
%% Function to apply replacements of a list of vars
Fun = fun (X,AccumReplacements) ->
my_replace_vars(Prefix,Num,X,AccumReplacements) end,
HeaderReplacements =
lists:foldl(Fun,
Replacements,
Header),
io : : ~p ~ n",[Tail ] ) ,
TailReplacements =
case Tail of
%% [{'[]'}]->
%% orddict:store('[]',
%% '[]',
%% HeaderReplacements);
%% [#var{id = '[]'}]->
%% orddict:store('[]',
%% '[]',
%% HeaderReplacements);
%% _ ->
lists:foldl(Fun,
HeaderReplacements,
Tail),
%%end,
%% Counts how many of the replacements introduce new variables that use
%% the prefix
FilterFun =
fun ({A,A}) ->
%% io:format("not counting: ~p~n",[A]),
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID = list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,
TailReplacements))+
Num)),
io : format("Adding3 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
orddict:store(Id,
NewVarID,
TailReplacements)
end;
my_replace_vars(Prefix, Num,
Var = #var{id = Id, functor = Func,
args = Args, annots = Annots},
Replacements) when is_tuple(Args)->
%% io:format("rep var: ~p~n",[Var]),
Input is a struct
case orddict:find(Id,Replacements) of
{ok,_} ->
Replacements;
error ->
Fun = fun (X,AccumReplacements) ->
my_replace_vars(Prefix,Num,X,AccumReplacements) end,
ReplacementsFunc =
lists:foldl(Fun,
Replacements,
[Func]),
% io:format("ReplacementsFunc: ~p~n",
[ ReplacementsFunc ] ) ,
ReplacementsArgs =
lists:foldl(Fun,
ReplacementsFunc,
tuple_to_list(Args)),
%% io:format("ReplacementsArgs: ~p~n",
[ ReplacementsArgs ] ) ,
ReplacementsAnnots =
lists:foldl(Fun,
ReplacementsArgs,
Annots),
%% Counts how many of the replacements introduce new variables that use
%% the prefix
FilterFun =
fun ({A,A}) ->
%% io:format("not counting: ~p~n",[A]),
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID = list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,
ReplacementsAnnots))+
Num)),
%% io:format("ReplacementsAnnots: ~p~n",[ReplacementsAnnots]),
io : format("Adding4 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
% io:format("ReplacementsAnnots: ~p~n",
% [ReplacementsAnnots]),
orddict:store(Id,
NewVarID,
ReplacementsAnnots)
end;
my_replace_vars(Prefix , , # binary_operation {
%% left_part = Left,
%% right_part = Right},Replacements) ->
%% %% Binary functions are fully valuated
LeftReplacements =
my_replace_vars(Prefix , , Left , Replacements ) ,
AllReplacements =
my_replace_vars(Prefix , , Left , LeftReplacements ) ,
AllReplacements ;
my_replace_vars(Prefix, Num, {VarRef},Replacements) ->
VarRef is not replaced , as it could be an atom
Replacements.
%% case orddict:find(VarRef,Replacements) of
%% {ok,_} ->
%% Replacements;
%% error ->
%% NewVarID = list_to_atom(Prefix++
%% integer_to_list(length(Replacements)+
) ) ,
%% orddict:store(VarRef,
%% NewVarID,
%% Replacements)
%% end.
%% Applies the replacement of ids for the variables in a list or a var.
%%
%% Returns the replaced var(s). Atom references ({VarRef}) and list
%% tails are looked up in Replacements (a strict find: a missing id
%% crashes with badmatch, signalling an incomplete replacement map).
%%
%% FIX: the `case Tail of' line below had lost its `%%' comment marker
%% (the matching `%end,' survived), leaving a clause-less case
%% expression that cannot parse; it is restored as a comment.
use_replacements(VarList,Replacements) when is_list(VarList)->
    Fun = fun(Var) ->
                  use_replacements(Var,Replacements) end,
    lists:map(Fun,VarList);
use_replacements(?EMPTYLISTVAR, _Replacements) ->
    ?EMPTYLISTVAR;
use_replacements(Var=#var{id=ID,
                          functor = Func, args =Args,
                          annots =Annots},
                 Replacements) ->
    %% Shorthand to recurse with the same replacement map.
    Fun = fun (Vars) ->
                  use_replacements(Vars,Replacements) end,
    {ok,NewID} =
        orddict:find(ID,Replacements),
    NewVar =
        case Args of
            _ when Args == ?UNBOUND;
                   Args == ?ISATOM;
                   Args == ?UNBOUNDLIST->
                %% Only the id changes; functor/args stay as they are.
                Var#var{id = NewID};
            _ when Args == ?ISREF;
                   Args == ?STRONGNEG->
                %% The functor is itself a reference: rename it too.
                CreatedVar =Var#var{id = NewID,
                                    functor = Fun(Func)},
                CreatedVar;
            _ when Args == ?ISLIST->
                %% A list: rename header elements and the tail.
                {Header,Tail} = Func,
                NewHeader =
                    Fun(Header),
                NewTail =
                    %% case Tail of
                    %%     [{'[]'}]->
                    %%         [{'[]'}];
                    %%     _ ->
                    Fun(Tail),
                %% end,
                NewVar =
                    Var#var{id = NewID,
                            functor = {NewHeader,NewTail}};
            _ when is_tuple(Args)->
                %% A struct: rename functor, every arg and every annot.
                NewFunc =
                    Fun(Func),
                NewArgs =
                    list_to_tuple(Fun(tuple_to_list(Args))),
                NewAnnots =
                    Fun(Annots),
                Var#var{id = NewID,
                        functor = NewFunc,
                        args = NewArgs,
                        annots = NewAnnots}
        end,
    NewVar;
use_replacements({VarRef},Replacements) ->
    {ok,NewVarRef} = orddict:find(VarRef,Replacements),
    {NewVarRef}.
use_replacements(BO =
%% #binary_operation{left_part = Left,
%% right_part = Right},
%% Replacements) ->
BO#binary_operation {
%% left_part = use_replacements(Left,Replacements),
%% right_part = use_replacements(Right, Replacements)}.
%% Matches the variables in two lists.
%% Returns an iterator over the resulting binding sets, or false.
%%
%% FIX: three clause heads (`match_lists(Bindings,' followed by a `%%'
%% trailing comment) were destroyed by comment stripping, leaving bare
%% argument patterns; they are reconstructed here following the pattern
%% of the surviving "Lists of different size" clause. The unused
%% binding in that clause is also anonymised to silence the warning.
match_lists(Bindings,               %% Two empty lists
            ?EMPTYLISTVAR,
            ?EMPTYLISTVAR)->
    iterator:create_iterator([Bindings]);
%% TODO: try to correct the lists so that it is possible to check when
%% these lists have different size
match_lists(_Bindings,              % Lists of different size
            #var{args = ?ISLIST,
                 functor = {Header1,
                            [{'[]'}]}
                },
            #var{args = ?ISLIST,
                 functor = {Header2,
                            [{'[]'}]}
                }
           ) when length(Header1) =/= length(Header2)->
    false;
match_lists(Bindings,               %% First list is empty
            ?EMPTYLISTVAR,
            #var{args = ?ISLIST,
                 functor = {[{LastElement}],
                            [{Tail}]}}
           )->
    case valuate(Bindings,
                 get_var(Tail,Bindings)) of
        ?EMPTYLISTVAR ->
            %% Tail is already the empty list, so the single header
            %% element must itself match the empty list.
            match_vars(Bindings,?EMPTYLISTVAR,
                       get_var(LastElement,Bindings));
        _ ->
            false
    end;
match_lists(Bindings,               %% Second list is empty
            #var{args = ?ISLIST,
                 functor = {[{LastElement}],
                            [{Tail}]}},
            ?EMPTYLISTVAR
           )->
    case valuate(Bindings,
                 get_var(Tail,Bindings)) of
        ?EMPTYLISTVAR ->
            %% Symmetric to the previous clause.
            match_vars(Bindings,get_var(LastElement,Bindings),
                       ?EMPTYLISTVAR);
        _ ->
            false
    end;
match_lists(Bindings,
            #var{args = ?ISLIST,
                 functor = {Header1,Tail1}},
            #var{args = ?ISLIST,
                 functor = {Header2,
                            Tail2}})-> %% Comparing elements
    match_elems_in_list(Bindings,
                        Header1++[Tail1],
                        Header2++[Tail2]).
%% Receives the elements of two lists (headers with the tail appended)
%% that must be matched pairwise.
%% If a match can be found the bindings are updated (an iterator over
%% the possible binding sets is returned); otherwise false.
match_elems_in_list(Bindings,
                    [[{ElemInTail1}]],
                    [[{ElemInTail2}]])-> % Matching tails
    match_vars(Bindings,
               get_var(ElemInTail1,Bindings),
               get_var(ElemInTail2,Bindings));
match_elems_in_list(Bindings,
                    [[{'[]'}]],
                    List2)when length(List2) > 1->
    %% One element is the empty list while the other still has at least
    %% one element in the header: no match possible.
    false;
match_elems_in_list(Bindings,
                    [[{Elem1}]],
                    List2)->
    %% Matching the tail of list 1 against the remainder of list 2:
    %% wrap that remainder in a fresh list variable and match Elem1
    %% against it.
    NewListVarId = list_to_atom(lists:flatten("EJASONLISTVAR"++
                          integer_to_list(length(Bindings)))),
    {NewHeader,[NewTail]} = lists:split(length(List2)-1,List2),
    NewListVar =
        #var{id = NewListVarId,
             functor = {NewHeader,NewTail},
             args = ?ISLIST},
    NewBindings =
        orddict:store(NewListVarId,
                      NewListVar,
                      Bindings),
    case match_vars(NewBindings,
                    NewListVar,
                    get_var(Elem1,NewBindings)) of
        false ->
            false;
        ItNewNewBindings ->
            ItNewNewBindings
    end;
match_elems_in_list(Bindings,
                    List1,
                    [[{Elem2}]])->
    %% Symmetric case: matching the tail of list 2 with list 1.
    match_elems_in_list(Bindings,
                        [[{Elem2}]],
                        List1);
match_elems_in_list(Bindings,
                    [{Elem1}|Elems1],
                    [{Elem2}|Elems2])->
    % Not in the tail yet
    case match_vars(
           Bindings,
           get_var(Elem1,Bindings),
           get_var(Elem2,Bindings)) of
        false ->
            false;
        ItNewBindings->
            %% For every way the heads match, try the remaining elems.
            MatchFun =
                fun (NewBindings) ->
                        match_elems_in_list(NewBindings,
                                            Elems1,
                                            Elems2) end,
            iterator:create_iterator_fun(ItNewBindings,
                                         MatchFun)
    end.
%% %% Returns a list with all the vars contained inside the input var(s)
%% %% The input var included
%% %% TODO: check difference with jasonNewParser:find_vars
%% %% TODO: check difference with variables:vars_to_import
%% gather_vars([]) ->
%% [];
%% gather_vars(VarList) when is_list(VarList)->
Fun = fun(Var , Acc ) - > gather_vars(Var , Acc ) end ,
lists : foldl(Fun,[],VarList ) ;
%% gather_vars(Var) ->
%% gather_vars(Var,[]).
gather_vars(Var = # var{args = ,
Acc ) when = = ? ISATOM ; = = ? UNBOUND ; = = ? ISREF ;
= = ? STRONGNEG ; = = ? UNBOUNDLIST- >
%% [Var|Acc];
%% gather_vars(Var = #var{functor = {Header,Tail},
%% args = ?ISLIST},
) - >
%% Fun =
fun(X ) - > gather_vars(X ) end ,
%% Vars = lists:flatten(lists:map(Fun, lists:append([Header,Tail]))),
lists : flatten(lists : append([[Var|Acc ] , ] ) ) ;
gather_vars(Var = # var{functor = Func , args = , annots = Annots } ,
) - > % % Var is a struct
%% Fun =
fun(X ) - > gather_vars(X ) end ,
VarsFunc = Fun(Func ) ,
VarsArgs = lists : map(Fun , ) ) ,
VarsAnnots = lists : map(Fun , ) ,
lists : flatten(lists : append([[Var|Acc],VarsFunc , VarsArgs , VarsAnnots ] ) ) ;
%% gather_vars({_Ref},Acc) ->
Acc ;
%% gather_vars({parenthesis,
%% Element},Acc) ->
%% lists:flatten(lists:append(
%% [gather_vars(Element),
%% Acc]));
gather_vars(#binary_operation{left_part = BodyLeft ,
right_part = BodyRight } ,
%% Acc) ->
%% lists:flatten( lists:append(
%% [lists:map(
%% fun variables:gather_vars/1,
BodyLeft ) ,
%% lists:map(fun variables:gather_vars/1,
BodyRight ) ,
%% Acc])).
%% %% gather_vars(#predicate{name = Name,
% % arguments = , annotations = Annot } ,
% % Acc ) - >
% % Fun = fun(X ) - > gather_vars(X ) end ,
%% %% lists:flatten(lists:append(
% % lists : ) ) ) .
% Finds a variable in the bindings (an orddict keyed by variable id).
% Returns - the #var record when ID is bound in Bindings;
%         - the empty-list variable for the special id '[]'.
% A lookup miss is a programming error: the warning is still printed,
% and the former opaque crash (`a=b', a badmatch that also made the
% documented `{ID}' fallback unreachable dead code) is replaced by a
% descriptive error so the failing id appears in the crash reason.
get_var('[]',_)->
    ?EMPTYLISTVAR;
get_var(ID,Bindings)->
    case orddict:find(ID,Bindings) of
        {ok,Var}->
            Var;
        error ->
            io:format("[variables.erl] Warning: variable ~p not found,\n",
                      [ID]),
            erlang:error({variable_not_found, ID})
    end.
%% Updates the bindings orddict for the variables.
%% First argument: the binding list (orddict) to be updated.
%% Second argument: a list of #var records to store, each keyed by its
%% own id. Returns the new bindings.
update(Bindings,NewVars) ->
    lists:foldl(fun (Var, Acc) ->
                        orddict:store(Var#var.id, Var, Acc)
                end,
                Bindings,
                NewVars).
%% %% Strip a struct of all elements that are not
%% %% variable records
%% retain_variables(Atom) when is_atom(Atom) ->
%% [];
%% retain_variables(Var) when is_record(Var,var)->
Var ;
%% retain_variables(Tuple) when is_tuple(Tuple)->
%% retain_variables(tuple_to_list(Tuple));
%% retain_variables(List) when is_list(List)->
%% Fun = fun (X) ->
%% retain_variables(X) end,
%% lists:flatten(lists:map(
%% Fun,
%% List)).
%% %% Gets all the variables in a list that are already bound
%% get_ground_variables(List) when is_list(List) ->
Fun = fun ( # var{is_ground = IG } ) - >
IG end ,
%% lists:filter(Fun,List).
%% %% Valuates a list of params using its bindings
%% valuate_return_params(Params,Bindings)->
%% Fun = fun (X) ->
variables : valuate_param(X , ) end ,
%% % io:format("Ret Params: ~p~n",[lists:map(Fun,Params)]),
%% % io:format("Valuated: ~p~n",[fully_valuate(lists:map(Fun,Params))]),
Fun2 = fun ( Var = # var{})- >
fully_valuate(Var ) end ,
%% lists:map(Fun2,lists:map(Fun,Params)).
%% %% Replaces every variable for its binding, whenever it exists
%% fully_valuate(List) when is_list(List) ->
%% Fun = fun (X) ->
%% fully_valuate(X) end,
%% lists:map(Fun,List);
% fully_valuate({Var = # var{bind = ? } ,
%% % {},[]})->% Added to match standalone variables in annots
% Var ;
%% fully_valuate({PredName,
, Annots } ) when is_tuple(Args ) , is_list(Annots)- >
%% Fun = fun (X) ->
%% fully_valuate(X) end,
%% {fully_valuate(PredName),
TArgs = list_to_tuple(lists : map(Fun , ) ) ) ,
lists : map(Fun , ) } ;
%% fully_valuate({VarName}) ->
%% VarName;
fully_valuate(Var = # var{id = ID , args = ? } ) - >
%% ID;
%% fully_valuate(Var = #var{id = ID, args = ?UNBOUNDLIST}) ->
%% ID;
% % fully_valuate(Bindings,{PredName , , Annot}= Pred )
% % when ) ,
%% %% is_tuple(Args),
%% %% is_list(Annot)
%% %% ->
% % fully_valuate(Bindings , PredName ) ,
% % list_to_tuple(fully_valuate(Bindings , ) ) ) ,
%% %% fully_valuate(Bindings,Annot)},
% % % io : format("We evaluate Pred : ~p ~ nas ~p ~ n",[Pred , ] ) ,
% % ;
%% fully_valuate(Bind) ->
% io : format("No further valuation for ~p ~ n",[Bind ] ) ,
%% Bind.
%% %% retain_unique_vars(Element)->
% % VarsList = retain_variables(Element ) ,
%% %% utils:erase_repeated_vars(VarsList).
% % Creates a three - element timestamp tuple from the input .
%% %% Used to match variables in the annotations when these variables
%% %% are used like the parameters
% % make_timestamp_tuple({VarName , , Annots})- >
%% %% {getTimeStamp(VarName),
%% %% getTimeStamp(Args),
%% %% getTimeStamp(Annots)}.
% % getTimeStamp(#var{timestamp = TS})- >
%% %% TS;
%% %% getTimeStamp(Vars) when is_tuple(Vars)->
%% %% list_to_tuple(getTimeStamp(tuple_to_list(Vars)));
% % getTimeStamp(VarsList ) when is_list(VarsList ) - >
%% %% Fun = fun(Var) ->
%% %% getTimeStamp(Var) end,
% % lists : map(Fun , VarsList ) .
%% Applies valuate/2 to every element of a (real Erlang) list.
%% Kept as a separate function to avoid colliding with the handling of
%% string (char-list) variables inside valuate/2.
valuate_list(Bindings,Elements) when is_list(Elements) ->
    [valuate(Bindings,Element) || Element <- Elements].
%% ;
%% valuate_list(Bindings, Other) ->
%% exit(kill),
%% io:format("WARNING variables:valuate/2 should be used instead.~n"),
%% valuate(Bindings,Other).
%% Replaces the var references for the proper vars (not their values,
%% except in the case of ?ISREF) using the argument "Bindings".
%%
%% When a binary operation is found, it is resolved, which may fail.
%%
%% In the case of lists, it is checked whether the tail references a
%% list. NOTE: badly formed (improper) lists are not allowed by eJason.
valuate(_,[])->
    [];
valuate(Bindings,Atom) when is_atom(Atom) ->
    %% A bare atom is treated as a reference and re-dispatched.
    valuate(Bindings,{Atom});
valuate(Bindings,Number) when is_number(Number) ->
    valuate(Bindings,{Number});
valuate(_,?EMPTYLISTVAR) ->
    ?EMPTYLISTVAR;
valuate(_,{'[]'}) ->
    ?EMPTYLISTVAR;
valuate(Bindings,{VarRef})->
    %% A reference: look the variable up and valuate it in turn.
    case orddict:find(VarRef,Bindings) of
        {ok,Var}->
            valuate(Bindings,Var);
        error ->
            io:format("[~p DEBUG:] Variable ~p not found ~n",
                      [?MODULE,VarRef]),
            io:format("in Bindings: ~n~p~n",[Bindings]),
            %% Deliberate crash (badmatch): a dangling reference is a
            %% programming error.
            a = b
    end;
valuate(Bindings, List) when is_list(List) ->
    valuate(Bindings,{List});
valuate(Bindings,Var = #var{functor = Func, args = Args,
                            annots = Annots}) ->
    ValuatedVar =
        case Args of
            ?ISATOM ->
                Var;
            ?UNBOUND ->
                Var;
            ?UNBOUNDLIST ->
                Var;
            ?ISREF ->
                %% Follow the reference chain.
                valuate(Bindings,Func);
            ?STRONGNEG ->
                Var#var{
                  functor = valuate(Bindings,Func)};
            ?ISLIST ->
                {Header,Tail} = Func,
                NewHeader=
                    valuate_list(Bindings,Header),
                NewTail =
                    case valuate_list(Bindings,Tail) of
                        [?EMPTYLISTVAR]->
                            [?EMPTYLISTVAR];
                        [VarTail = #var{args = TailArgs}]
                        %% the tail of a list must be another list
                        when TailArgs == ?ISLIST; TailArgs == ?UNBOUNDLIST ->
                            [VarTail];
                        [VarTail = #var{args = TailArgs}]
                        when TailArgs == ?UNBOUND ->
                            %% An unbound tail is forced to be a list.
                            [VarTail#var{args = ?UNBOUNDLIST}];
                        OtherTail ->
                            io:format("OtherTAIL: ~p~n",[OtherTail]),
                            exit(improper_list)
                    end,
                %% NOTE(review): annotations are discarded here for
                %% lists — confirm this is intended.
                Var#var{
                  functor = {NewHeader,
                             NewTail},
                  annots = []};
            _ when is_tuple(Args)->
                %% A struct: valuate functor, args and annotations.
                Var#var{
                  functor = valuate(Bindings,Func),
                  args = list_to_tuple(
                           valuate_list(Bindings,tuple_to_list(Args))),
                  annots = valuate_list(Bindings,Annots)
                 };
            _ ->
                io:format("[~p DEBUG:] Cannot valuate Var ~p ~n",
                          [?MODULE,Var]),
                exit(error)
        end,
    ValuatedVar;
valuate(Bindings,
        BO=#binary_operation{
          left_part = LeftPart,
          right_part= RightPart}) ->
    %% Valuate both sides, resolve the operation and wrap the result as
    %% an atom-style #var.
    Operation =
        BO#binary_operation{
          left_part = valuate(Bindings,LeftPart),
          right_part =
          case RightPart of
              no_right_part ->
                  no_right_part;
              _ ->
                  valuate(Bindings,RightPart)
          end},
    Solution =
        operations:resolve(Operation),
    #var{id = Solution,
         functor = Solution,
         args = ?ISATOM,
         annots = []}.
%% #var{id=list_to_atom("SOLVEDBINARYOPERATION"++make_timestamp_string()),
%% functor = Solution,
%% args = ?ISATOM}-
%% %% Turns a variable into its tuple representation.
%% %% Unbound variables are left unmodified
%% var_to_tuple(#var{functor = Func,
%%              args = Args,
%% annots = Annots}) ->
%%     {NewFunc, NewArgs} =
%%      case Args of
%%          ?ISATOM ->
%% {Func,{}};
%% ?ISLIST ->
%% {var_to_tuple(Func),{}};
%%          ?UNBOUND ->
%% {Func,{}};
%% ?UNBOUNDLIST ->
%% {Func,{}};
%% _ ->
%% {var_to_tuple(Func),
%% list_to_tuple(var_to_tuple(tuple_to_list(Args)))}
%% end,
%%     {NewFunc,
%%      NewArgs,
%% var_to_tuple(Annots)};
%% var_to_tuple(List) when is_list(List) ->
%% Fun = fun (X) -> var_to_tuple(X) end,
%% lists:map(Fun,List).
%% Structs from plan formulas can be wrong (e.g. A = a[g] and B = A[f])
%% will generate a struct whose functor points to A, a variable
%% that already has annotations, which is problematic
%% This function deletes these problems.
%% It is used in the arguments of the operations
%% ModifiedVar is valuated (i.e. bound variables are replaced)
%%
%% Works with BO as well.
%%
%% A struct like "A = 1[B]", is turned to "A = 1"
%% Returns {NewBindings, ModifiedVar}
%% Corrects a struct within some bindings: the variable is first
%% valuated, then structurally corrected, the variables it references
%% are imported into the bindings, and finally the result is cleaned
%% (embedded vars replaced by {ID} references).
%% Returns {NewBindings, CleanedVar}.
correct_structs(Bindings,
                UnvaluatedVar)->
    %% io:format("Correcting Struct: ~p~n",[UnvaluatedVar]),
    ValuatedVar = valuate(Bindings,UnvaluatedVar),
    %% io:format("Valuated Struct: ~p~n",[ValuatedVar]),
    CorrectedVar = correct_structs(ValuatedVar),
    %% io:format("CorrectedVar: ~p~n",[CorrectedVar]),
    NewVars = lists:flatten(vars_to_import(CorrectedVar)),
    %% gather_vars(CorrectedVar)),
    %% io:format("NewCleanVars: ~p~n",[NewVars]),
    CleanVar = clean_var(CorrectedVar),
    %% io:format("Final Corrected Struct: ~p~n",[CleanVar]),
    NewBindings = update(Bindings,NewVars),
    {NewBindings,
     CleanVar}.
%% This auxiliary function corrects the structs but does not
%% modify the bindings (vars_to_import can be used to obtain the new bindings)
%% It is achieved by correct_structs/2
%% Corrects a single, already-valuated term:
%%  - atoms/unbound vars lose their annotations (not allowed there),
%%  - strong negations are normalised (~a -> ~a()[], ~1 -> 1, ~[..] -> [..]),
%%  - nested struct annotations are merged (a(b)[c][d] -> a(b)[c,d]),
%%  - binary operations are corrected on both sides.
correct_structs(Var =
                    #var{functor = _Func,
                         args = Args,
                         annots = _Annots}) when Args == ?ISATOM;
                                                 Args == ?UNBOUNDLIST;
                                                 Args == ?UNBOUND->
    %% Atoms cannot have annotations
    Var#var{annots = []};
correct_structs(NegVar = #var{functor = Func,
                              args = ?STRONGNEG}) ->
    %% io:format("NegVar",[NegVar]),
    %% io:format("CorrectFunc: ~p~n",
    %%           [correct_structs(Func)]),
    CorrectedVar =
        %% Strongneg refers to struct vars for simplicity
        %% ~ a -> ~ a()[]
        case correct_structs(Func) of
            AtomVar= #var{args = ?ISATOM, functor = F} when is_atom(F) ->
                %% Wrap the plain atom in a fresh 0-arity struct
                NewVarID =
                    list_to_atom("EJASONSTRUCTINNEG"++
                                     ?MODULE:make_timestamp_string()),
                NewVar=#var{ id = NewVarID,
                             functor = AtomVar,
                             args = {},
                             annots = []},
                NegVar#var{
                  functor = NewVar,
                  annots = []
                 };
            _AtomVar= #var{args = ?ISATOM, id = AtomID} ->
                %% AtomVar is a string or number: ~1, ~"a"
                %% Changed into an atom: ~1 -> 1, ~"a" -> "a"
                NegVar#var{args = ?ISREF, functor = {AtomID}};
            StructVar = #var{args = StructArgs} when is_tuple(StructArgs)->
                NegVar#var{
                  functor = StructVar,
                  annots = []
                 };
            %% Correcting: ~~Var
            %% NOTE(review): NegVar is already bound by the function head,
            %% so this pattern only matches when the corrected functor is
            %% exactly NegVar itself — confirm this is intended.
            NegVar = #var{args = ?STRONGNEG,
                          functor = #var{id=NegatedRef}}->
                NegVar#var{
                  functor = {NegatedRef},
                  args = ?ISREF,
                  annots =[]};
            _UnboundVar = #var{functor = ?NOFUNCTOR}->
                NegVar;
            _List = #var{args = ?ISLIST, id = ListID}->
                %% Error should not use ~[1,2,3]
                %% replaced by [1,2,3]
                NegVar#var{args = ?ISREF, functor = {ListID},annots =[]}
        end,
    %% io:format("CorrectedVar: ~p~n",[CorrectedVar]),
    CorrectedVar;
correct_structs(?EMPTYLISTVAR) ->
    ?EMPTYLISTVAR;
correct_structs(StructVar =
                    #var{functor = {Header,Tail},
                         args = ?ISLIST}) ->
    %% Correct every element in the header and the tail
    CorrectedHeader =
        lists:map(fun correct_structs/1,
                  Header),
    CorrectedTail =
        %% case Tail of
        %%     [{'[]'}] ->
        %%         Tail;
        %%     _ ->
        lists:map(fun correct_structs/1,
                  Tail),
    %% end,
    %% io:format("CorrectedHeader: ~p~nCorrectedTail: ~p~n",
    %%           [CorrectedHeader, CorrectedTail]),
    StructVar#var{
      functor = {CorrectedHeader,
                 CorrectedTail},
      annots = []
      %% Lists cannot have annotations
     };
correct_structs(StructVar =
                    #var{functor = Func,
                         args = Args,
                         annots = Annots}) when is_tuple(Args)->
    %% io:format("StructVar: ~p~n",[StructVar]),
    case Func of
        #var{args = ?ISATOM} ->
            CorrectedArgs =
                list_to_tuple(
                  lists:map(fun correct_structs/1,
                            tuple_to_list(Args))),
            %% io:format("CorrectedAnnots: ~p~n",[CorrectedAnnots]),
            %% Lists and numbers cannot have annotations
            NewStruct =
                case Func#var.functor of
                    FunctorNumOrStr when is_number(FunctorNumOrStr), Args =={};
                                         is_list(FunctorNumOrStr), Args=={}->
                        %%The struct is turned into a reference
                        StructVar#var{
                          functor = {Func#var.id},
                          args = ?ISREF,
                          annots = []
                         };
                    _ ->
                        StructVar#var{functor = Func,
                                      annots = lists:map(fun correct_structs/1,
                                                         Annots),
                                      args = CorrectedArgs
                                     }
                end,
            %% StructVar#var{functor = Func,
            %%               args = CorrectedArgs,
            %%               annots = CorrectedAnnots
            %%              },
            %% io:format("CorrectedStruct: ~p~n",[NewStruct]),
            NewStruct;
        #var{args = FuncArgs} when
              FuncArgs == ?UNBOUNDLIST, Args =={};
              FuncArgs == ?ISLIST, Args == {}->
            %% e.g. Var = [1,2][L]
            CorrectedFunc =
                correct_structs(Func),
            StructVar#var{
              functor = CorrectedFunc,
              args = ?ISREF,
              annots = []
              %% Annots ignored if the functor
              %% an atom (number, string) or a list
             };
        #var{args = FuncArgs} when FuncArgs == ?UNBOUND, Args == {}->
            %%If the functor is an unbound variable, nothing changes
            %% Var = A[L]
            CorrectedAnnots =
                lists:map(fun correct_structs/1,
                          Annots),
            StructVar#var{
              annots = CorrectedAnnots
             };
        #var{functor = StrongNegFunc,
             args = ?STRONGNEG,
             annots = _} ->
            %% The functor is a strong negation, merge the
            %% annotations
            %% e.g. ~a(b)[c][d] -> ~a(b)[c,d]
            AnnotsFunc = StrongNegFunc#var.annots,
            %% io:format("StrongNegFun: ~p~n",
            %%           [StrongNegFunc]),
            CorrectedAnnots =
                lists:map(fun correct_structs/1,
                          Annots++AnnotsFunc),
            StructVar#var{
              functor = StrongNegFunc#var{
                          annots = CorrectedAnnots},
              args = ?STRONGNEG,
              annots = []
             } ;
        #var{functor = FuncFunc,
             args = ArgsFunc,
             annots = AnnotsFunc} when is_tuple(ArgsFunc),
                                       Args == {}->
            %% The functor is another structure, merge the
            %% annotations
            %% e.g. a(b)[c][d]
            CorrectedAnnots =
                lists:map(fun correct_structs/1,
                          Annots++AnnotsFunc),
            StructVar#var{
              functor = FuncFunc,
              args = ArgsFunc,
              annots = CorrectedAnnots
             }
    end;
correct_structs(BO = #binary_operation{left_part = BodyLeft,
                                       right_part = BodyRight}) ->
    %% io:format("BO: ~p~n",[BO]),
    BO#binary_operation{
      left_part =correct_structs(BodyLeft),
      right_part = case BodyRight of
                       no_right_part ->
                           no_right_part;
                       _ -> correct_structs(BodyRight)
                   end}.
%% Receives a var that is bad formed (e.g. an ?ISREF variable whose
%% functor is a variable A, not a reference to A)
%% Does not guarantee that the variables replaced by their
%% references are already captured in some bindings (orddict)
%% It can be achieved using vars_to_import prior to calling clean_var
%% Receives a var that is bad formed (e.g. an ?ISREF variable whose
%% functor is a variable A, not a reference to A) and replaces the
%% embedded variables in functor/args/annots by {ID} references.
%% Callers should run vars_to_import first so the referenced variables
%% exist in some bindings.
clean_var(DirtyVar = #var{args = ?ISREF,
                          functor = #var{id = ID}}) ->
    DirtyVar#var{functor = {ID}};
clean_var(DirtyVar = #var{
            functor = Functor,
            args = Args,
            annots = Annots
           }) when Args =/= ?ISATOM, Args =/= ?ISLIST, Args =/= ?ISREF,
                   Args =/= ?UNBOUND, Args =/= ?UNBOUNDLIST,
                   Args =/= ?STRONGNEG->
    %% Struct var: turn functor/args/annots into references
    %% io:format("DirtyVar: ~p~n",[DirtyVar]),
    NewFunc =
        case Functor of
            {_} -> %% well-formed functor
                Functor;
            #var{id = FuncID} ->
                {FuncID}
        end,
    RefFun =
        fun(#var{id = ID}) ->
                {ID};
           ({Ref}) ->
                {Ref};
           (BO = #binary_operation{}) ->
                BO
        end,
    NewArgs =
        list_to_tuple(lists:map(RefFun,tuple_to_list(Args))),
    NewAnnots =
        lists:map(RefFun,Annots),
    DirtyVar#var{functor = NewFunc,
                 args = NewArgs,
                 annots = NewAnnots};
%% NOTE(review): the next clause was garbled in the source; it is
%% reconstructed as the empty-list identity case — confirm against the
%% version history.
%% clean_var(Var = #var{args = ?ISLIST,
%%                      id = '[]'})
clean_var(?EMPTYLISTVAR) ->
    ?EMPTYLISTVAR;
clean_var(Var =#var{args = ?ISLIST,
                    functor = {Header,Tail}}) ->
    %% io:format("[clean_var] Cleaning List: ~p~n",[Var]),
    RefFun =
        fun(#var{id = ID}) ->
                {ID};
           ({Ref}) ->
                {Ref}
        end,
    NewHeader =
        lists:map(RefFun, Header),
    NewTail =
        lists:map(RefFun, Tail),
    %% io:format("NewHeader: ~p~nNewTail: ~p~n",[NewHeader,NewTail]),
    Var#var{functor = {NewHeader,NewTail}};
clean_var(OtherVar) ->
    %% Atoms, references and unbound vars are already clean
    OtherVar.
%% Receives a variable/binary_operation and generates the
%% list of bindings that can be extracted from it.
%%
%% Unlike in gather_vars (<-deleted), the variables in structs/lists
%% are replaced by their references
%%
%%
%% It is used by the belief_base to identify the new bindings
%% imported.
%% It is used by the parser to get the variables in an action
%% Returns the list of bindings that can be extracted from a
%% variable/binary_operation; the variables in structs/lists are
%% replaced by their {ID} references (see module comment above).
vars_to_import(Var = #var{args = Args}) when Args == ?ISATOM;
                                             Args == ?UNBOUND;
                                             Args == ?UNBOUNDLIST->
    [Var];
vars_to_import(Var = #var{args = Args}) when Args == ?ISREF;
                                             Args == ?STRONGNEG->
    %% io:format("Var: ~p~n",[Var]),
    RefVar =
        Var#var.functor,
    %% io:format("RefVar: ~p~n",[RefVar]),
    {NewVar,NewVarsFunctor} =
        case RefVar of
            #var{} -> %% If the reference is a variable, take its values
                {Var#var{functor = {RefVar#var.id}, annots = []},
                 vars_to_import(RefVar)};
            _ ->
                {Var#var{functor = RefVar, annots = []},
                 []}
        end,
    %% io:format("NewVar: ~p~n",[NewVar]),
    lists:flatten(lists:append( [ [NewVar],
                                  NewVarsFunctor]));
vars_to_import(?EMPTYLISTVAR) ->
    [];
vars_to_import(Var =#var{functor = {Header,Tail}, args = ?ISLIST} ) ->
    %% io:format("ListVar: ~p~n",[Var]),
    FunImport = fun (X) ->
                        vars_to_import(X) end,
    FunID =
        fun (#var{id = ID})->
                {ID} end,
    %%io:format("vars_to_import on header\n"),
    VarsHeader = lists:map(FunImport, Header),
    %%io:format("vars_to_import on tail\n"),
    %% NOTE(review): the binding of VarsTail was garbled in the source;
    %% reconstructed from its use below — confirm against version history.
    VarsTail =
        %% case Tail of
        %%     [{'[]'}] ->
        %%         [];
        %%     _ ->
        lists:map(FunImport, Tail),
    %% end,
    NewHeader =
        lists:map(FunID, Header),
    NewTail =
        %% case Tail of
        %%     [{'[]'}] ->
        %%         [{'[]'}];
        %%     _ ->
        lists:map(FunID, Tail),
    %% end,
    NewVar =
        Var#var{
          functor = {NewHeader,NewTail}
         },
    Ret = lists:flatten(lists:append([ [NewVar|VarsHeader], VarsTail])),
    %% io:format("Return: ~p~n",[Ret]),
    Ret;
vars_to_import(Var =#var{functor = Functor, args = Args, annots = Annots} ) ->
    %% io:format("StructVar: ~p~n",[Var]),
    FunImport = fun (X) ->
                        vars_to_import(X) end,
    FunID =
        fun (#var{id = ID})->
                {ID};
            (BO=#binary_operation{})->
                BO
        end,
    VarsFunctor = FunImport(Functor),
    %% io:format("vars_to_import on functor: ~p\n",[VarsFunctor]),
    VarsArgs =
        %% Remove binary operations
        lists:filter(
          fun %%( {_}) ->
              %%       true;
              (#binary_operation{}) ->
                  false;
              (#var{}) ->
                  true
          end,
          lists:flatten(lists:map(FunImport,tuple_to_list(Args)))),
    %% io:format("vars_to_import on args: ~p\n",[VarsArgs]),
    VarsAnnots = lists:flatten(lists:map(FunImport, Annots)),
    %% io:format("vars_to_import on annots: ~p\n",[VarsAnnots]),
    NewFunctor = FunID(Functor),
    NewArgs = list_to_tuple(lists:map(FunID,tuple_to_list(Args))),
    NewAnnots = lists:map(FunID,Annots),
    NewVar =
        Var#var{
          functor = NewFunctor,
          args = NewArgs,
          annots = NewAnnots
         },
    Ret= lists:flatten(lists:append([ [NewVar|VarsFunctor], VarsArgs,VarsAnnots])),
    %%io:format("Return: ~p~n",[Ret]),
    Ret;
vars_to_import(#binary_operation{left_part = Left,
                                 right_part = Right})->
    VarsLeft =
        case Left of
            _ when is_atom(Left)->
                [];
            _ ->
                vars_to_import(Left)
        end,
    VarsRight =
        case Right of
            _ when is_atom(Right)->
                [];
            _ ->
                vars_to_import(Right)
        end,
    lists:flatten(
      lists:append([ VarsLeft,VarsRight])).
%% Uses the Erlang timestamp function "erlang:timestamp"
%% Used to create unique var names
%% Returns a string of length 18 with a timestamp
%%TODO: move to the module utils
%% Builds an 18-character, zero-padded timestamp string from
%% erlang:timestamp() ({MegaSecs, Secs, MicroSecs}); each component is
%% right-aligned in a field of 6 digits. Used to create unique var names.
make_timestamp_string() ->
    {Mega, Secs, Micro} = erlang:timestamp(),
    Pad = fun(Num) -> string:right(integer_to_list(Num), 6, $0) end,
    lists:append([Pad(Mega), Pad(Secs), Pad(Micro)]).
%% Function to update a valuation with the new matching from a query to the
%% Belief Base or the call of a plan.
%% NOTE: This function is a bottleneck for execution performance. An alternative
%% shall be found to increase the performance of the solution
import_new_matchings(OriginalBindings, FirstReplacements,
                     NewVarsPrefix, ImportBindings)->
    %% OriginalBindings: valuation (orddict) to be extended.
    %% FirstReplacements: orddict VarID -> renamed VarID used for the call.
    %% NewVarsPrefix: prefix for the fresh variable names generated here.
    %% ImportBindings: bindings produced by the query/plan call.
    %%io:format("Result from Belief: ~p~n",[BeliefBindings]),
    %% TODO: avoid giving "new" values to the
    %% variables already matched when given as params
    %% We must generate new variables to add to bindings
    %% io:format("First Replacements: ~p~n",[FirstReplacements]),
    %% These are the variables that where given as param
    OriginalVars =
        [X|| {X,_} <- FirstReplacements],
    %% These are the new variables generated for the call
    UsedVars =
        [{Y} || {_,Y} <- FirstReplacements],
    %% io:format("OriginalVars: ~p~n",[OriginalVars]),
    %% io:format("UsedVars: ~p~n",[UsedVars]),
    %% io:format("ImportBindings: ~p~n",[ImportBindings]),
    %% These are the vars that correspond to those of the
    %% call. We need them, because when we valuate them the ones
    %% that are of type ?ISREF get lost.
    CallVars =
        [get_var(ID,ImportBindings) ||
            {ID} <- UsedVars],
    %% io:format("CallVars: ~p~n",[CallVars]),
    %% %% The variables from the call that "disappear" are added to the list
    %% %% of valuated vars
    %%link first to last!?ISREF -> ?ISREF -> ?ISREF..
    ReplaceIsRef =
        fun(IsRefVar = #var{args = ?ISREF}) ->
                %% io:format("ValuatingVar: ~p~n",[IsRefVar]),
                %% io:format("Using: ~p~n",[ImportBindings]),
                ValVar = valuate(ImportBindings,
                                 IsRefVar),
                %% io:format("ValuatedVar: ~p~n",[ValVar]),
                IsRefVar#var{functor = {ValVar#var.id}};
           (_Other) ->
                %% io:format("Why??: ~p~n",_Other),
                %% Deliberate badmatch: only ?ISREF vars are expected here
                a = b
        end,
    %% io:format("1\n"),
    ErasedVars =
        [ReplaceIsRef(X) || X <-lists:filter(
                                  fun(#var{args = ?ISREF}) ->
                                          true;
                                     (_) -> false
                                  end, CallVars)],
    %% io:format("3\n"),
    ValuatedVars =
        valuate_list(ImportBindings,
                     UsedVars)++ ErasedVars,
    %% io:format("ValuatedVars: ~p~n",[ValuatedVars]),
    %% These are the variables to rename
    %% (the ones from the query/call and those referenced
    %% by them)
    %% io:format("ValuatedVars: ~p~n",[ValuatedVars]),
    VarsToRename =
        sets:to_list(
          sets:from_list(
            lists:flatten(lists:map(
                            fun vars_to_import/1,
                            ValuatedVars)))),
    %% io:format("VarsToRenamen: ~p~n",[VarsToRename]),
    %% io:format("Renaming from: ~p ~n",
    %%           [length(OriginalBindings)]),
    NewRepl =
        obtain_replacements(
          NewVarsPrefix,
          1,%% length(OriginalBindings),
          VarsToRename),
    %% io:format("NewRepls: ~p~n",[NewRepl]),
    RenamedVars =
        use_replacements(VarsToRename,
                         NewRepl),
    %% This function maps the variables in the params
    %% to those of the last replacement
    FinalFun =
        fun(VarID) ->
                {ok,Repl1} =
                    orddict:find(
                      VarID, FirstReplacements),
                %% io:format("Repl1: ~p~n",[Repl1]),
                {ok,Repl2} =
                    orddict:find(
                      Repl1, NewRepl),
                %% io:format("Repl2: ~p~n",[Repl2]),
                case VarID =:= Repl2 of
                    true->
                        %% to avoid self-refs
                        get_var(VarID,OriginalBindings);
                    false ->
                        #var{id = VarID,
                             args = ?ISREF,
                             functor = {Repl2}}
                end
        end,
    FinalMatches =
        lists:map(FinalFun,
                  OriginalVars),
    %% Debug-only check (unused); commented out per the TODO above, as
    %% this function is a documented performance bottleneck.
    %% CheckFun =
    %%     fun(#var{id = VarID, functor = Func}) when Func =/= VarID->
    %%             case orddict:find(VarID,OriginalBindings) of
    %%                 error ->
    %%                     false;
    %%                 _ ->
    %%                     true
    %%             end;
    %%        (_) ->
    %%             false
    %%     end,
    %% Updated = lists:filter(CheckFun, lists:flatten(
    %%                                    lists:append(
    %%                                      [RenamedVars,
    %%                                       FinalMatches]))),
    %% case Updated of
    %%     [] ->
    %%         ok;
    %%     _ ->
    %%         io:format("Updated: ~p~nIn: ~p~n",[Updated, OriginalBindings]),
    %%         timer:sleep(30000)
    %% end,
    %% Replace the original bindings with the new ones
    FinalResult =
        update(
          OriginalBindings,
          lists:flatten(
            lists:append(
              [RenamedVars,
               FinalMatches]))),
    %% io:format("Ending with a total of: ~p ~n",
    %%           [length(FinalResult)]),
    %% timer:sleep(5000),
    %% io:format("FinalResult: ~p~n",[FinalResult]),
    FinalResult.
%% Merges two valuations.
%% Raises an exception if there is a clash (values for the same var differ)
%% Merges two valuations (orddicts). Keys present in only one side are
%% taken as-is; keys present in both must map to the same value,
%% otherwise the merge aborts with an error exit.
merge_bindings(Bindings1, Bindings2) ->
    Resolve =
        fun(_Key, Value, Value) ->
                %% Same value on both sides: keep it
                Value;
           (Key, First, Second) ->
                io:format(
                  "Error: the valuations have conflicting values for var: \n"++
                      "Key: ~p\nValue1:~p\nValue2:~p\n",
                  [Key, First, Second]),
                exit(valuation_merge_bindings_error)
        end,
    orddict:merge(Resolve, Bindings1, Bindings2).
%% Checks if the variables referred by IDVar2 contain
%% IDVar1 e.g. A = ~pred(B,A).
%% check_contains(Bindings, IDVar1, Term) -> boolean().
%% Occurs-check: true when Term (a var, a {Ref} reference, or anything
%% reachable from it through Bindings) contains the variable IDVar1.
%% Clause order matters: the direct hits come first, then the kinds
%% that cannot contain anything, then the recursive cases.
%% Direct hit: Term is a reference to IDVar1 itself.
check_contains(_Bindings,IDVar1,{IDVar1}) ->
    true;
%% Direct hit: Term is the variable IDVar1 itself.
check_contains(_Bindings,IDVar1,#var{id = IDVar1}) ->
    true;
%% A reference to some other var: follow it through the bindings.
check_contains(Bindings,IDVar1,{IDVar2}) ->
    check_contains(Bindings,IDVar1,
                   get_var(IDVar2,Bindings));
%% Atoms and unbound vars cannot contain other variables.
check_contains(_Bindings,_IDVar1,#var{args = Args}) when Args ==?ISATOM;
                                                         Args ==?UNBOUND;
                                                         Args ==?UNBOUNDLIST->
    false;
%% A bound reference var: recurse into the referenced term.
check_contains(Bindings,IDVar1,#var{functor= Ref,
                                    args = ?ISREF}) ->
    check_contains(Bindings, IDVar1, Ref);
check_contains(_,_,?EMPTYLISTVAR) ->
    false;
%% A list: IDVar1 is contained if any header or tail element contains it.
check_contains(Bindings,IDVar1,#var{functor =
                                    {Header,Tail},
                                    args = ?ISLIST}) ->
    FunCond = fun (Var) ->
                      check_contains(Bindings,IDVar1,Var) end,
    lists:any(FunCond,Header++Tail);
%% A strong negation: recurse into the negated term.
check_contains(Bindings,IDVar1,#var{functor = Ref,
                                    args = ?STRONGNEG}) ->
    check_contains(Bindings, IDVar1, Ref);
%% A struct: check functor, every argument and every annotation.
check_contains(Bindings,IDVar1,#var{functor = Func,
                                    args = Args,
                                    annots = Annots}) when is_tuple(Args) ->
    FunCond = fun (Var) ->
                      check_contains(Bindings,IDVar1,Var) end,
    lists:any(FunCond,[Func|tuple_to_list(Args)] ++ Annots).
%% Checks whether some term contains free variables or not.
%% The variable is fully valuated.
%% Returns true if the term is ground, or not otherwise
%% Checks whether a fully-valuated term contains free variables.
%% Returns true iff the term is ground (no unbound vars anywhere).
%% A var without functor is, by definition, unbound.
is_ground(#var{functor = ?NOFUNCTOR}) ->
    false;
is_ground(?EMPTYLISTVAR) ->
    true;
is_ground(#var{args = ?ISATOM}) ->
    true;
is_ground(#var{args = ?STRONGNEG, functor = Func}) ->
    is_ground(Func);
is_ground(#var{functor = {Header, Tail}, args = ?ISLIST}) ->
    %% A list is ground when every header and tail element is ground
    lists:all(fun is_ground/1, Header ++ Tail);
is_ground(#var{args = Args, functor = Func, annots = Annots})
  when is_tuple(Args) ->
    %% A struct is ground when functor, arguments and annotations all are
    lists:all(fun is_ground/1, [Func|tuple_to_list(Args)] ++ Annots).
%%% Turns ejason variables into their erlang equivalent
%%% Structs are turned into 3-element tuples:
%%% a(b,c)[d,e] -> {a,[b,c],[d,e]}
%%% Unbound vars are turned into 3-element tuples:
%%% A[b] -> {[],[],[b]}
%%% Strong negation structs are turned into 4-element tuples:
%%% ~a(b,c)[d,e] -> {'~',a,[b,c],[d,e]}
%% See the translation scheme in the comment above.
ejason_to_erl(?EMPTYLISTVAR)->
    [];
ejason_to_erl(#var{functor = Func, args = ?ISATOM}) ->
    Func;
ejason_to_erl(#var{functor = StructVar,
                   args = ?STRONGNEG}) ->
    %% Prepend '~' to the translated struct/atom
    StructList =
        ['~']++ case ejason_to_erl(StructVar) of
                    {Functor,Args,Annots} ->
                        tuple_to_list({Functor,Args,Annots});
                    Atom when is_atom(Atom) ->
                        [Atom,[],[]]
                end,
    list_to_tuple(StructList);
ejason_to_erl(_V = #var{functor = {[Header],[Tail]},
                        args = ?ISLIST}) ->
    %% io:format("[ejason_to_erl] VarList = ~p~n",[_V]),
    [ejason_to_erl(Header)|
     case Tail of %% avoid always adding an empty list as last element
         %% NOTE(review): Tail is a single element here, so this branch
         %% only fires if it is literally the list [?EMPTYLISTVAR];
         %% the fallback yields the same result for ?EMPTYLISTVAR — confirm.
         [?EMPTYLISTVAR] ->
             [];
         _ ->
             ejason_to_erl(Tail)
     end];
ejason_to_erl(#var{functor = ?NOFUNCTOR, args = ?UNBOUND, annots = Annots}) ->
    Fun = fun (X) -> ejason_to_erl(X) end,
    {[],[],lists:map(Fun, Annots)};
ejason_to_erl(#var{functor = ?NOFUNCTOR, args = ?UNBOUNDLIST}) ->
    {[],[],[]};
ejason_to_erl(#var{functor = Func, args = Args,annots = Annots}) ->
    {ejason_to_erl(Func),
     lists:map(fun ejason_to_erl/1, tuple_to_list(Args)),
     lists:map(fun ejason_to_erl/1, Annots)}.
%% Turns erlang terms into eJason variables - ONLY ONE VARIABLE!
%% NOTE: Strings are treated like lists.
%% Translates one Erlang term into its eJason #var{} representation.
%% Lists become ?ISLIST vars (strings are treated like lists); atoms
%% and numbers become ?ISATOM vars; anything else is unsupported.
erl_to_ejason([]) ->
    ?EMPTYLISTVAR;
erl_to_ejason([OnlyElement]) ->
    %% Single-element list: its tail is the empty-list var
    Stamp = make_timestamp_string(),
    #var{id = list_to_atom(?ERLTOEJASONVAR ++ Stamp),
         functor = {[erl_to_ejason(OnlyElement)], [?EMPTYLISTVAR]},
         args = ?ISLIST};
erl_to_ejason([First|Rest]) ->
    Stamp = make_timestamp_string(),
    #var{id = list_to_atom(?ERLTOEJASONVAR ++ Stamp),
         functor = {[erl_to_ejason(First)], [erl_to_ejason(Rest)]},
         args = ?ISLIST};
erl_to_ejason(Simple) when is_atom(Simple);
                           is_number(Simple) ->
    #var{id = Simple,
         functor = Simple,
         args = ?ISATOM,
         annots = []};
erl_to_ejason(Unsupported) ->
    io:format("[variables:erl_to_ejason] There is currently no support"++
		  " for the automatic translation of"++
		  " an Erlang term:~n ~p into eJason.~n",[Unsupported]).
%% Turn arguments into unbound variables
%% e.g a(b,c) -> a(_,_)
%% Turns all arguments of a struct into fresh unbound variables,
%% e.g. a(b,c) -> a(_,_). Functor and annotations are preserved.
keep_functor(Var = #var{args = Args}) when is_tuple(Args) ->
    MakeUnbound =
        fun(_Ignored) ->
                #var{id = list_to_atom(
                            "UNBOUNDVAR"++make_timestamp_string()),
                     functor = ?NOFUNCTOR,
                     args = ?UNBOUND,
                     annots = []}
        end,
    NewArgs = [MakeUnbound(Arg) || Arg <- tuple_to_list(Args)],
    Var#var{args = list_to_tuple(NewArgs)}.
%%% TODO: create a function that allows the search for determined
%%% annotations, like: search(Bindings, Annots, "annotation(Value1,_)",
%%% ["Value1",...,"ValueN"] that returns a list of var matchings
%%%% [Value1,...,ValueN].
%%% Look for an annotation "container(ContainerName)"
%%% Used by actions:execute(..., create_agent)
%%% actions:execute(..., send)
%% If none is found, "node()" is returned
%% Scans the annotation list for a "container(ContainerName)" annotation
%% and returns the container name; defaults to node() when none matches.
find_container_name(Bindings,Annots)->
    %% io:format("Looking for container in: ~p~n",[Annots]),
    %% Fresh unbound var that will capture the container name
    ContainerNameVar = #var{id =
                                list_to_atom(
                                  "CONTAINERNAMEVAR"++
                                      make_timestamp_string()),
                            functor =?NOFUNCTOR,
                            args = ?UNBOUND},
    ContainerAtomVar =
        #var{args = ?ISATOM,
             id = container,
             functor = container},
    %% Pattern struct container(ContainerNameVar) to match against
    ContainerVar =
        #var{ id =
                  list_to_atom("CONTAINERVAR"++
                                   make_timestamp_string()),
              functor = {container},
              args = {{ContainerNameVar#var.id}}},
    UseBindings =
        update(Bindings,[ContainerVar,ContainerAtomVar,
                         ContainerNameVar]),
    %% Match annotations receives a lists of var references
    UseAnnots = lists:map(fun (#var{id = ID}) -> {ID} end,
                          Annots),
    FoundContainerName =
        case match_annotations(
               UseBindings,
               [{ContainerVar#var.id}],
               iterator:create_iterator(UseAnnots)) of
            false ->
                node();
            ItAnnots when is_function(ItAnnots) ->
                case iterator:first(ItAnnots) of
                    false ->
                        node();
                    NewBindings ->
                        %% container(Name) found. Extract match.
                        SuggestedContainerVar =
                            valuate(
                              NewBindings,
                              get_var(ContainerNameVar#var.id,
                                      NewBindings)),
                        %% %% io:format("StructVar: ~p~n",[StructVar]),
                        %% #var{functor = #var{id = container},
                        %%      args = {SuggestedContainerVar}} = StructVar,
                        SuggestedContainerVar
                end
        end,
    %% io:format("FoundContainerName: ~p~n",[FoundContainerName]),
    ContainerName =
        case FoundContainerName of
            %% _ when is_atom(FoundContainerName) ->
            %%     FoundContainerName;
            #var{args = ?ISATOM} ->
                %% container(SomeName)
                FoundContainerName#var.functor;
            #var{functor = #var{args = ?ISATOM} }->
                %%container(SomeName[morelabels])
                (FoundContainerName#var.functor)#var.functor;
            _ ->
                %% NOTE(review): the "no annotation" path also lands here
                %% (FoundContainerName is the node() atom) and prints this
                %% debug line — confirm the noise is intended.
                io:format("[Variables Debug:] Invalid containerName: ~p~n",
                          [FoundContainerName]),
                node()
        end,
    %% io:format("ContainerName: ~p~n",[ContainerName]),
    ContainerName.
%%% Look for annotations "persist(Options) or demonitor(Options)"
%%% Used by actions:execute(..., monitor_agent)
%% If none is found, the equivalent of "persist(any)" is returned
%% find_monitor_options(Bindings, Configuration) -> #monitor_options{}.
%% Derives the persistence flags for a monitoring relation from a
%% persist(...)/demonitor(...) annotation; malformed configurations
%% fall back to ?PERSISTANY (everything persists).
%% ?PERSISTANY (and the fallback below): persist on every event.
find_monitor_options(_Bindings, ?PERSISTANY)->
    #monitor_options{
       persist_unknown_agent = true,
       persist_created_agent = true,
       persist_dead_agent = true,
       persist_restarted_agent = true,
       persist_revived_agent = true,
       persist_unreachable_agent = true
      };
find_monitor_options(Bindings, Configuration)->
    %% Translate the eJason struct into plain Erlang for inspection
    ErlConfiguration = ejason_to_erl(Configuration),
    %% io:format("[variables] Configuration: ~p~n",[ErlConfiguration]),
    %% removes anything that is not an atom from the configuration
    Filter = fun ( {Functor,[],_}) -> Functor;
                 (Atom ) when is_atom(Atom)-> Atom;
                 (_) -> []
             end,
    case ErlConfiguration of
        %% demonitor(any): clear every persistence flag
        {demonitor,[Persist],_} when Persist == any orelse
                                     Persist == [any]->
            #monitor_options{
              persist_unknown_agent = false,
              persist_created_agent = false,
              persist_dead_agent = false,
              persist_restarted_agent = false,
              persist_revived_agent = false,
              persist_unreachable_agent = false
             };
        %% persist(Events): set exactly the listed event flags
        {persist, [Persist], _} when Persist =/= any andalso
                                     Persist =/= [any]->
            PersistList = case Persist of
                              _ when is_atom(Persist) ->
                                  [Persist];
                              _ when is_list(Persist) ->
                                  lists:map(Filter, Persist)
                          end,
            #monitor_options{
               persist_unknown_agent = lists:member(unknown_agent,
                                                    PersistList),
               persist_dead_agent = lists:member(dead_agent,
                                                 PersistList),
               persist_restarted_agent = lists:member(restarted_agent,
                                                      PersistList),
               persist_revived_agent = lists:member(revived_agent,
                                                    PersistList),
               persist_unreachable_agent = lists:member(unreachable_agent,
                                                        PersistList),
               persist_created_agent = lists:member(created_agent,
                                                    PersistList)
              };
        %% demonitor(Events): keep everything EXCEPT the listed events
        {demonitor, [Demonitor], _}->
            DemonitorList = case Demonitor of
                                _ when is_atom(Demonitor) ->
                                    [Demonitor];
                                _ when is_list(Demonitor) ->
                                    lists:map(Filter, Demonitor)
                            end,
            #monitor_options{
               persist_unknown_agent = not lists:member(unknown_agent,
                                                        DemonitorList),
               persist_dead_agent = not lists:member(dead_agent,
                                                     DemonitorList),
               persist_restarted_agent = not lists:member(restarted_agent,
                                                          DemonitorList),
               persist_revived_agent = not lists:member(revived_agent,
                                                        DemonitorList),
               persist_unreachable_agent = not lists:member(unreachable_agent,
                                                            DemonitorList),
               persist_created_agent = not lists:member(created_agent,
                                                        DemonitorList)
              };
        %%{persist,[any],_} ->
        _ -> %% Any other thing is wrong, therefore ignored
            find_monitor_options(Bindings,?PERSISTANY)
    end.
%%% Look for annotations "supervision_policy(Options)"
%%% Used by actions:execute(..., supervise_agents)
%% %% No ping does not use any
%% find_supervision_options(_Bindings, ?PERSISTANY)->
%% #monitor_options{
%% persist_unknown_agent = true,
%% persist_created_agent = true,
%% persist_dead_agent = true,
%% persist_restarted_agent = true,
%% persist_revived_agent = true,
%% persist_unreachable_agent = true
%% };
%% Builds a #supervision_policy{} record from the options of a
%% supervision_policy(OptionsList) annotation; any other input falls
%% back to the default policy (second clause).
find_supervision_options({supervision_policy,
                          [OptionsList],
                          _}) when is_list(OptionsList)->
    %% io:format("[Variables] Received Supervision Options: ~p~n",
    %%           [OptionsList]),
    %% NOTE(review): an unused atom-extracting Filter fun (mirroring
    %% find_monitor_options) was removed here as dead code.
    PreSupervisionPolicy =
        case lists:member(no_ping, OptionsList) of
            true ->
                %% If no_ping is given, do not test divergence
                #supervision_policy{no_ping = true};
            false ->
                #supervision_policy{
                   no_ping = false,
                   ping = find_ping_policy(OptionsList),
                   unblock = find_unblock_policy(OptionsList),
                   restart = find_restart_policy(OptionsList)}
        end,
    SupervisionPolicy =
        PreSupervisionPolicy#supervision_policy{
          revival = find_revival_policy(OptionsList),
          restart_strategy = find_restart_strategy(OptionsList)
         },
    %% io:format("[Variables] Using supervision policy: ~p~n",[SupervisionPolicy]),
    SupervisionPolicy;
find_supervision_options(_Other)->
    io:format("[Variables DEBUG] Default supervision options. Received: ~p~n",
              [_Other]),
    #supervision_policy{
       ping = #ping_policy{},
       unblock = #unblock_policy{},
       restart = #restart_policy{}
      }.
%% Scans an option list for the first well-formed ping option
%% {ping,[Frequency,Time,MaxPings],_} (all integers); returns the
%% default #ping_policy{} when none is present.
find_ping_policy(Options) ->
    case Options of
        [] ->
            #ping_policy{};
        [{ping, [Frequency, Time, MaxPings], _}|_]
          when is_integer(Frequency),
               is_integer(Time),
               is_integer(MaxPings) ->
            #ping_policy{frequency = Frequency,
                         time = Time,
                         maxpings = MaxPings};
        [_|Rest] ->
            find_ping_policy(Rest)
    end.
%% Scans an option list for the first well-formed unblock option:
%% {unblock,[never],_}, {unblock,[always],_} or
%% {unblock,[MaxUnblocks,Time],_} (both integers); returns the
%% default #unblock_policy{} when none is present.
find_unblock_policy(Options) ->
    case Options of
        [] ->
            #unblock_policy{};
        [{unblock, [never], _}|_] ->
            #unblock_policy{time = infinity,
                            maxunblocks = 0};
        [{unblock, [always], _}|_] ->
            #unblock_policy{time = 0,
                            maxunblocks = 1};
        [{unblock, [MaxUnblocks, Time], _}|_]
          when is_integer(Time),
               is_integer(MaxUnblocks) ->
            #unblock_policy{time = Time,
                            maxunblocks = MaxUnblocks};
        [_|Rest] ->
            find_unblock_policy(Rest)
    end.
%% Scans an option list for the first well-formed restart option:
%% {restart,[never],_}, {restart,[always],_} or
%% {restart,[MaxRestarts,Time],_} (both integers); returns the
%% default #restart_policy{} when none is present.
find_restart_policy(Options) ->
    case Options of
        [] ->
            #restart_policy{};
        [{restart, [never], _}|_] ->
            #restart_policy{time = infinity,
                            maxrestarts = 0};
        [{restart, [always], _}|_] ->
            #restart_policy{time = 0,
                            maxrestarts = 1};
        [{restart, [MaxRestarts, Time], _}|_]
          when is_integer(Time),
               is_integer(MaxRestarts) ->
            #restart_policy{time = Time,
                            maxrestarts = MaxRestarts};
        [_|Rest] ->
            find_restart_policy(Rest)
    end.
%% Scans an option list for the first well-formed revival option:
%% {revive,[never],_}, {revive,[always],_} or
%% {revive,[MaxRevive,Time],_} (both integers); returns the
%% default #revival_policy{} when none is present.
find_revival_policy(Options) ->
    case Options of
        [] ->
            #revival_policy{};
        [{revive, [never], _}|_] ->
            #revival_policy{time = infinity,
                            maxrevivals = 0};
        [{revive, [always], _}|_] ->
            #revival_policy{time = 0,
                            maxrevivals = 1};
        [{revive, [MaxRevive, Time], _}|_]
          when is_integer(Time),
               is_integer(MaxRevive) ->
            #revival_policy{time = Time,
                            maxrevivals = MaxRevive};
        [_|Rest] ->
            find_revival_policy(Rest)
    end.
%% Scans an option list for the first valid restart strategy,
%% {strategy,[S],_} with S in {one_for_one, one_for_all, rest_for_one};
%% falls back to the default strategy in #supervision_policy{}.
find_restart_strategy(Options) ->
    case Options of
        [] ->
            Default = #supervision_policy{},
            Default#supervision_policy.restart_strategy;
        [{strategy, [Strategy], _}|Rest] when is_atom(Strategy) ->
            case lists:member(Strategy,
                              [one_for_one, one_for_all, rest_for_one]) of
                true ->
                    Strategy;
                false ->
                    %% Invalid strategy atom: keep scanning
                    find_restart_strategy(Rest)
            end;
        [_|Rest] ->
            find_restart_strategy(Rest)
    end.
| null | https://raw.githubusercontent.com/avalor/eJason/3e5092d42de0a3df5c5e5ec42cb552a2f282bbb1/src/variables.erl | erlang | Checks whether the leftmost variable/atom can be matched to the
rightmost one. (e.g. a(b) matches a(b)[c]
but a(b)[c] does not match a(b)
NOTE that annotations are ignored. If used in the belief base, they
must be ignored.
Note: most of the "unification magic" is done here
Returns either "false" or an iterator for the new matchings variables.
The return value is an iterator because there can be several possible
matching due to the annotations of the variables
io:format("ID1 : ~p~nID2: ~p~n",[ID1,ID2]),
io:format("\nVariables:match -> Var1: ~p~n\tVar2: ~p~n",
[Var1,Var2]),
io:format("NewVar: ~p~n",[NewVar]),
Therefore, Var2 must be a list as well
io:format("NewVar1: ~p~n",[NewVar1]),
io:format("NewVar2: ~p~n",[NewVar2]),
e.g. A = A[3], A = [1,A,3]
io:format("Not Contained1~n~n"),
io:format("NewVar: ~p~n",[NewVar]),
io:format("Updated is: ~p~n",
e.g. A = A[4]
If Var2 is a ref, the matching is further attempted with the referred var
e.g. A = pred(a,A)
io:format("Not Contained2~n~n"),
e.g. [1,A,3] = A;
io:format("Not Contained3~n~n"),
only Var2 is unbound
io:format("Not Contained3~n~n"),
Fun1 and Fun2 are a reference to a var
Fun1 is a reference
Fun2 is a reference
Fun1 and Fun2 are atoms
Fun1 and Fun2 are strong negations
STRUCTS (the hard part)
These structs can be corrupted(e.g 1[B]), so we correct them
Note: possible source of inneficiency
io:format("FuncVar1: ~p~n",[FuncVar1]),
Then, the code can be reused
io:format("ItNewBindings: ~p~n",[ItNewBindings]),
io:format("Pairs: ~p~n",
[ArgumentPairs]),
This function uses the iterators for the bindings of
each argument to try matching the next arguments
Use the iterator for the next argument
Function that matches the annotations
This struct can be corrupted(e.g 1[B]), so we correct it
Var2 is also corrected in case it was a struct
Note: possible source of inneficiency
io:format("NewVar1: ~p~n",[NewVar1]),
io:format("NewVar2: ~p~n",[NewVar2]),
Then, the code can be reused
Functor is an unbound variable, its functor and args
may vary
Var2 is a list e.g A[] = [1,2]
Var2 is a negation struct e.g A[] = ~a(b,c)[L],
Binding the unbound var
Binding the struct to the var
Var2 is a (bound) struct: A[B] = c(d,e)[L]
A[B] = c[L]
The unbound var is matched to the struct Var2 without the annotations
Now, the annotations must be matched,
No more possibilities
Functor is a bound variable and NewVar2 is a struct
e.g: a(b,c)[annot] = A[L],
Now, the annotations must be matched,
The number of args is now equal. Iterate and compare again
The number of args is different, otherwise the previous clause
would've matched
no other possibilities left, are there?
NewVar2 is still a tuple, then reuse code
NewFunctor2 is an unbound variable
Note: the necessity of NewAnnot2 ==[] is dropped as
it is in the right-hand side of the equality
Binding the unbound var
Binding the struct to the var and dropping its annotations
e.g. a = a[b]
No new matchings
io:format("FuncVar2 is a bound var: ~p~n",
[Other]),
Var2 is no longer a struct, then iterate
io:format("Size1: ~p~nSize2: ~p~n",[size(Args1),
size(Args2)]),
io:format("Vars can never match -> Var1: ~p~n\tVar2: ~p~n",
[Var1,Var2]),
case {Func1,Func2}
io:format("Result for match_vars: ~p~n",[Res]),
false ->
io:format("FALSE!~n~n");
_ ->
io:format("Match!~n~n")
end,
Returns either "false" or an iterator for the possible matchings
ALL ANOTATIONS MATCHED: success!
io:format("Matching the annotation: ~p~n",[Annot1]),
The structs in Annots2 must be corrected
Note: this is a possible source of inneficiency
Var2 =
et_var(AnnotFrom2,Bindings),
variables:correct_structs(Bindings,Var2),
CorrectedVar2) end,
This iterators tries to match annot1
Some annotation not matched
io:format("Annotation1 matched: ~p~n",
[variables:valuate(MatchesForAnnot1,
Returns a variable without variable references (if possible)
and without vars in the functor/args/annots
i.e. replaces vars for its corresponding values
io:format("Single var: ~p~n",[Var]),
io:format("Valuating var: ~p~n",[Var]),
unbound variable
Functor is a reference to a variable
A structure
the execution of the plan.
Replaces all the bindings received with new names
Returns a new valuation, and the list of replacements :
io:format("Replaced Bindings: ~p~n",[NewBindings]),
io:format("Replacements: ~p~n",[Replacements]),
Obtains the id replacements for the variables in a list of vars
Numbers, atoms and '[]' (such that functor =:= id) are
spared the change of name
io:format("Replacing: ~p~n",[VarList]),
io:format("REPLACING FROM ~s~p \n",
io:format("Result: ~p~n",[Result]),
io:format("REPLACING To ~s~p \n",
[Prefix,length(
lists:filter(FilterFun,Result))+Num-1]),
It is invoked by obtain_replacements.
TODO: use a more efficient way of knowing the amount of new variables
if
[Var]);
true ->
ok
end,
io:format("rep var: ~p~n",[Var]),
Atoms where Func =:= ID are not changed
if
[Var]);
true ->
ok
end,
io:format("rep var: ~p~n",[Var]),
Counts how many of the replacements introduce new variables that use
the prefix
io:format("not counting: ~p~n",[A]),
io:format("rep var: ~p~n",[Var]),
Counts how many of the replacements introduce new variables that use
the prefix
io:format("not counting: ~p~n",[A]),
io:format("rep var: ~p~n",[?EMPTYLISTVAR]),
io:format("rep var: ~p~n",[Var]),
Function to apply replacements of a list of vars
[{'[]'}]->
orddict:store('[]',
'[]',
HeaderReplacements);
[#var{id = '[]'}]->
orddict:store('[]',
'[]',
HeaderReplacements);
_ ->
end,
Counts how many of the replacements introduce new variables that use
the prefix
io:format("not counting: ~p~n",[A]),
io:format("rep var: ~p~n",[Var]),
io:format("ReplacementsFunc: ~p~n",
io:format("ReplacementsArgs: ~p~n",
Counts how many of the replacements introduce new variables that use
the prefix
io:format("not counting: ~p~n",[A]),
io:format("ReplacementsAnnots: ~p~n",[ReplacementsAnnots]),
io:format("ReplacementsAnnots: ~p~n",
[ReplacementsAnnots]),
left_part = Left,
right_part = Right},Replacements) ->
%% Binary functions are fully valuated
case orddict:find(VarRef,Replacements) of
{ok,_} ->
Replacements;
error ->
NewVarID = list_to_atom(Prefix++
integer_to_list(length(Replacements)+
orddict:store(VarRef,
NewVarID,
Replacements)
end.
Applies the replacement of ids for the variables in a list or a var
Returns the replaced var(s)
NewFunc =
case Func of
{Ref} ->
{ok,NewRef} =
orddict:find(Ref,Replacements),
NewRef;
#var{id = SomeID} ->
{ok,NewRef} =
orddict:find(SomeID,Replacements),
NewRef
end,
io:format("CreatedVar: ~p~n",[CreatedVar]),
[NewVar|Vars];
[{'[]'}]->
[{'[]'}];
_ ->
end,
[NewVar|Vars];
io:format("VarRef: ~p~nRepl: ~p~n",[VarRef,Replacements]),
#binary_operation{left_part = Left,
right_part = Right},
Replacements) ->
left_part = use_replacements(Left,Replacements),
right_part = use_replacements(Right, Replacements)}.
Returns an iterator
#var{args = ?ISLIST,
functor = {[],[{'[]'}]}},
#var{args = ?ISLIST,
functor = {[],[{'[]'}]}})->
TODO: try to correct the lists so that it is possible to check when
these lists have different size
Lists of different size
Tail2 must already be the empty list
lastelement can be matched to the emptylist,
Tail2 must already be the empty list
lastelement can be matched to the emptylist,
Comparing elements
io:format("Matchin: ~p and ~p~n",[ Header1++[Tail1],
Header2++[Tail2]]),
If a match can be found, the bindings are updated. If they cannot,
false is returned
Matching tails
element in the header.
Matching Tail of List1 with List2
io:format("Splitting list: ~p~n",[List2]),
Matching Tail of List2 with List1
Not in the tail yet
%% Returns a list with all the vars contained inside the input var(s)
%% The input var included
%% TODO: check difference with jasonNewParser:find_vars
%% TODO: check difference with variables:vars_to_import
gather_vars([]) ->
[];
gather_vars(VarList) when is_list(VarList)->
gather_vars(Var) ->
gather_vars(Var,[]).
[Var|Acc];
gather_vars(Var = #var{functor = {Header,Tail},
args = ?ISLIST},
Fun =
Vars = lists:flatten(lists:map(Fun, lists:append([Header,Tail]))),
% Var is a struct
Fun =
gather_vars({_Ref},Acc) ->
gather_vars({parenthesis,
Element},Acc) ->
lists:flatten(lists:append(
[gather_vars(Element),
Acc]));
Acc) ->
lists:flatten( lists:append(
[lists:map(
fun variables:gather_vars/1,
lists:map(fun variables:gather_vars/1,
Acc])).
%% gather_vars(#predicate{name = Name,
% arguments = , annotations = Annot } ,
% Acc ) - >
% Fun = fun(X ) - > gather_vars(X ) end ,
%% lists:flatten(lists:append(
% lists : ) ) ) .
Finds a variable in the bindings.
Returns - A variable (if found) or {ID} (otherwise) if it is an id.
- An atom if the associated variable is just an atom
- A number if ID is a number
io:format("ID: ~p~n",[ID]),
Updates the bindings list (orddict) for the variables
Returns NewBindings
io:format("Updated Var: ~p~n",[Var]),
%% Strip a struct of all elements that are not
%% variable records
retain_variables(Atom) when is_atom(Atom) ->
[];
retain_variables(Var) when is_record(Var,var)->
retain_variables(Tuple) when is_tuple(Tuple)->
retain_variables(tuple_to_list(Tuple));
retain_variables(List) when is_list(List)->
Fun = fun (X) ->
retain_variables(X) end,
lists:flatten(lists:map(
Fun,
List)).
%% Gets all the variables in a list that are already bound
get_ground_variables(List) when is_list(List) ->
lists:filter(Fun,List).
%% Valuates a list of params using its bindings
valuate_return_params(Params,Bindings)->
Fun = fun (X) ->
% io:format("Ret Params: ~p~n",[lists:map(Fun,Params)]),
% io:format("Valuated: ~p~n",[fully_valuate(lists:map(Fun,Params))]),
lists:map(Fun2,lists:map(Fun,Params)).
%% Replaces every variable for its binding, whenever it exists
fully_valuate(List) when is_list(List) ->
Fun = fun (X) ->
fully_valuate(X) end,
lists:map(Fun,List);
fully_valuate({Var = # var{bind = ? } ,
% {},[]})->% Added to match standalone variables in annots
Var ;
fully_valuate({PredName,
Fun = fun (X) ->
fully_valuate(X) end,
{fully_valuate(PredName),
fully_valuate({VarName}) ->
VarName;
ID;
fully_valuate(Var = #var{id = ID, args = ?UNBOUNDLIST}) ->
ID;
% fully_valuate(Bindings,{PredName , , Annot}= Pred )
% when ) ,
%% is_tuple(Args),
%% is_list(Annot)
%% ->
% fully_valuate(Bindings , PredName ) ,
% list_to_tuple(fully_valuate(Bindings , ) ) ) ,
%% fully_valuate(Bindings,Annot)},
% % io : format("We evaluate Pred : ~p ~ nas ~p ~ n",[Pred , ] ) ,
% ;
fully_valuate(Bind) ->
io : format("No further valuation for ~p ~ n",[Bind ] ) ,
Bind.
%% retain_unique_vars(Element)->
% VarsList = retain_variables(Element ) ,
%% utils:erase_repeated_vars(VarsList).
% Creates a three - element timestamp tuple from the input .
%% Used to match variables in the annotations when these variables
%% are used like the parameters
% make_timestamp_tuple({VarName , , Annots})- >
%% {getTimeStamp(VarName),
%% getTimeStamp(Args),
%% getTimeStamp(Annots)}.
% getTimeStamp(#var{timestamp = TS})- >
%% TS;
%% getTimeStamp(Vars) when is_tuple(Vars)->
%% list_to_tuple(getTimeStamp(tuple_to_list(Vars)));
% getTimeStamp(VarsList ) when is_list(VarsList ) - >
%% Fun = fun(Var) ->
%% getTimeStamp(Var) end,
% lists : map(Fun , VarsList ) .
Applies valuate to a list.
Added as an extra function to avoid collision with the valuate
for a string variable
io:format("Valuating a real List: ~p~n",[List]),
;
valuate_list(Bindings, Other) ->
exit(kill),
io:format("WARNING variables:valuate/2 should be used instead.~n"),
valuate(Bindings,Other).
Replaces the var references for the proper vars (not their values,
except in the case of ?ISREF) using the argument "Bindings"
When a binary operation is found, it is resolved, which may fail.
In the case of lists, it is checked whether the the tail references a list
NOTE: badformed lists are not allowed by ejason
list_to_tuple(
% io:format("RET: ~p~n",[Ret]),
io:format("Valuating an atom: ~p~n",[Atom]),
io:format("Valuating a number: ~p~n",[Number]),
io:format("Valuating a ref: ~p~n",[VarRef]),
io:format("ref corresponds to: ~p~n",
io:format("Valuating list?: ~p~n",[List]),
io:format("ValuatingVar: ~p~n",[Var]),
Var#var{functor = valuate(Bindings,Func)};
the tail of a list must be another list
io:format("VarTAIL: ~p~n",[VarTail]),
a=b,
io:format("[WARNING: Improper Tail ~p]~n",
[ OtherTail]),
io:format("ValuatredVar: ~p~n",[ValuatedVar]),
case Solution
#var{id=list_to_atom("SOLVEDBINARYOPERATION"++make_timestamp_string()),
functor = Solution,
args = ?ISATOM}-
%% Turns a variable into its tuple representation.
%% Unbound variables are left unmodified
var_to_tuple(#var{functor = Func,
annots = Annots}) ->
{Func,{}};
?ISLIST ->
{var_to_tuple(Func),{}};
{Func,{}};
?UNBOUNDLIST ->
{Func,{}};
_ ->
{var_to_tuple(Func),
list_to_tuple(var_to_tuple(tuple_to_list(Args)))}
end,
var_to_tuple(Annots)};
var_to_tuple(List) when is_list(List) ->
Fun = fun (X) -> var_to_tuple(X) end,
lists:map(Fun,List).
will generate a struct whose functor points to A, a variable
that already has annotations, which is problematic
This function deletes these problems.
It is used in the arguments of the operations
Works with BO as well.
io:format("Correcting Struct: ~p~n",[UnvaluatedVar]),
io:format("Valuated Struct: ~p~n",[ValuatedVar]),
io:format("NewCleanVars: ~p~n",[NewVars]),
io:format("Final Corrected Struct: ~p~n",[CleanVar]),
This auxiliary function corrects the structs but does not
modify the bindings (vars_to_import can be used to obtain the new bindings)
It is achieved by correct_structs/2
Atoms cannot have annotations
io:format("NegVar",[NegVar]),
io:format("CorrectFunc: ~p~n",
[correct_structs(Func)]),
~ a -> ~ a()[]
Changed into an atom: ~1 -> 1, ~"a" -> "a"
Correcting: ~~Var
Error should not use ~[1,2,3]
replaced by [1,2,3]
io:format("CorrectedFunc: ~p~n",[CorrectedFunc]),
[{'[]'}] ->
Tail;
_ ->
end,
io:format("CorrectedHeader: ~p~nCorrectedTail: ~p~n",
[CorrectedHeader, CorrectedTail]),
Lists cannot have annotations
io:format("StructVar: ~p~n",[StructVar]),
io:format("CorrectedAnnots: ~p~n",[CorrectedAnnots]),
Lists and numbers cannot have annotations
The struct is turned into a reference
args = CorrectedArgs,
annots = CorrectedAnnots
},
io:format("CorrectedStruct: ~p~n",[NewStruct]),
Annots ignored if the functor
an atom (number, string) or a list
If the functor is an unbound variable, nothing changes
Var = A[L]
The functor is a strong negation, merge the
annotations
e.g. ~a(b)[c][d] -> ~a(b)[c,d]
io:format("StrongNegFun: ~p~n",
[StrongNegFunc]),
The functor is another structure, merge the
annotations
e.g. a(b)[c][d]
io:format("BO: ~p~n",[BO]),
Receives a var that is bad formed (e.g. an ?ISREF variable whose
functor is a variable A, not a reference to A)
Does not guarantee that the variables replaced by their
references are already captured in some bindings (orddict)
It can be achieved using vars_to_import prior to calling clean_var
well-formed functor
id = '[]'}
io:format("NewHeader: ~p~nNewTail: ~p~n",[NewHeader,NewTail]),
Receives a variable/binary_operation and generates the
list of bindings that can be extracted from it.
Unlike in gather_vars (<-deleted), the variables in structs/lists
are replaced by their references
It is used by the belief_base to identify the new bindings
imported.
It is used by the parser to get the variables in an action
If the reference is a variable, take its values
io:format("NewVar: ~p~n",[NewVar]),
io:format("vars_to_import on header\n"),
[{'[]'}] ->
[];
_ ->
io:format("vars_to_import on tail\n"),
end,
[{'[]'}] ->
[{'[]'}];
_ ->
end,
io:format("Return: ~p~n",[Ret]),
io:format("vars_to_import on functor: ~p\n",[VarsFunctor]),
Remove binary operations
( {_}) ->
true;
io:format("vars_to_import on args: ~p\n",[VarsArgs]),
io:format("Return: ~p~n",[Ret]),
Used to create unique var names
TODO: move to the module utils
Function to update a valuation with the new matching from a query to the
Belief Base or the call of a plan.
NOTE: This function is a bottleneck for execution performance. An alternative
shall be found to increase the performance of the solution
io:format("Result from Belief: ~p~n",[BeliefBindings]),
TODO: avoid giving "new" values to the
variables already matched when given as params
We must generate new variables to add to bindings
These are the variables that where given as param
These are the new variables generated for the call
io:format("OriginalVars: ~p~n",[OriginalVars]),
io:format("UsedVars: ~p~n",[UsedVars]),
io:format("ImportBindings: ~p~n",[ImportBindings]),
These are the vars that correspond to those of the
call. We need them, because when we valuate them the ones
that are of type ?ISREF get lost.
io:format("CallVars: ~p~n",[CallVars]),
%% The variables from the call that "disappear" are added to the list
%% of valuated vars
link first to last!?ISREF -> ?ISREF -> ?ISREF..
io:format("ValuatingVar: ~p~n",[IsRefVar]),
io:format("Using: ~p~n",[ImportBindings]),
io:format("ValuatedVar: ~p~n",[ValVar]),
io:format("Why??: ~p~n",Other),
io:format("1\n"),
io:format("3\n"),
io:format("ValuatedVars: ~p~n",[ValuatedVars]),
These are the variables to rename
(the ones from the query/call and those referenced
by them)
io:format("ValuatedVars: ~p~n",[ValuatedVars]),
io:format("VarsToRenamen: ~p~n",[VarsToRename]),
io:format("Renaming from: ~p ~n",
[length(OriginalBindings)]),
length(OriginalBindings),
io:format("NewRepls: ~p~n",[NewRepl]),
This function maps the variables in the params
to those of the last replacement
io:format("Repl1: ~p~n",[Repl1]),
io:format("Repl2: ~p~n",[Repl2]),
to avoid self-refs
TODO: for debugging purposes, erase
Checks if some variable is going to be wrongly updated
case Updated of
[] ->
ok;
_ ->
end,
Replace the original bindings with the new ones
io:format("Ending with a total of: ~p ~n",
[length(FinalResult)]),
timer:sleep(5000),
io:format("FinalResult: ~p~n",[FinalResult]),
Raises an exception if there is a clash (values for the same var differ)
IDVar1 e.g. A = ~pred(B,A).
Checks whether some term contains free variables or not.
The variable is fully valuated.
Returns true if the term is ground, or not otherwise
a(b,c)[d,e] -> {a,[b,c],[d,e]}
A[b] -> {[],[],[b]}
~a(b,c)[d,e] -> {'~',a,[b,c],[d,e]}
avoid always adding an empty list as last element
NOTE: Strings are treated like lists.
Turn arguments into unbound variables
e.g a(b,c) -> a(_,_)
TODO: create a function that allows the search for determined
["Value1",...,"ValueN"] that returns a list of var matchings
[Value1,...,ValueN].
Look for an annotation "container(ContainerName)"
actions:execute(..., send)
If none is found, "node()" is returned
io:format("Looking for container in: ~p~n",[Annots]),
Match annotations receives a lists of var references
container(Name) found. Extract match.
%% io:format("StructVar: ~p~n",[StructVar]),
#var{functor = #var{id = container},
io:format("FoundContainerName: ~p~n",[FoundContainerName]),
_ when is_atom(FoundContainerName) ->
FoundContainerName;
container(SomeName)
container(SomeName[morelabels])
io:format("ContainerName: ~p~n",[ContainerName]),
Look for annotations "persist(Options) or demonitor(Options)"
Used by actions:execute(..., monitor_agent)
If none is found, the equivalent of "persist(any)" is returned
io:format("[variables] Configuration: ~p~n",[ErlConfiguration]),
removes anything that is not an atom from the configuration
{persist,[any],_} ->
Any other thing is wrong, therefore ignored
Look for annotations "supervision_policy(Options)"
Used by actions:execute(..., supervise_agents)
%% No ping does not use any
#monitor_options{
persist_unknown_agent = true,
persist_created_agent = true,
persist_dead_agent = true,
persist_restarted_agent = true,
persist_revived_agent = true,
persist_unreachable_agent = true
};
[OptionsList]),
Looks for some pattern in the list
If no_ping is given, do not test divergence
io:format("[Variables] Using supervision policy: ~p~n",[SupervisionPolicy]), | -module(variables).
-compile(export_all).
-include("include/macros.hrl").
-include("include/parser.hrl").
-include("include/variables.hrl").
-include("include/records.hrl").
-define(ERLTOEJASONVAR, "ERLTOEJASONVAR").
match_vars(Bindings,
#var{id = ID1},
#var{id = ID2}) when ID1 == ID2;
ID1 == ?UNDERSCORE;
ID2 == ?UNDERSCORE->
iterator:create_iterator([Bindings]);
match_vars(Bindings,
Var1= #var{ functor = Func1,
args = Args1},
Var2=#var{functor = Func2,
args = Args2}) ->
io : format("Bindings : ~p ~ ] ) ,
Res = case {Func1,Func2} of
{?NOFUNCTOR,?NOFUNCTOR} when Args1==?UNBOUND,Args2 == ?UNBOUND;
Args1==?UNBOUND,Args2 ==
?UNBOUNDLIST;
Args1==?UNBOUNDLIST,Args2 ==
?UNBOUNDLIST->
Var1 and Var2 are unbound whether lists or not
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])]);
{?NOFUNCTOR,?NOFUNCTOR} when Args1==?UNBOUNDLIST,
Args2 == ?UNBOUND->
Var1 and Var2 are unbound , but Var1 must be a list
NewVar1 =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
NewVar2 =
Var2#var{args =?UNBOUNDLIST},
iterator:create_iterator([update(Bindings,
[NewVar1,NewVar2])]);
{?NOFUNCTOR,_} when Args1 == ?UNBOUND, Args2 == ?ISLIST;
Args1 == ?UNBOUND, Args2 == ?ISATOM;
Args1 == ?UNBOUND, Args2 == ?STRONGNEG;
Args1 == ?UNBOUNDLIST, Args2 == ?ISLIST;
Args1 == ?UNBOUNDLIST, Args2 == ?ISATOM, Func2 == [] ->
only Var1 is unbound , Var2 is atom / list / string / strongneg
Check if Var2 contains Var1
case check_contains(Bindings,Var1#var.id,Var2) of
true ->
false;
false ->
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
io : format("Bindings : ~p ~ ] ) ,
[ update(Bindings,[NewVar ] ) ] ) ,
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
{?NOFUNCTOR,_} when Args1 == ?UNBOUND,Args2 =/= ?ISREF,
Func2 == {Var1#var.id}->
Var1 is unbound , Var2 is a struct whose functor is Var1
iterator:create_iterator([Bindings]);
{?NOFUNCTOR,_} when Args1 == ?UNBOUND,Args2 =/= ?ISREF->
only Var1 is unbound , Var2 is a struct ( not a ref then )
Check if Var2 contains Var1
case check_contains(Bindings,Var1#var.id,Var2) of
true ->
false;
false ->
NewVar =
Var1#var{functor = {Var2#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
{_,?NOFUNCTOR} when Args2 == ?UNBOUND, Args1 == ?ISLIST;
Args2 == ?UNBOUND,Args1 == ?ISATOM;
Args2 == ?UNBOUND,Args1 == ?STRONGNEG;
Args2 == ?UNBOUNDLIST, Args1 == ?ISLIST;
Args2 == ?UNBOUNDLIST, Args1 == ?ISATOM, Func1 ==[]->
only Var2 is unbound , Var1 is atom / list / string
case check_contains(Bindings,Var2#var.id,Var1) of
true ->
false;
false ->
NewVar =
Var2#var{functor = {Var1#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
{_,?NOFUNCTOR} when Args2 == ?UNBOUND,
only Var2 is unbound , Var1 is a struct ( not a ref then )
If Var1 is a ref , the matching is further attempted with the referred var
case check_contains(Bindings,Var2#var.id,Var1) of
true ->
e.g. pred(B , C ) = A ;
false;
false ->
NewVar =
Var2#var{functor = {Var1#var.id},
args =?ISREF},
iterator:create_iterator([update(Bindings,
[NewVar])])
end;
{{Ref1}, {Ref2}} when Args1 == ?ISREF,
Args2 == ?ISREF->
match_vars(Bindings,
get_var(Ref1,Bindings),
get_var(Ref2,Bindings));
match_vars(Bindings,
get_var(Ref1,Bindings),Var2);
match_vars(Bindings,
Var1,get_var(Ref2,Bindings));
{Atom,Atom} when Args1 == ?ISATOM,
Args2 == ?ISATOM->
iterator:create_iterator([Bindings]);
{{_Header1,_Tail1}, {_Header2,_Tail2}}
Matching two lists
match_lists(Bindings,Var1,Var2);
{{Ref1}, {Ref2}} when Args1 == ?STRONGNEG,
Args2 == ?STRONGNEG->
match_vars(Bindings,
get_var(Ref1,Bindings),
get_var(Ref2,Bindings));
{{_},{_}} when is_tuple(Args1), is_tuple(Args2),
size(Args1) == size(Args2)->
Var1 and Var2 represent full structs
{CorrectedBindings1,NewVar1} =
correct_structs(Bindings,Var1),
{Bindings2,NewVar2} =
correct_structs(CorrectedBindings1,Var2),
#var{functor = NewFunc1,
args = NewArgs1,
annots = NewAnnots1} = NewVar1,
#var{functor = NewFunc2,
args = NewArgs2,
annots = NewAnnots2} = NewVar2,
if is_tuple(NewArgs1), is_tuple(NewArgs2),
size(NewArgs2) == size(NewArgs1) ->
NewVar1 and NewVar2 are full structs with the same number of args
{Ref1} = NewFunc1,
{Ref2} = NewFunc2,
FuncVar1 =
get_var(Ref1,Bindings2),
FuncVar2 =
get_var(Ref2,Bindings2),
case
match_vars(Bindings2,FuncVar1,FuncVar2) of
false ->
false;
ItNewBindings ->
ArgumentPairs = lists:zip(tuple_to_list(NewArgs1),
tuple_to_list(NewArgs2)),
Match = fun (_,false) ->
false;
({{Elem1},{Elem2}},ItUseBindings) ->
FunNextArgs =
fun (NextArgBindings) ->
match_vars(
NextArgBindings,
get_var(Elem1,
NextArgBindings),
get_var(Elem2,
NextArgBindings))
end,
iterator:create_iterator_fun(ItUseBindings,
FunNextArgs)
end,
There can be several unifications for the variables in functor+args
ItArgsBindings=lists:foldl(Match,ItNewBindings,
ArgumentPairs),
AnnotFun =
fun (false)->
false ;
(ArgsBindings) ->
match_annotations(
ArgsBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2))
end,
iterator:create_iterator_fun(ItArgsBindings,
AnnotFun)
end;
true ->
NewVar1 and NewVar2 are not two structs with same number of args
match_vars(Bindings2,NewVar1,NewVar2)
end;
{{_},_} when is_tuple(Args1)->
Var1 is a struct e.g. : A[B ]
{CorrectedBindings1,NewVar1} =
correct_structs(Bindings,Var1),
{Bindings2,NewVar2} =
correct_structs(CorrectedBindings1,Var2),
#var{functor = NewFunc1,
args = NewArgs1,
annots = NewAnnots1} = NewVar1,
#var{functor = NewFunc2,
args = NewArgs2,
annots = NewAnnots2} = NewVar2,
if is_tuple(NewArgs1)->
NewVar1 is still a struct
{Ref1} = NewFunc1,
case get_var(Ref1,Bindings2) of
UnboundVar =#var{args = ?UNBOUND} ->
if
NewArgs2 == ?ISATOM, NewAnnots1 == [];
Var2 is an atom e.g A [ ] = " 123 "
NewArgs2 == ?ISLIST, NewAnnots1 == [];
NewArgs2 == ?STRONGNEG, NewAnnots1 == [] ->
BoundVar =
UnboundVar#var{functor =
{NewVar2#var.id},
args = ?ISREF,
annots = []},
FinalVar1 =
NewVar1#var{args = ?ISREF,
functor = {Ref1},
annots = []},
NewBindings =
update(Bindings2,[BoundVar,
FinalVar1]),
iterator:create_iterator([NewBindings]);
is_tuple(NewArgs2)->
e.g. A = c(d , e )
BoundVar =
UnboundVar#var{functor = NewFunc2, args = NewArgs2,annots = []},
Var1 is bound to the Struct Var2 plus the annotations of Var1
FinalVar1 =
NewVar1#var{ functor = NewFunc2, args = NewArgs2},
UseBindings =
update(Bindings2,[BoundVar,FinalVar1]),
match_annotations(
UseBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2));
true ->
false
end;
_ when is_tuple(NewArgs2)->
{Ref2} = NewFunc2,
case get_var(Ref2,Bindings2) of
Functor2 is an unbound variable
UnboundVar =#var{args = ?UNBOUND} ->
The unbound var is matched to the struct NewVar1 without the annotations
e.g. a(b , c)=A
BoundVar =
UnboundVar#var{functor = NewFunc1, args = NewArgs1,annots = []},
FinalVar2 is bound to the Struct NewVar1 plus the annotations of Var2
FinalVar2 =
NewVar2#var{functor = NewFunc1, args = NewArgs1},
UseBindings =
update(Bindings2,[BoundVar,FinalVar2]),
match_annotations(
UseBindings,
NewAnnots1,
iterator:create_iterator(NewAnnots2));
BoundVar = #var{} when size(NewArgs1) == size(BoundVar#var.args)->
match_vars(Bindings2,NewVar1,NewVar2);
_ ->
false
end;
_ ->
end;
true ->
NewVar1 is no longer a struct , then iterate
match_vars(Bindings2,NewVar1,NewVar2)
end;
{_,{_}} when is_tuple(Args2)->
Var2 is a struct while Var1 is not
{Bindings2,NewVar2} =
correct_structs(Bindings,Var2),
#var{functor = NewFunc2,
args = NewArgs2} = NewVar2,
{Ref2} = NewFunc2,
if is_tuple(NewArgs2) ->
case get_var(Ref2,Bindings2) of
UnboundVar =#var{args = ?UNBOUND} ->
if
Args1 == ?ISATOM;
Var1 is an atom e.g " 123 " = A[B ]
Args1 == ?ISLIST;
Var1 is a list e.g [ 1,2 ] = A[B ]
Args1 == ?STRONGNEG ->
Var1 is a negation struct e.g ~a(b , c)[L ] = A[B ]
BoundVar =
UnboundVar#var{functor =
{Var1#var.id},
args = ?ISREF,
annots = []},
FinalVar2 =
NewVar2#var{args = ?ISREF,
functor = {Ref2},
annots = []},
NewBindings =
update(Bindings2,[BoundVar,
FinalVar2]),
iterator:create_iterator([NewBindings]);
true ->
false
end;
#var{args = ?ISATOM,
functor = FunctorAtom}
when Args1 == ?ISATOM, NewArgs2 == {},
Func1==FunctorAtom ->
Var1 and NewVar2 can be the same atom :
iterator:create_iterator([Bindings2]);
_Other ->
false
end;
true ->
match_vars(Bindings2, Var1,NewVar2)
end;
_ ->
false
case Res of
Res;
match_vars(_Bindings,P1,P2) ->
io:format("[variables:match_vars/2, error] \nP1: ~p~nP2: ~p~n",[P1,P2]),
a = b.
Tries to match all the annotations of Annots1 with those of Annots2
match_annotations(Bindings,
[],
_ItAnnots2) ->
iterator:create_iterator([Bindings]);
match_annotations(Bindings, [{Annot1}|Rest],ItAnnots2) ->
io : format("In the set ~p ~ n",[iterator : ) ] ) ,
Var1 = get_var(Annot1,
Bindings),
MatchAnnot1Fun =
fun ({AnnotFrom2}) ->
{ UseBindings , CorrectedVar2 } =
match_vars(UseBindings , Var1 ,
Var2 = get_var(AnnotFrom2,Bindings),
match_vars(Bindings, Var1,
Var2) end,
ItMatchesForAnnot1 =
iterator:create_iterator_fun(ItAnnots2,MatchAnnot1Fun),
MatchRestFun =
false;
(MatchesForAnnot1) ->
variables : get_var(Annot1 ,
) ) ] ) ,
match_annotations(MatchesForAnnot1, Rest,ItAnnots2)
end,
If can be matched , match the rest .
iterator:create_iterator_fun(ItMatchesForAnnot1,MatchRestFun).
get_valuated_var(ID,Bindings)
when is_atom(ID)->
Var = get_var(ID,Bindings),
get_valuated_var(Var,Bindings);
get_valuated_var(Var = #var{functor = Func,
args = Args,
annots = Annots},
Bindings) ->
io : format("{Func : ~p , : ~p}~n",[Func , ] ) ,
{NewFunc,NewArgs,NewAnnots} =
case {Func,Args} of
{_,?ISATOM} when is_atom(Func)->
{Func,Args,
lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
Annots)};
{Func,Args,
lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
Annots)};
ReferredVar =
get_valuated_var(VarRef,Bindings),
io : format("ReferredVar : ~p ~ n",[ReferredVar ] ) ,
{ReferredVar#var.functor, ReferredVar#var.args,
ReferredVar#var.annots};
ReferredVar =
get_valuated_var(VarRef,Bindings),
ValuatedArgs =
lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
tuple_to_list(Args)),
ValuatedAnnots =
lists:map(fun (X) -> get_valuated_var(X,Bindings) end,
Annots),
{ReferredVar,
list_to_tuple(ValuatedArgs),
ValuatedAnnots}
end,
ReturnVar = #var{
CHECK IF THIS IS NOT , any name should work
functor = NewFunc,
args = NewArgs,
annots = NewAnnots
},
io : format("Returning : ~p ~ n",[ReturnVar ] ) ,
ReturnVar.
We must : 1 ) Change the name of the variables in the params , so that there
are no clashes with those in .
2 ) Get the new variables generated and add them to bindings .
3 ) Identify the correspondence , so that it can be reverted after
{ NewBindings , Replacements }
replace_bindings(Prefix,Bindings) ->
io : format("Received Bindings : ~p ~ ] ) ,
ListBindings = orddict:to_list(Bindings),
ListValues = [Value || {_key,Value} <- ListBindings],
Replacements = obtain_replacements(Prefix,length(Bindings),ListValues),
NewListValues = use_replacements(ListValues,Replacements),
NewBindings =
update([],NewListValues),
{NewBindings,Replacements}.
obtain_replacements(Prefix,Num,VarList) ->
[ Prefix , ] ) ,
Result =
obtain_replacements(Prefix,Num,VarList,[]),
Result.
obtain_replacements(_Prefix,_Num,[],Replacements) ->
io : format("Final replacements : ~p ~ n",[Replacements ] ) ,
TODO add this replacement only when there are lists
orddict:store(
'[]',
'[]',
Replacements);
obtain_replacements(Prefix,Num,[Value|Rest],Replacements) ->
NewReplacements =
my_replace_vars(Prefix, Num, Value,Replacements),
obtain_replacements(Prefix,Num,Rest,NewReplacements).
my_replace_vars(Prefix , Param , ) when is_integer(Num)- >
my_replace_vars(Prefix , , , [ ] ) .
Returns a list of [ { VarID , NewVarID } ] for each binding
my_replace_vars(_Prefix, _Num,
Var = #var{id = ID, args = Args, functor = Func},
Replacements) when Args == ?ISATOM, Func =:= ID;
ID == '[]'->
Args = = ? ISATOM - > io : format("Atomvar spared : ~p ~ n " ,
case orddict:find(ID, Replacements) of
{ok, _} ->
Replacements;
error ->
orddict:store(ID,
ID,
Replacements)
end;
my_replace_vars(Prefix, Num,
Var = #var{id = Id, args = Args},
Replacements) when Args == ?ISATOM;
Args == ?UNBOUND;
Args == ?UNBOUNDLIST->
Args = = ? ISATOM - > io : format("Atomvar renamed : ~p ~ n " ,
case orddict:find(Id, Replacements) of
{ok, _} ->
Replacements;
error ->
FilterFun =
fun ({A,A}) ->
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID =
list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,Replacements))+Num)),
io : format("Adding1 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
NewVar = Var#var{id = NewVarID } ,
orddict:store(Id,
NewVarID,
Replacements)
end;
my_replace_vars(Prefix, Num,
Var =#var{id = Id,functor = Func, args = Args},
Replacements) when Args == ?ISREF;
Args == ?STRONGNEG->
case orddict:find(Id, Replacements) of
{ok, _} ->
Replacements;
error ->
FilterFun =
fun ({A,A}) ->
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID =
list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,Replacements))+Num)),
io : format("Adding2 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
my_replace_vars(Prefix,Num,
Func,
orddict:store(Id,
NewVarID,
Replacements))
end;
my_replace_vars(_Prefix, _Num,
?EMPTYLISTVAR,
Replacements) ->
Replacements;
my_replace_vars(Prefix, Num,
Var =#var{id = Id, functor = {Header,Tail}, args = ?ISLIST},
Replacements) ->
case orddict:find(Id,Replacements) of
{ok,_} ->
Replacements;
error ->
Fun = fun (X,AccumReplacements) ->
my_replace_vars(Prefix,Num,X,AccumReplacements) end,
HeaderReplacements =
lists:foldl(Fun,
Replacements,
Header),
io : : ~p ~ n",[Tail ] ) ,
TailReplacements =
case Tail of
lists:foldl(Fun,
HeaderReplacements,
Tail),
FilterFun =
fun ({A,A}) ->
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID = list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,
TailReplacements))+
Num)),
io : format("Adding3 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
orddict:store(Id,
NewVarID,
TailReplacements)
end;
my_replace_vars(Prefix, Num,
Var = #var{id = Id, functor = Func,
args = Args, annots = Annots},
Replacements) when is_tuple(Args)->
Input is a struct
case orddict:find(Id,Replacements) of
{ok,_} ->
Replacements;
error ->
Fun = fun (X,AccumReplacements) ->
my_replace_vars(Prefix,Num,X,AccumReplacements) end,
ReplacementsFunc =
lists:foldl(Fun,
Replacements,
[Func]),
[ ReplacementsFunc ] ) ,
ReplacementsArgs =
lists:foldl(Fun,
ReplacementsFunc,
tuple_to_list(Args)),
[ ReplacementsArgs ] ) ,
ReplacementsAnnots =
lists:foldl(Fun,
ReplacementsArgs,
Annots),
FilterFun =
fun ({A,A}) ->
false;
(_Other) ->
io : format("yes , counting : ~p ~ n",[Other ] ) ,
true
end,
NewVarID = list_to_atom(Prefix++
integer_to_list(
length(
lists:filter(FilterFun,
ReplacementsAnnots))+
Num)),
io : format("Adding4 NewVar : { ~p,~p}~n",[Id , NewVarID ] ) ,
orddict:store(Id,
NewVarID,
ReplacementsAnnots)
end;
my_replace_vars(Prefix , , # binary_operation {
LeftReplacements =
my_replace_vars(Prefix , , Left , Replacements ) ,
AllReplacements =
my_replace_vars(Prefix , , Left , LeftReplacements ) ,
AllReplacements ;
my_replace_vars(Prefix, Num, {VarRef},Replacements) ->
VarRef is not replaced , as it could be an atom
Replacements.
) ) ,
use_replacements(VarList,Replacements) when is_list(VarList)->
Fun = fun(Var) ->
use_replacements(Var,Replacements) end,
lists:map(Fun,VarList);
use_replacements(?EMPTYLISTVAR, _Replacements) ->
?EMPTYLISTVAR;
use_replacements(Var=#var{id=ID,
functor = Func, args =Args,
annots =Annots},
Replacements) ->
Fun = fun (Vars) ->
use_replacements(Vars,Replacements) end,
io : format("ID : ~p ~ nRepl : ~p ~ n",[ID , Replacements ] ) ,
{ok,NewID} =
orddict:find(ID,Replacements),
NewVar =
case Args of
_ when Args == ?UNBOUND;
Args == ?ISATOM;
Args == ?UNBOUNDLIST->
Var#var{id = NewID};
_ when Args == ?ISREF;
Args == ?STRONGNEG->
CreatedVar =Var#var{id = NewID,
functor = Fun(Func)},
CreatedVar
;
_ when Args == ?ISLIST->
{Header,Tail} = Func,
NewHeader =
Fun(Header),
NewTail =
case Tail of
Fun(Tail),
NewVar =
Var#var{id = NewID,
functor = {NewHeader,NewTail}};
_ when is_tuple(Args)->
NewFunc =
Fun(Func),
NewArgs =
list_to_tuple(Fun(tuple_to_list(Args))),
NewAnnots =
Fun(Annots),
Var#var{id = NewID,
functor = NewFunc,
args = NewArgs,
annots = NewAnnots}
end,
NewVar;
use_replacements({VarRef},Replacements) ->
{ok,NewVarRef} = orddict:find(VarRef,Replacements),
{NewVarRef}.
use_replacements(BO =
BO#binary_operation {
Matches the variables in two lists
Two empty lists
?EMPTYLISTVAR,
?EMPTYLISTVAR)->
iterator:create_iterator([Bindings]);
#var{args = ?ISLIST,
functor = {Header1,
[{'[]'}]}
},
#var{args = ?ISLIST,
functor = {Header2,
[{'[]'}]}
}
) when length(Header1) =/= length(Header2)->
false;
One of the lists is empty
?EMPTYLISTVAR,
#var{args = ?ISLIST,
functor = {[{LastElement}],
[{Tail}]}}
)->
case valuate(Bindings,
get_var(Tail,Bindings)) of
?EMPTYLISTVAR ->
match_vars(Bindings,?EMPTYLISTVAR,
get_var(LastElement,Bindings));
_ ->
false
end;
One of the lists is empty
#var{args = ?ISLIST,
functor = {[{LastElement}],
[{Tail}]}},
?EMPTYLISTVAR
)->
case valuate(Bindings,
get_var(Tail,Bindings)) of
?EMPTYLISTVAR ->
match_vars(Bindings,get_var(LastElement,Bindings),
?EMPTYLISTVAR);
_ ->
false
end;
match_lists(Bindings,
#var{args = ?ISLIST,
functor = {Header1,Tail1}},
#var{args = ?ISLIST,
functor = {Header2,
match_elems_in_list( Bindings,
Header1++[Tail1],
Header2++[Tail2]).
Receives a list with the elements of two lists that must be matched .
match_elems_in_list(Bindings,
[[{ElemInTail1}]],
match_vars(Bindings,
get_var(ElemInTail1,Bindings),
get_var(ElemInTail2,Bindings));
match_elems_in_list(Bindings,
[[{'[]'}]],
List2)when length(List2) > 1->
One element is the empty list while the other has at least one
false;
match_elems_in_list(Bindings,
[[{Elem1}]],
List2)->
NewListVarId = list_to_atom(lists:flatten("EJASONLISTVAR"++
integer_to_list(length(Bindings)))),
{NewHeader,[NewTail]} = lists:split(length(List2)-1,List2),
NewListVar =
#var{id = NewListVarId,
functor = {NewHeader,NewTail},
args = ?ISLIST},
NewBindings =
orddict:store(NewListVarId,
NewListVar,
Bindings),
case match_vars(NewBindings,
NewListVar,
get_var(Elem1,NewBindings)) of
false ->
false;
ItNewNewBindings ->
ItNewNewBindings
end;
match_elems_in_list(Bindings,
List1,
[[{Elem2}]])->
match_elems_in_list(Bindings,
[[{Elem2}]],
List1);
match_elems_in_list(Bindings,
[{Elem1}|Elems1],
[{Elem2}|Elems2])->
case match_vars(
Bindings,
get_var(Elem1,Bindings),
get_var(Elem2,Bindings)) of
false ->
false;
ItNewBindings->
MatchFun =
fun (NewBindings) ->
match_elems_in_list(NewBindings,
Elems1,
Elems2) end,
iterator:create_iterator_fun(ItNewBindings,
MatchFun)
end.
Fun = fun(Var , Acc ) - > gather_vars(Var , Acc ) end ,
lists : foldl(Fun,[],VarList ) ;
gather_vars(Var = # var{args = ,
Acc ) when = = ? ISATOM ; = = ? UNBOUND ; = = ? ISREF ;
= = ? STRONGNEG ; = = ? UNBOUNDLIST- >
) - >
fun(X ) - > gather_vars(X ) end ,
lists : flatten(lists : append([[Var|Acc ] , ] ) ) ;
gather_vars(Var = # var{functor = Func , args = , annots = Annots } ,
fun(X ) - > gather_vars(X ) end ,
VarsFunc = Fun(Func ) ,
VarsArgs = lists : map(Fun , ) ) ,
VarsAnnots = lists : map(Fun , ) ,
lists : flatten(lists : append([[Var|Acc],VarsFunc , VarsArgs , VarsAnnots ] ) ) ;
Acc ;
gather_vars(#binary_operation{left_part = BodyLeft ,
right_part = BodyRight } ,
BodyLeft ) ,
BodyRight ) ,
get_var('[]',_)->
?EMPTYLISTVAR;
get_var(ID,Bindings)->
case orddict:find(ID,Bindings) of
{ok,Var}->
Var;
error ->
io:format("[variables.erl] Warning: variable ~p not found,\n",
[ID]),
a=b,
{ID}
end.
1st argument is the binding list to be updated
2nd argument is a list of new bindings
update(Bindings,[])->
Bindings;
update(Bindings,[Var|Rest]) ->
NewBindings = orddict:store(Var#var.id,
Var,
Bindings),
update(NewBindings,Rest).
Var ;
Fun = fun ( # var{is_ground = IG } ) - >
IG end ,
variables : valuate_param(X , ) end ,
Fun2 = fun ( Var = # var{})- >
fully_valuate(Var ) end ,
, Annots } ) when is_tuple(Args ) , is_list(Annots)- >
TArgs = list_to_tuple(lists : map(Fun , ) ) ) ,
lists : map(Fun , ) } ;
fully_valuate(Var = # var{id = ID , args = ? } ) - >
valuate_list(Bindings,List) when is_list(List) ->
Fun = fun (X) -> valuate(Bindings,X) end,
lists:map(Fun,List).
valuate(_,[])->
[];
valuate(Bindings,{Functor , , Annots } ) - >
= { valuate(Bindings , Functor ) ,
valuate_list(Bindings , ) ) ) ,
valuate_list(Bindings , ) } ,
Ret ;
valuate(Bindings,Atom) when is_atom(Atom) ->
valuate(Bindings,{Atom});
valuate(Bindings,Number) when is_number(Number) ->
valuate(Bindings,{Number});
valuate(_,?EMPTYLISTVAR) ->
?EMPTYLISTVAR;
valuate(_,{'[]'}) ->
?EMPTYLISTVAR;
valuate(Bindings,{VarRef})->
case orddict:find(VarRef,Bindings) of
{ok,Var}->
io : format("Bindings for Ref : ~p ~ ] ) ,
[ ] ) ,
valuate(Bindings,Var);
error ->
io:format("[~p DEBUG:] Variable ~p not found ~n",
[?MODULE,VarRef]),
io:format("in Bindings: ~n~p~n",[Bindings]),
a = b
end;
valuate(Bindings, List) when is_list(List) ->
valuate(Bindings,{List});
valuate(Bindings,Var = #var{functor = Func, args = Args,
annots = Annots}) ->
ValuatedVar =
case Args of
?ISATOM ->
Var;
?UNBOUND ->
Var;
?UNBOUNDLIST ->
Var;
?ISREF ->
valuate(Bindings,Func);
?STRONGNEG ->
Var#var{
functor = valuate(Bindings,Func)};
?ISLIST ->
{Header,Tail} = Func,
NewHeader=
valuate_list(Bindings,Header),
NewTail =
case valuate_list(Bindings,Tail) of
[?EMPTYLISTVAR]->
[?EMPTYLISTVAR];
[VarTail = #var{args = TailArgs}]
when TailArgs == ?ISLIST; TailArgs == ?UNBOUNDLIST ->
[VarTail];
[VarTail = #var{args = TailArgs}]
when TailArgs == ?UNBOUND ->
io : : ~p ~ n",[VarTail ] ) ,
[VarTail#var{args = ?UNBOUNDLIST}];
OtherTail ->
io:format("OtherTAIL: ~p~n",[OtherTail]),
exit(improper_list)
end,
io : : ~p ~ n ~ n ~ n",[NewTail ] ) ,
Var#var{
functor = {NewHeader,
NewTail},
annots = []};
valuate_list(Bindings , ) } ;
_ when is_tuple(Args)->
Var#var{
functor = valuate(Bindings,Func),
args = list_to_tuple(
valuate_list(Bindings,tuple_to_list(Args))),
annots = valuate_list(Bindings,Annots)
};
_ ->
io:format("[~p DEBUG:] Cannot valuate Var ~p ~n",
[?MODULE,Var]),
exit(error)
end,
ValuatedVar;
valuate(Bindings,
BO=#binary_operation{
left_part = LeftPart,
right_part= RightPart}) ->
io : format("Valuating Binary : ~p ~ nBindings:~p ~ n",[BO , ] ) ,
Operation =
BO#binary_operation{
left_part = valuate(Bindings,LeftPart),
right_part =
case RightPart of
no_right_part ->
no_right_part;
_ ->
valuate(Bindings,RightPart)
end},
Solution =
operations:resolve(Operation),
#var{id = Solution,
functor = Solution,
args = ?ISATOM,
annots = []}.
args = ,
{ NewFunc , NewArgs } =
case of
? ISATOM - >
? UNBOUND - >
{ NewFunc ,
NewArgs ,
Structs from plan formulas can be wrong ( e.g. A = a[g ] and B = ] )
ModifiedVar is valuated ( i.e. bound variables are replaced )
A struct like " A = 1[B ] " , is turned to " A = 1 "
Returns { NewBindings , ModifiedVar }
correct_structs(Bindings,
UnvaluatedVar)->
ValuatedVar = valuate(Bindings,UnvaluatedVar),
CorrectedVar = correct_structs(ValuatedVar),
io : : ~p ~ n",[CorrectedVar ] ) ,
NewVars = lists:flatten(vars_to_import(CorrectedVar)),
gather_vars(CorrectedVar ) ) ,
CleanVar = clean_var(CorrectedVar),
NewBindings = update(Bindings,NewVars),
{NewBindings,
CleanVar}.
correct_structs(Var =
#var{functor = _Func,
args = Args,
annots = _Annots}) when Args == ? ISATOM;
Args == ?UNBOUNDLIST;
Args == ?UNBOUND->
Var#var{annots = []};
correct_structs(NegVar = #var{functor = Func,
args = ?STRONGNEG}) ->
CorrectedVar =
Strongneg refers to struct vars for simplicity
case correct_structs(Func) of
AtomVar= #var{args = ?ISATOM, functor = F} when is_atom(F) ->
NewVarID =
list_to_atom("EJASONSTRUCTINNEG"++
?MODULE:make_timestamp_string()),
NewVar=#var{ id = NewVarID,
functor = AtomVar,
args = {},
annots = []},
NegVar#var{
functor = NewVar,
annots = []
};
AtomVar= #var{args = ?ISATOM, id = AtomID} ->
Atomvar is a string or number : ~1 , "
NegVar#var{args = ?ISREF, functor = {AtomID}};
StructVar = #var{args = Args} when is_tuple(Args)->
NegVar#var{
functor = StructVar,
annots = []
};
NegVar = #var{args = ?STRONGNEG,
functor = #var{id=NegatedRef}}->
NegVar#var{
functor = {NegatedRef},
args = ?ISREF,
annots =[]};
UnboundVar = #var{functor = ?NOFUNCTOR}->
NegVar;
List = #var{args = ?ISLIST, id = ListID}->
NegVar#var{args = ?ISREF, functor = {ListID},annots =[]}
end;
correct_structs(?EMPTYLISTVAR) ->
?EMPTYLISTVAR;
correct_structs(StructVar =
#var{functor = {Header,Tail},
args = ?ISLIST}) ->
CorrectedHeader =
lists:map(fun correct_structs/1,
Header),
CorrectedTail =
case Tail of
lists:map(fun correct_structs/1,
Tail),
StructVar#var{
functor = {CorrectedHeader,
CorrectedTail},
annots = []
};
correct_structs(StructVar =
#var{functor = Func,
args = Args,
annots = Annots}) when is_tuple(Args)->
case Func of
#var{args = ?ISATOM} ->
CorrectedArgs =
list_to_tuple(
lists:map(fun correct_structs/1,
tuple_to_list(Args))),
NewStruct =
case Func#var.functor of
FunctorNumOrStr when is_number(FunctorNumOrStr), Args =={};
is_list(FunctorNumOrStr), Args=={}->
StructVar#var{
functor = {Func#var.id},
args = ?ISREF,
annots = []
};
_ ->
StructVar#var{functor = Func,
annots = lists:map(fun correct_structs/1,
Annots),
args = CorrectedArgs
}
end,
StructVar#var{functor = Func ,
NewStruct;
#var{args = FuncArgs} when
FuncArgs == ?UNBOUNDLIST, Args =={};
FuncArgs == ?ISLIST, Args == {}->
e.g. Var = [ 1,2][L ]
CorrectedFunc =
correct_structs(Func),
StructVar#var{
functor = CorrectedFunc,
args = ?ISREF,
annots = []
};
#var{args = FuncArgs} when FuncArgs == ?UNBOUND, Args == {}->
CorrectedAnnots =
lists:map(fun correct_structs/1,
Annots),
StructVar#var{
annots = CorrectedAnnots
};
#var{functor = StrongNegFunc,
args = ?STRONGNEG,
annots = _} ->
AnnotsFunc = StrongNegFunc#var.annots,
CorrectedAnnots =
lists:map(fun correct_structs/1,
Annots++AnnotsFunc),
StructVar#var{
functor = StrongNegFunc#var{
annots = CorrectedAnnots},
args = ?STRONGNEG,
annots = []
} ;
#var{functor = FuncFunc,
args = ArgsFunc,
annots = AnnotsFunc} when is_tuple(ArgsFunc),
Args == {}->
CorrectedAnnots =
lists:map(fun correct_structs/1,
Annots++AnnotsFunc),
StructVar#var{
functor = FuncFunc,
args = ArgsFunc,
annots = CorrectedAnnots
}
end;
correct_structs(BO = #binary_operation{left_part = BodyLeft,
right_part = BodyRight}) ->
BO#binary_operation{
left_part =correct_structs(BodyLeft),
right_part = case BodyRight of
no_right_part ->
no_right_part;
_ -> correct_structs(BodyRight)
end}.
clean_var(DirtyVar = #var{args = ?ISREF,
functor = #var{id = ID}}) ->
DirtyVar#var{functor = {ID}};
clean_var(DirtyVar = #var{
functor = Functor,
args = Args,
annots = Annots
}) when Args =/= ?ISATOM, Args =/= ?ISLIST, Args =/= ?ISREF,
Args =/= ?UNBOUND, Args =/= ?UNBOUNDLIST,
Args =/= ?STRONGNEG->
io : format("DirtyVar : ~p ~ n",[DirtyVar ] ) ,
NewFunc =
case Functor of
Functor;
#var{id = FuncID} ->
{FuncID}
end,
RefFun =
fun(#var{id = ID}) ->
{ID};
({Ref}) ->
{Ref};
(BO = #binary_operation{}) ->
BO
end,
NewArgs =
list_to_tuple(lists:map(RefFun,tuple_to_list(Args))),
NewAnnots =
lists:map(RefFun,Annots),
DirtyVar#var{functor = NewFunc,
args = NewArgs,
annots = NewAnnots};
Var = # var{args = ? ISLIST ,
) ->
?EMPTYLISTVAR;
clean_var(Var =#var{args = ?ISLIST,
functor = {Header,Tail}}) ->
io : ] Cleaning List : ~p ~ n",[Var ] ) ,
RefFun =
fun(#var{id = ID}) ->
{ID};
({Ref}) ->
{Ref}
end,
NewHeader =
lists:map(RefFun, Header),
NewTail =
lists:map(RefFun, Tail),
Var#var{functor = {NewHeader,NewTail}};
clean_var(OtherVar) ->
OtherVar.
vars_to_import(Var = #var{args = Args}) when Args == ?ISATOM;
Args == ?UNBOUND;
Args == ?UNBOUNDLIST->
[Var];
vars_to_import(Var = #var{args = Args}) when Args == ?ISREF;
Args == ?STRONGNEG->
io : format("Var : ~p ~ n",[Var ] ) ,
RefVar =
Var#var.functor,
io : format("RefVar : ~p ~ n",[RefVar ] ) ,
{NewVar,NewVarsFunctor} =
case RefVar of
{Var#var{functor = {RefVar#var.id}, annots = []},
vars_to_import(RefVar)};
_ ->
{Var#var{functor = RefVar, annots = []},
[]}
end,
lists:flatten(lists:append( [ [NewVar],
NewVarsFunctor]));
vars_to_import(?EMPTYLISTVAR) ->
[];
vars_to_import(Var =#var{functor = {Header,Tail}, args = ?ISLIST} ) ->
io : : ~p ~ n",[Var ] ) ,
FunImport = fun (X) ->
vars_to_import(X) end,
FunID =
fun (#var{id = ID})->
{ID} end,
VarsHeader = lists:map(FunImport, Header),
case Tail of
lists:map(FunImport, Tail),
NewHeader =
lists:map(FunID, Header),
NewTail =
case Tail of
lists:map(FunID, Tail),
NewVar =
Var#var{
functor = {NewHeader,NewTail}
},
Ret = lists:flatten(lists:append([ [NewVar|VarsHeader], VarsTail])),
Ret;
vars_to_import(Var =#var{functor = Functor, args = Args, annots = Annots} ) ->
io : : ~p ~ n",[Var ] ) ,
FunImport = fun (X) ->
vars_to_import(X) end,
FunID =
fun (#var{id = ID})->
{ID};
(BO=#binary_operation{})->
BO
end,
VarsFunctor = FunImport(Functor),
VarsArgs =
lists:filter(
(#binary_operation{}) ->
false;
(#var{}) ->
true
end,
lists:flatten(lists:map(FunImport,tuple_to_list(Args)))),
VarsAnnots = lists:flatten(lists:map(FunImport, Annots)),
io : on ; ~p\n",[VarsAnnots ] ) ,
NewFunctor = FunID(Functor),
NewArgs = list_to_tuple(lists:map(FunID,tuple_to_list(Args))),
NewAnnots = lists:map(FunID,Annots),
NewVar =
Var#var{
functor = NewFunctor,
args = NewArgs,
annots = NewAnnots
},
Ret= lists:flatten(lists:append([ [NewVar|VarsFunctor], VarsArgs,VarsAnnots])),
Ret;
vars_to_import(#binary_operation{left_part = Left,
right_part = Right})->
VarsLeft =
case Left of
_ when is_atom(Left)->
[];
_ ->
vars_to_import(Left)
end,
VarsRight =
case Right of
_ when is_atom(Right)->
[];
_ ->
vars_to_import(Right)
end,
lists:flatten(
lists:append([ VarsLeft,VarsRight])).
Erlang timestamp function " erlang : timestamp "
Returns a string of length 18 with a timestamp
make_timestamp_string()->
List = tuple_to_list( erlang:timestamp()),
[A,B,C] =
lists:map(fun (Num) -> string:right(integer_to_list(Num), 6, $0) end,
List),
A++B++C.
import_new_matchings(OriginalBindings, FirstReplacements,
NewVarsPrefix, ImportBindings)->
io : format("First Replacements : ~p ~ n",[FirstReplacements ] ) ,
OriginalVars =
[X|| {X,_} <- FirstReplacements],
UsedVars =
[{Y} || {_,Y} <- FirstReplacements],
CallVars =
[get_var(ID,ImportBindings) ||
{ID} <- UsedVars],
ReplaceIsRef =
fun(IsRefVar = #var{args = ?ISREF}) ->
ValVar = valuate(ImportBindings,
IsRefVar),
IsRefVar#var{functor = {ValVar#var.id}};
(Other) ->
a = b
end,
ErasedVars =
[ReplaceIsRef(X) || X <-lists:filter(
fun(#var{args = ?ISREF}) ->
true;
(_) -> false
end, CallVars)],
ValuatedVars =
valuate_list(ImportBindings,
UsedVars)++ ErasedVars,
VarsToRename =
sets:to_list(
sets:from_list(
lists:flatten(lists:map(
fun vars_to_import/1,
ValuatedVars)))),
NewRepl =
obtain_replacements(
NewVarsPrefix,
VarsToRename),
RenamedVars =
use_replacements(VarsToRename,
NewRepl),
FinalFun =
fun(VarID) ->
{ok,Repl1} =
orddict:find(
VarID, FirstReplacements),
{ok,Repl2} =
orddict:find(
Repl1, NewRepl),
case VarID =:= Repl2 of
true->
get_var(VarID,OriginalBindings);
false ->
#var{id = VarID,
args = ?ISREF,
functor = {Repl2}}
end
end,
FinalMatches =
lists:map(FinalFun,
OriginalVars),
CheckFun =
fun(#var{id = VarID, functor = Func}) when Func =/= VarID->
case orddict:find(VarID,OriginalBindings) of
error ->
false;
_ ->
true
end;
(_) ->
false
end,
Updated = lists:filter(CheckFun, lists:flatten(
lists:append(
[RenamedVars,
FinalMatches]))),
io : format("Updated : ~p ~ nIn : ~p ~ n",[Updated , OriginalBindings ] ) ,
timer : sleep(30000 )
FinalResult =
update(
OriginalBindings,
lists:flatten(
lists:append(
[RenamedVars,
FinalMatches]))),
FinalResult.
Merges two valuations .
merge_bindings(Bindings1,Bindings2) ->
MergeFun =
fun(_,SameValue,SameValue)->
SameValue;
(Key,Value1,Value2) ->
io:format(
"Error: the valuations have conflicting values for var: \n"++
"Key: ~p\nValue1:~p\nValue2:~p\n",
[Key,Value1,Value2]),
exit(valuation_merge_bindings_error)
end,
orddict:merge(MergeFun, Bindings1,Bindings2).
Checks if the variables referred by IDVar2 contain
check_contains(_Bindings,IDVar1,{IDVar1}) ->
true;
check_contains(_Bindings,IDVar1,#var{id = IDVar1}) ->
true;
check_contains(Bindings,IDVar1,{IDVar2}) ->
check_contains(Bindings,IDVar1,
get_var(IDVar2,Bindings));
check_contains(_Bindings,_IDVar1,#var{args = Args}) when Args ==?ISATOM;
Args ==?UNBOUND;
Args ==?UNBOUNDLIST->
false;
check_contains(Bindings,IDVar1,#var{functor= Ref,
args = ?ISREF}) ->
check_contains(Bindings, IDVar1, Ref);
check_contains(_,_,?EMPTYLISTVAR) ->
false;
check_contains(Bindings,IDVar1,#var{functor =
{Header,Tail},
args = ?ISLIST}) ->
FunCond = fun (Var) ->
check_contains(Bindings,IDVar1,Var) end,
lists:any(FunCond,Header++Tail);
check_contains(Bindings,IDVar1,#var{functor = Ref,
args = ?STRONGNEG}) ->
check_contains(Bindings, IDVar1, Ref);
check_contains(Bindings,IDVar1,#var{functor = Func,
args = Args,
annots = Annots}) when is_tuple(Args) ->
FunCond = fun (Var) ->
check_contains(Bindings,IDVar1,Var) end,
lists:any(FunCond,[Func|tuple_to_list(Args)] ++ Annots).
is_ground( #var{functor = ?NOFUNCTOR}) ->
false;
is_ground(?EMPTYLISTVAR) ->
true;
is_ground(#var{args = ?ISATOM}) ->
true;
is_ground(#var{args = ?STRONGNEG,functor = Func}) ->
is_ground(Func);
is_ground(#var{functor = {Header,Tail}, args = ?ISLIST}) ->
NotGroundFun =
fun(Var)->
not is_ground(Var) end,
not lists:any(NotGroundFun, Header++Tail);
is_ground(#var{args = Args, functor = Func, annots=Annots})
when is_tuple(Args)->
NotGroundFun =
fun(Var)->
not is_ground(Var) end,
not lists:any(NotGroundFun, [Func|tuple_to_list(Args)]++Annots).
Turns ejason variables into their erlang equivalent
Structs are turned into 3 - element tuples :
Unbound vars are turned into 3 - element tuples :
Strong negation structs are turned into 4 - element tuples :
ejason_to_erl(?EMPTYLISTVAR)->
[];
ejason_to_erl(Var = #var{functor = Func, args = ?ISATOM}) ->
Func;
ejason_to_erl(#var{functor = StructVar,
args = ?STRONGNEG}) ->
StructList =
['~']++ case ejason_to_erl(StructVar) of
{Functor,Args,Annots} ->
tuple_to_list({Functor,Args,Annots});
Atom when is_atom(Atom) ->
[Atom,[],[]]
end,
list_to_tuple(StructList);
ejason_to_erl(V = #var{functor = {[Header],[Tail]},
args = ?ISLIST}) ->
io : ] VarList = ~p ~ n",[V ] ) ,
[ejason_to_erl(Header)|
[?EMPTYLISTVAR] ->
[];
_ ->
ejason_to_erl(Tail)
end];
ejason_to_erl(#var{functor = ?NOFUNCTOR, args = ?UNBOUND, annots = Annots}) ->
Fun = fun (X) -> ejason_to_erl(X) end,
{[],[],lists:map(Fun, Annots)};
ejason_to_erl(#var{functor = ?NOFUNCTOR, args = ?UNBOUNDLIST}) ->
{[],[],[]};
ejason_to_erl(#var{functor = Func, args = Args,annots = Annots}) ->
{ejason_to_erl(Func),
lists:map(fun ejason_to_erl/1, tuple_to_list(Args)),
lists:map(fun ejason_to_erl/1, Annots)}.
Turns erlang terms into eJason variables - ONLY ONE VARIABLE !
erl_to_ejason([])->
?EMPTYLISTVAR;
erl_to_ejason([LastElem]) ->
Time = make_timestamp_string(),
#var{
id = list_to_atom(?ERLTOEJASONVAR++Time),
functor =
{[erl_to_ejason(LastElem)],
[?EMPTYLISTVAR]},
args = ?ISLIST
};
erl_to_ejason([Header|Tail]) ->
Time = make_timestamp_string(),
ListHeader =
erl_to_ejason(Header),
ListTail =
erl_to_ejason(Tail),
#var{
id = list_to_atom(?ERLTOEJASONVAR++Time),
functor={[ListHeader],[ListTail]},
args = ?ISLIST
};
erl_to_ejason(Atom) when is_atom(Atom);
is_number(Atom)->
#var{
id = Atom,
functor= Atom,
args = ?ISATOM,
annots = []
};
erl_to_ejason(Other) ->
io:format("[variables:erl_to_ejason] There is currently no support"++
" for the automatic translation of"++
" an Erlang term:~n ~p into eJason.~n",[Other]).
keep_functor(Var = #var{args = Args}) when is_tuple(Args)->
ArgsList = tuple_to_list(Args),
UnboundVarsFun =
fun(_) ->
#var{id = list_to_atom(
"UNBOUNDVAR"++make_timestamp_string()),
functor = ?NOFUNCTOR,
args = ?UNBOUND,
annots = []} end,
NewList =
lists:map(UnboundVarsFun, ArgsList),
Var#var{args = list_to_tuple(NewList)}.
annotations , like : search(Bindings , , " annotation(Value1 , _ ) " ,
Used by actions : ... ,create_agent )
find_container_name(Bindings,Annots)->
ContainerNameVar = #var{id =
list_to_atom(
"CONTAINERNAMEVAR"++
make_timestamp_string()),
functor =?NOFUNCTOR,
args = ?UNBOUND},
ContainerAtomVar =
#var{args = ?ISATOM,
id = container,
functor = container},
ContainerVar =
#var{ id =
list_to_atom("CONTAINERVAR"++
make_timestamp_string()),
functor = {container},
args = {{ContainerNameVar#var.id}}},
UseBindings =
update(Bindings,[ContainerVar,ContainerAtomVar,
ContainerNameVar]),
UseAnnots = lists:map(fun (#var{id = ID}) -> {ID} end,
Annots),
FoundContainerName =
case match_annotations(
UseBindings,
[{ContainerVar#var.id}],
iterator:create_iterator(UseAnnots)) of
false ->
node();
ItAnnots when is_function(ItAnnots) ->
case iterator:first(ItAnnots) of
false ->
node();
NewBindings ->
SuggestedContainerVar =
valuate(
NewBindings,
get_var(ContainerNameVar#var.id,
NewBindings)),
args = { SuggestedContainerVar } } = StructVar ,
SuggestedContainerVar
end
end,
ContainerName =
case FoundContainerName of
#var{args = ?ISATOM} ->
FoundContainerName#var.functor;
#var{functor = #var{args = ?ISATOM} }->
(FoundContainerName#var.functor)#var.functor;
_ ->
io:format("[Variables Debug:] Invalid containerName: ~p~n",
[FoundContainerName]),
node()
end,
ContainerName.
find_monitor_options(_Bindings, ?PERSISTANY)->
#monitor_options{
persist_unknown_agent = true,
persist_created_agent = true,
persist_dead_agent = true,
persist_restarted_agent = true,
persist_revived_agent = true,
persist_unreachable_agent = true
};
find_monitor_options(Bindings, Configuration)->
ErlConfiguration = ejason_to_erl(Configuration),
Filter = fun ( {Functor,[],_}) -> Functor;
(Atom ) when is_atom(Atom)-> Atom;
(_) -> []
end,
case ErlConfiguration of
{demonitor,[Persist],_} when Persist == any orelse
Persist == [any]->
#monitor_options{
persist_unknown_agent = false,
persist_created_agent = false,
persist_dead_agent = false,
persist_restarted_agent = false,
persist_revived_agent = false,
persist_unreachable_agent = false
};
{persist, [Persist], _} when Persist =/= any andalso
Persist =/= [any]->
PersistList = case Persist of
_ when is_atom(Persist) ->
[Persist];
_ when is_list(Persist) ->
lists:map(Filter, Persist)
end,
#monitor_options{
persist_unknown_agent = lists:member(unknown_agent,
PersistList),
persist_dead_agent = lists:member(dead_agent,
PersistList),
persist_restarted_agent = lists:member(restarted_agent,
PersistList),
persist_revived_agent = lists:member(revived_agent,
PersistList),
persist_unreachable_agent = lists:member(unreachable_agent,
PersistList),
persist_created_agent = lists:member(created_agent,
PersistList)
};
{demonitor, [Demonitor], _}->
DemonitorList = case Demonitor of
_ when is_atom(Demonitor) ->
[Demonitor];
_ when is_list(Demonitor) ->
lists:map(Filter, Demonitor)
end,
#monitor_options{
persist_unknown_agent = not lists:member(unknown_agent,
DemonitorList),
persist_dead_agent = not lists:member(dead_agent,
DemonitorList),
persist_restarted_agent = not lists:member(restarted_agent,
DemonitorList),
persist_revived_agent = not lists:member(revived_agent,
DemonitorList),
persist_unreachable_agent = not lists:member(unreachable_agent,
DemonitorList),
persist_created_agent = not lists:member(created_agent,
DemonitorList)
};
find_monitor_options(Bindings,?PERSISTANY)
end.
find_supervision_options(_Bindings , ? >
find_supervision_options({supervision_policy,
[OptionsList],
_}) when is_list(OptionsList)->
io : format("[Variables ] Received Supervision Options : ~p ~ n " ,
Filter = fun ( {Functor,[],_}) -> Functor;
(Atom ) when is_atom(Atom)-> Atom;
(_) -> []
end,
PreSupervisionPolicy =
case lists:member(no_ping, OptionsList) of
true ->
#supervision_policy{no_ping = true};
false ->
#supervision_policy{
no_ping = false,
ping = find_ping_policy(OptionsList),
unblock = find_unblock_policy(OptionsList),
restart = find_restart_policy(OptionsList)}
end,
SupervisionPolicy =
PreSupervisionPolicy#supervision_policy{
revival = find_revival_policy(OptionsList),
restart_strategy = find_restart_strategy(OptionsList)
},
SupervisionPolicy;
find_supervision_options(_Other)->
io:format("[Variables DEBUG] Default supervision options. Received: ~p~n",
[_Other]),
#supervision_policy{
ping = #ping_policy{},
unblock = #unblock_policy{},
restart = #restart_policy{}
}.
find_ping_policy([])->
#ping_policy{};
find_ping_policy([{ping,[Frequency, Time, MaxPings], _}|_])
when is_integer(Frequency),
is_integer(Time),
is_integer(MaxPings)->
#ping_policy{
frequency = Frequency,
time = Time,
maxpings = MaxPings};
find_ping_policy([_|Rest]) ->
find_ping_policy(Rest).
find_unblock_policy([])->
#unblock_policy{};
find_unblock_policy([{unblock,[never], _}|_])->
#unblock_policy{
time = infinity,
maxunblocks = 0};
find_unblock_policy([{unblock,[always], _}|_])->
#unblock_policy{
time = 0,
maxunblocks = 1};
find_unblock_policy([{unblock,[MaxUnblocks, Time], _}|_])
when is_integer(Time),
is_integer(MaxUnblocks) ->
#unblock_policy{
time = Time,
maxunblocks = MaxUnblocks};
find_unblock_policy([_|Rest]) ->
find_unblock_policy(Rest).
find_restart_policy([])->
#restart_policy{};
find_restart_policy([{restart,[never], _}|_])->
#restart_policy{
time = infinity,
maxrestarts = 0};
find_restart_policy([{restart,[always], _}|_])->
#restart_policy{
time = 0,
maxrestarts = 1};
find_restart_policy([{restart,[MaxRestarts, Time], _}|_])
when is_integer(Time),
is_integer(MaxRestarts) ->
#restart_policy{
time = Time,
maxrestarts = MaxRestarts};
find_restart_policy([_|Rest]) ->
find_restart_policy(Rest).
find_revival_policy([])->
#revival_policy{};
find_revival_policy([{revive,[never], _}|_])->
#revival_policy{
time = infinity,
maxrevivals = 0};
find_revival_policy([{revive,[always], _}|_])->
#revival_policy{
time = 0,
maxrevivals = 1};
find_revival_policy([{revive,[MaxRevive, Time], _}|_])
when is_integer(Time),
is_integer(MaxRevive) ->
#revival_policy{
time = Time,
maxrevivals = MaxRevive};
find_revival_policy([_|Rest]) ->
find_revival_policy(Rest).
find_restart_strategy([])->
Default = #supervision_policy{},
Default#supervision_policy.restart_strategy;
find_restart_strategy([{strategy,[Strategy], _}|Rest])
when is_atom(Strategy) ->
case lists:member(Strategy, [one_for_one, one_for_all, rest_for_one]) of
true ->
Strategy;
false ->
find_restart_strategy(Rest)
end;
find_restart_strategy([_|Rest]) ->
find_restart_strategy(Rest).
|
f577dccd9e081ad3a39312ada3e72f6ffd1205e2a597c0122311d80422445d7e | caradoc-org/caradoc | find.ml | (*****************************************************************************)
(* Caradoc: a PDF parser and validator *)
Copyright ( C ) 2016 - 2017
(* *)
(* This program is free software; you can redistribute it and/or modify *)
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation .
(* *)
(* This program is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU General Public License for more details. *)
(* *)
You should have received a copy of the GNU General Public License along
with this program ; if not , write to the Free Software Foundation , Inc. ,
51 Franklin Street , Fifth Floor , Boston , USA .
(*****************************************************************************)
open Key
open File
open Document
open Mapkey
open Errors
open Entry
open Directobject
open Indirectobject
open Stats
module Find = struct
let print_occurrences (occurrences : Entry.t list MapKey.t) (doc : Document.t) (show_ctxt : bool) (highlight : bool) : unit =
if occurrences = MapKey.empty then (
print_string "Not found\n";
exit 255
) else (
let count = ref 0 in
let count_obj = ref 0 in
MapKey.iter (fun k l ->
count_obj := !count_obj + 1;
List.iter (fun entry ->
count := !count + 1;
Printf.printf "Found%s\n" (Errors.ctxt_to_string (Errors.make_ctxt_entry k entry))
) l;
if show_ctxt then (
let selector =
if highlight then
Entry.make_selector l
else
Entry.no_selector
in
let tmp =
if k = Key.Trailer then
DirectObject.dict_to_string_hl (Document.main_trailer doc) selector
else
IndirectObject.to_string_hl (Document.find_obj doc k) selector
in
Printf.printf "%s\n\n" tmp
)
) occurrences;
Printf.printf "Found %d occurrence(s) in %d object(s).\n" !count !count_obj
)
let find_ref (key : Key.t) (filename : string) (show_ctxt : bool) (highlight : bool) : unit =
let doc = File.parse_file filename (Stats.create ()) in
let occurrences = Document.find_ref key doc in
print_occurrences occurrences doc show_ctxt highlight
let find_name (name : string) (filename : string) (show_ctxt : bool) (highlight : bool) : unit =
let doc = File.parse_file filename (Stats.create ()) in
let occurrences = Document.find_name name doc in
print_occurrences occurrences doc show_ctxt highlight
end
| null | https://raw.githubusercontent.com/caradoc-org/caradoc/100f53bc55ef682049e10fabf24869bc019dc6ce/src/tools/find.ml | ocaml | ***************************************************************************
Caradoc: a PDF parser and validator
This program is free software; you can redistribute it and/or modify
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*************************************************************************** | Copyright ( C ) 2016 - 2017
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation .
You should have received a copy of the GNU General Public License along
with this program ; if not , write to the Free Software Foundation , Inc. ,
51 Franklin Street , Fifth Floor , Boston , USA .
open Key
open File
open Document
open Mapkey
open Errors
open Entry
open Directobject
open Indirectobject
open Stats
module Find = struct
let print_occurrences (occurrences : Entry.t list MapKey.t) (doc : Document.t) (show_ctxt : bool) (highlight : bool) : unit =
if occurrences = MapKey.empty then (
print_string "Not found\n";
exit 255
) else (
let count = ref 0 in
let count_obj = ref 0 in
MapKey.iter (fun k l ->
count_obj := !count_obj + 1;
List.iter (fun entry ->
count := !count + 1;
Printf.printf "Found%s\n" (Errors.ctxt_to_string (Errors.make_ctxt_entry k entry))
) l;
if show_ctxt then (
let selector =
if highlight then
Entry.make_selector l
else
Entry.no_selector
in
let tmp =
if k = Key.Trailer then
DirectObject.dict_to_string_hl (Document.main_trailer doc) selector
else
IndirectObject.to_string_hl (Document.find_obj doc k) selector
in
Printf.printf "%s\n\n" tmp
)
) occurrences;
Printf.printf "Found %d occurrence(s) in %d object(s).\n" !count !count_obj
)
let find_ref (key : Key.t) (filename : string) (show_ctxt : bool) (highlight : bool) : unit =
let doc = File.parse_file filename (Stats.create ()) in
let occurrences = Document.find_ref key doc in
print_occurrences occurrences doc show_ctxt highlight
let find_name (name : string) (filename : string) (show_ctxt : bool) (highlight : bool) : unit =
let doc = File.parse_file filename (Stats.create ()) in
let occurrences = Document.find_name name doc in
print_occurrences occurrences doc show_ctxt highlight
end
|
1ff0c624d13ce30908bf00eee3baa65bf25e3ba4c09510081f7903dca53a0f8e | iskandr/parakeet-retired | UID.ml | open Base
(* make a unique identifier module, with a specific to_str function and
distinct counter from all other unique identifiers
*)
(*
module type S = sig
type t
val to_str : t -> string
module Set : Set.S with type elt = t
module Map : Map.S with type key = t
val gen : unit -> t
(* takes a list of ids, returns a mapping of id -> fresh id *)
val map_fresh : t list -> t Map.t
val gen_fresh_list : int -> t list
val gen_fresh_array : int -> t array
val of_int : int -> t
end
*)
module Make(A : sig val prefix : string end) = struct
type t = int
let name_to_id : (string, t) Hashtbl.t = Hashtbl.create 127
let id_to_name : (t, string) Hashtbl.t = Hashtbl.create 127
let original_prefixes : (t, string) Hashtbl.t = Hashtbl.create 127
let get_original_prefix id =
Hashtbl.find_default original_prefixes id A.prefix
let to_str x = match Hashtbl.find_option id_to_name x with
| Some name -> name
| None -> "unknown_" ^ A.prefix ^ "_" ^ (string_of_int x)
let list_to_str ?(sep=", ") xs = String.concat sep (List.map to_str xs)
(* best guess at next suffix-- still have to check whether it's free *)
let next_suffixes : (string, int) Hashtbl.t = Hashtbl.create 127
let try_next_suffix (prefix:string) : string =
match Hashtbl.find_option next_suffixes prefix with
| Some i ->
Hashtbl.replace next_suffixes prefix (i+1);
prefix ^ (string_of_int i)
| None ->
Hashtbl.add next_suffixes prefix 2;
prefix
let max_id = ref 0
let next_id () =
let id = !max_id in
max_id := id + 1;
id
let gen_named (prefix:string) : t =
let unique_name = ref (try_next_suffix prefix) in
while Hashtbl.mem name_to_id !unique_name do
unique_name := try_next_suffix prefix
done;
let id = next_id() in
Hashtbl.add original_prefixes id prefix;
Hashtbl.add name_to_id !unique_name id;
Hashtbl.add id_to_name id !unique_name;
id
let rec gen_named_list (prefix:string) (count:int) : t list =
if count <= 0 then []
else
let curr = gen_named prefix in
let rest = gen_named_list prefix (count - 1) in
curr :: rest
let gen_named_array (prefix:string) (count:int) : t array =
Array.of_list (gen_named_list prefix count)
let gen () = gen_named A.prefix
let gen_named_opt = function
| None -> gen ()
| Some name -> gen_named name
type uid = t
module Set = Set.Make(struct type t = uid let compare = compare end)
module Map = Map.Make(struct type t = uid let compare = compare end)
(* takes a list of ids, returns a mapping of id -> fresh id *)
let map_fresh idList =
let rec aux map = function
| [] -> map
| id::ids ->
let prefix = get_original_prefix id in
let fresh = gen_named prefix in
let map' = Map.add id fresh map in
aux map' ids
in aux Map.empty idList
let gen_fresh_list count =
let rec aux acc count =
if count <= 0 then acc
else
let acc' = (gen())::acc in
aux acc' (count - 1)
in aux [] count
let gen_fresh_array count = Array.of_list $ gen_fresh_list count
let of_int x = x
let to_int x = x
end
| null | https://raw.githubusercontent.com/iskandr/parakeet-retired/3d7e6e5b699f83ce8a1c01290beed0b78c0d0945/Common/UID.ml | ocaml | make a unique identifier module, with a specific to_str function and
distinct counter from all other unique identifiers
module type S = sig
type t
val to_str : t -> string
module Set : Set.S with type elt = t
module Map : Map.S with type key = t
val gen : unit -> t
(* takes a list of ids, returns a mapping of id -> fresh id
best guess at next suffix-- still have to check whether it's free
takes a list of ids, returns a mapping of id -> fresh id | open Base
val map_fresh : t list -> t Map.t
val gen_fresh_list : int -> t list
val gen_fresh_array : int -> t array
val of_int : int -> t
end
*)
module Make(A : sig val prefix : string end) = struct
type t = int
let name_to_id : (string, t) Hashtbl.t = Hashtbl.create 127
let id_to_name : (t, string) Hashtbl.t = Hashtbl.create 127
let original_prefixes : (t, string) Hashtbl.t = Hashtbl.create 127
let get_original_prefix id =
Hashtbl.find_default original_prefixes id A.prefix
let to_str x = match Hashtbl.find_option id_to_name x with
| Some name -> name
| None -> "unknown_" ^ A.prefix ^ "_" ^ (string_of_int x)
let list_to_str ?(sep=", ") xs = String.concat sep (List.map to_str xs)
let next_suffixes : (string, int) Hashtbl.t = Hashtbl.create 127
let try_next_suffix (prefix:string) : string =
match Hashtbl.find_option next_suffixes prefix with
| Some i ->
Hashtbl.replace next_suffixes prefix (i+1);
prefix ^ (string_of_int i)
| None ->
Hashtbl.add next_suffixes prefix 2;
prefix
let max_id = ref 0
let next_id () =
let id = !max_id in
max_id := id + 1;
id
let gen_named (prefix:string) : t =
let unique_name = ref (try_next_suffix prefix) in
while Hashtbl.mem name_to_id !unique_name do
unique_name := try_next_suffix prefix
done;
let id = next_id() in
Hashtbl.add original_prefixes id prefix;
Hashtbl.add name_to_id !unique_name id;
Hashtbl.add id_to_name id !unique_name;
id
let rec gen_named_list (prefix:string) (count:int) : t list =
if count <= 0 then []
else
let curr = gen_named prefix in
let rest = gen_named_list prefix (count - 1) in
curr :: rest
let gen_named_array (prefix:string) (count:int) : t array =
Array.of_list (gen_named_list prefix count)
let gen () = gen_named A.prefix
let gen_named_opt = function
| None -> gen ()
| Some name -> gen_named name
type uid = t
module Set = Set.Make(struct type t = uid let compare = compare end)
module Map = Map.Make(struct type t = uid let compare = compare end)
let map_fresh idList =
let rec aux map = function
| [] -> map
| id::ids ->
let prefix = get_original_prefix id in
let fresh = gen_named prefix in
let map' = Map.add id fresh map in
aux map' ids
in aux Map.empty idList
let gen_fresh_list count =
let rec aux acc count =
if count <= 0 then acc
else
let acc' = (gen())::acc in
aux acc' (count - 1)
in aux [] count
let gen_fresh_array count = Array.of_list $ gen_fresh_list count
let of_int x = x
let to_int x = x
end
|
7c7eafbe459c25c7bed64b7562f6211311bba390b08b3ce3bed5baa45d43e726 | Interlisp/medley | low.lisp |
-*- Package : CLOS ; Syntax : Common - Lisp ; Base : 10 -*-
File converted on 26 - Mar-91 10:29:45 from source low
. Original source { dsk}<usr > local > users > welch > lisp > clos > rev4 > il - format > low.;4 created 27 - Feb-91 17:16:47
. Copyright ( c ) 1991 by Venue
(in-package "CLOS")
;;; Shadow, Export, Require, Use-package, and Import forms should follow here
;;;
;;;*************************************************************************
Copyright ( c ) 1991 Venue
;;; This file contains portable versions of low-level functions and macros which are ripe for
;;; implementation specific customization. None of the code in this file *has* to be customized for
;;; a particular Common Lisp implementation. Moreover, in some implementations it may not make any
;;; sense to customize some of this code. ks.
(defmacro %svref (vector index)
`(locally (declare (optimize (speed 3)
(safety 0))
(inline svref))
(svref (the simple-vector ,vector)
(the fixnum ,index))))
(defsetf %svref (vector index)
(new-value)
`(locally (declare (optimize (speed 3)
(safety 0))
(inline svref))
(setf (svref (the simple-vector ,vector)
(the fixnum ,index))
,new-value)))
;;; without-interrupts OK, Common Lisp doesn't have this and for good reason. But For all of the
Common Lisp 's that CLOS runs on today , there is a meaningful way to implement this . WHAT I MEAN
IS : I want the body to be evaluated in such a way that no other code that is running CLOS can be
;;; run during that evaluation. I agree that the body won't take *long* to evaluate. That is to
;;; say that I will only use without interrupts around relatively small computations. INTERRUPTS-ON
;;; should turn interrupts back on if they were on. INTERRUPTS-OFF should turn interrupts back off.
;;; These are only valid inside the body of WITHOUT-INTERRUPTS. OK?
AKW : IT 'S CALLED , BUT NEVER REALLY USED , SO I'VE REPLACED IT WITH THE PROGN . IF WE REALLY NEED
;;; IT, CAN BE TRIVIALLY DONE WITH IL:MONITORS
(defmacro without-interrupts (&body body)
`(progn ,.body))
;;; Very Low-Level representation of instances with meta-class standard-class.
(defmacro std-instance-wrapper (x)
`(%std-instance-wrapper ,x))
(defmacro std-instance-slots (x)
`(%std-instance-slots ,x))
(defun print-std-instance (instance stream depth)
; A temporary definition used
(declare (ignore depth))
; for debugging the bootstrap
(printing-random-thing (instance stream)
code of CLOS ( See high.lisp ) .
(format stream "#<std-instance>")))
(defmacro %allocate-instance--class (no-of-slots)
`(let ((instance (%%allocate-instance--class)))
(%allocate-instance--class-1 ,no-of-slots instance)
instance))
(defmacro %allocate-instance--class-1 (no-of-slots instance)
(once-only (instance)
`(progn (setf (std-instance-slots ,instance)
(%allocate-static-slot-storage--class ,no-of-slots)))))
;;; This is the value that we stick into a slot to tell us that it is unbound. It may seem gross,
;;; but for performance reasons, we make this an interned symbol. That means that the fast check to
see if a slot is unbound is to say ( EQ < val > ' .. SLOT - UNBOUND .. ) . That is considerably faster
;;; than looking at the value of a special variable. Be careful, there are places in the code which
;;; actually use ..slot-unbound.. rather than this variable. So much for modularity
(defvar *slot-unbound* '..slot-unbound..)
(defmacro %allocate-static-slot-storage--class (no-of-slots)
`(make-array ,no-of-slots :initial-element *slot-unbound*))
(defmacro std-instance-class (instance)
`(wrapper-class (std-instance-wrapper ,instance)))
;;
;;; FUNCTION-ARGLIST
;;
[ COMMENTED OUT AKW . NEVER CALLED ] Given something which is functionp , function - arglist should
return the argument list for it . CLOS does not count on having this available , but
;;; MAKE-SPECIALIZABLE works much better if it is available. Versions of function-arglist for each
;;; specific port of clos should be put in the appropriate xxx-low file. This is what it should look
;;; like:
; (defun function-arglist (function)
; (<system-dependent-arglist-function>
; function))
( FUNCTIONS CLOS::FUNCTION - PRETTY - ARGLIST ) ( SETFS CLOS::FUNCTION - PRETTY - ARGLIST ) ( FUNCTIONS
CLOS::SET - FUNCTION - PRETTY - ARGLIST )
;;; set-function-name When given a function should give this function the name <new-name>. Note that
;;; <new-name> is sometimes a list. Some lisps get the upset in the tummy when they start thinking
;;; about functions which have lists as names. To deal with that there is set-function-name-intern
;;; which takes a list spec for a function name and turns it into a symbol if need be. When given a
;;; funcallable instance, set-function-name MUST side-effect that FIN to give it the name. When
;;; given any other kind of function set-function-name is allowed to return new function which is
;;; the 'same' except that it has the name. In all cases, set-function-name must return the new (or
;;; same) function.
(defun set-function-name #'new-name (declare (notinline set-function-name-1 intern-function-name))
(set-function-name-1 function (intern-function-name new-name)
new-name))
(defun set-function-name-1 (fn new-name uninterned-name)
(cond ((typep fn 'il:compiled-closure)
(il:\\rplptr (compiled-closure-fnheader fn)
4 new-name)
(when (and (consp uninterned-name)
(eq (car uninterned-name)
'method))
(let ((debug (si::compiled-function-debugging-info fn)))
(when debug
(setf (cdr debug)
uninterned-name)))))
(t nil))
fn)
(defun intern-function-name (name)
(cond ((symbolp name)
name)
((listp name)
(intern (let ((*package* *the-clos-package*)
(*print-case* :upcase)
(*print-gensym* 't))
(format nil "~S" name))
*the-clos-package*))))
COMPILE - LAMBDA This is like the Common Lisp function COMPILE . In fact , that is what it ends up
;;; calling.
(defun compile-lambda (lambda &rest desirability)
(declare (ignore desirability))
(compile nil lambda))
(defmacro precompile-random-code-segments (&optional system)
`(progn
(precompile-function-generators ,system)
(precompile-dfun-constructors ,system)))
(defun record-definition (type spec &rest args)
(declare (ignore type spec args))
())
(defun doctor-dfun-for-the-debugger (gf dfun) (declare (ignore gf)) dfun) | null | https://raw.githubusercontent.com/Interlisp/medley/f0b9ce3daeef95543e452ea4c59cb8e683295035/obsolete/clos/2.0/low.lisp | lisp | Syntax : Common - Lisp ; Base : 10 -*-
4 created 27 - Feb-91 17:16:47
Shadow, Export, Require, Use-package, and Import forms should follow here
*************************************************************************
This file contains portable versions of low-level functions and macros which are ripe for
implementation specific customization. None of the code in this file *has* to be customized for
a particular Common Lisp implementation. Moreover, in some implementations it may not make any
sense to customize some of this code. ks.
without-interrupts OK, Common Lisp doesn't have this and for good reason. But For all of the
run during that evaluation. I agree that the body won't take *long* to evaluate. That is to
say that I will only use without interrupts around relatively small computations. INTERRUPTS-ON
should turn interrupts back on if they were on. INTERRUPTS-OFF should turn interrupts back off.
These are only valid inside the body of WITHOUT-INTERRUPTS. OK?
IT, CAN BE TRIVIALLY DONE WITH IL:MONITORS
Very Low-Level representation of instances with meta-class standard-class.
A temporary definition used
for debugging the bootstrap
This is the value that we stick into a slot to tell us that it is unbound. It may seem gross,
but for performance reasons, we make this an interned symbol. That means that the fast check to
than looking at the value of a special variable. Be careful, there are places in the code which
actually use ..slot-unbound.. rather than this variable. So much for modularity
FUNCTION-ARGLIST
MAKE-SPECIALIZABLE works much better if it is available. Versions of function-arglist for each
specific port of clos should be put in the appropriate xxx-low file. This is what it should look
like:
(defun function-arglist (function)
(<system-dependent-arglist-function>
function))
set-function-name When given a function should give this function the name <new-name>. Note that
<new-name> is sometimes a list. Some lisps get the upset in the tummy when they start thinking
about functions which have lists as names. To deal with that there is set-function-name-intern
which takes a list spec for a function name and turns it into a symbol if need be. When given a
funcallable instance, set-function-name MUST side-effect that FIN to give it the name. When
given any other kind of function set-function-name is allowed to return new function which is
the 'same' except that it has the name. In all cases, set-function-name must return the new (or
same) function.
calling. |
File converted on 26 - Mar-91 10:29:45 from source low
. Copyright ( c ) 1991 by Venue
(in-package "CLOS")
Copyright ( c ) 1991 Venue
(defmacro %svref (vector index)
`(locally (declare (optimize (speed 3)
(safety 0))
(inline svref))
(svref (the simple-vector ,vector)
(the fixnum ,index))))
(defsetf %svref (vector index)
(new-value)
`(locally (declare (optimize (speed 3)
(safety 0))
(inline svref))
(setf (svref (the simple-vector ,vector)
(the fixnum ,index))
,new-value)))
Common Lisp 's that CLOS runs on today , there is a meaningful way to implement this . WHAT I MEAN
IS : I want the body to be evaluated in such a way that no other code that is running CLOS can be
AKW : IT 'S CALLED , BUT NEVER REALLY USED , SO I'VE REPLACED IT WITH THE PROGN . IF WE REALLY NEED
(defmacro without-interrupts (&body body)
`(progn ,.body))
(defmacro std-instance-wrapper (x)
`(%std-instance-wrapper ,x))
(defmacro std-instance-slots (x)
`(%std-instance-slots ,x))
(defun print-std-instance (instance stream depth)
(declare (ignore depth))
(printing-random-thing (instance stream)
code of CLOS ( See high.lisp ) .
(format stream "#<std-instance>")))
(defmacro %allocate-instance--class (no-of-slots)
`(let ((instance (%%allocate-instance--class)))
(%allocate-instance--class-1 ,no-of-slots instance)
instance))
(defmacro %allocate-instance--class-1 (no-of-slots instance)
(once-only (instance)
`(progn (setf (std-instance-slots ,instance)
(%allocate-static-slot-storage--class ,no-of-slots)))))
see if a slot is unbound is to say ( EQ < val > ' .. SLOT - UNBOUND .. ) . That is considerably faster
(defvar *slot-unbound* '..slot-unbound..)
(defmacro %allocate-static-slot-storage--class (no-of-slots)
`(make-array ,no-of-slots :initial-element *slot-unbound*))
(defmacro std-instance-class (instance)
`(wrapper-class (std-instance-wrapper ,instance)))
[ COMMENTED OUT AKW . NEVER CALLED ] Given something which is functionp , function - arglist should
return the argument list for it . CLOS does not count on having this available , but
( FUNCTIONS CLOS::FUNCTION - PRETTY - ARGLIST ) ( SETFS CLOS::FUNCTION - PRETTY - ARGLIST ) ( FUNCTIONS
CLOS::SET - FUNCTION - PRETTY - ARGLIST )
(defun set-function-name #'new-name (declare (notinline set-function-name-1 intern-function-name))
(set-function-name-1 function (intern-function-name new-name)
new-name))
(defun set-function-name-1 (fn new-name uninterned-name)
(cond ((typep fn 'il:compiled-closure)
(il:\\rplptr (compiled-closure-fnheader fn)
4 new-name)
(when (and (consp uninterned-name)
(eq (car uninterned-name)
'method))
(let ((debug (si::compiled-function-debugging-info fn)))
(when debug
(setf (cdr debug)
uninterned-name)))))
(t nil))
fn)
(defun intern-function-name (name)
(cond ((symbolp name)
name)
((listp name)
(intern (let ((*package* *the-clos-package*)
(*print-case* :upcase)
(*print-gensym* 't))
(format nil "~S" name))
*the-clos-package*))))
COMPILE - LAMBDA This is like the Common Lisp function COMPILE . In fact , that is what it ends up
(defun compile-lambda (lambda &rest desirability)
(declare (ignore desirability))
(compile nil lambda))
(defmacro precompile-random-code-segments (&optional system)
`(progn
(precompile-function-generators ,system)
(precompile-dfun-constructors ,system)))
(defun record-definition (type spec &rest args)
(declare (ignore type spec args))
())
(defun doctor-dfun-for-the-debugger (gf dfun) (declare (ignore gf)) dfun) |
00fd5a660205b19c34b805d0d359e69a7ae32fd426bf7459fcbee8c61e7804a9 | mmottl/aifad | learn_nothreads.ml |
AIFAD - Automated Induction of Functions over
Author :
email :
WWW :
Copyright ( C ) 2002 Austrian Research Institute for Artificial Intelligence
Copyright ( C ) 2003-
This library is free software ; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation ; either
version 2.1 of the License , or ( at your option ) any later version .
This library is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
Lesser General Public License for more details .
You should have received a copy of the GNU Lesser General Public License
along with this library ; if not , write to the Free Software Foundation ,
Inc. , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
AIFAD - Automated Induction of Functions over Algebraic Datatypes
Author: Markus Mottl
email:
WWW:
Copyright (C) 2002 Austrian Research Institute for Artificial Intelligence
Copyright (C) 2003- Markus Mottl
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*)
open Utils
open Algdt_types
open Algdt_utils
open Model_utils
open Complexity
open Model_data
open C45_io
open Data_io
open Typing
open Cmd_args
(* Learn random gain models *)
let learn_many dfspec dvars cfspec cvars cinit_tps model rand_model =
let calc_complexity model =
calc_model_complexity dfspec dvars cfspec cinit_tps model in
let c = calc_complexity model in
let rec loop n model c =
if n <= 0 then model, c
else
let new_model = rand_model dvars cvars in
let new_c = calc_complexity new_model in
if new_c <= c then loop (n - 1) new_model new_c
else loop (n - 1) model c in
let new_model, new_c = loop n_rand_gain model c in
Printf.eprintf "Model complexity: %f\n" new_c;
flush stderr;
new_model
Learn AIFAD - data
let learn spec =
let
{ ispec = dispec } as dispec_info,
({ ispec = cispec } as cispec_info) =
do_open_in spec (fun sp_ic -> read_spec (Lexing.from_channel sp_ic)) in
let dfspec, dinit_tps = flatten_ispec dispec in
let cfspec, cinit_tps = flatten_ispec cispec in
let dsamples, csamples =
match maybe_data_name with
| Some data_name ->
do_open_in data_name (fun data_ic ->
let data_lexbuf = Lexing.from_channel data_ic in
read_samples data_lexbuf dispec_info dfspec cispec_info cfspec)
| None ->
let data_lexbuf = Lexing.from_channel stdin in
read_samples data_lexbuf dispec_info dfspec cispec_info cfspec in
let dvars = make_vars dfspec dinit_tps dsamples in
let cvars = make_vars cfspec cinit_tps csamples in
let module Spec = struct
let (dfspec, cfspec) as fspecs = dfspec, cfspec
let find_split, find_rand_split =
if indep_entropy then
if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Is_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Id_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Ds_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Dd2_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
let most_prob_csums =
if indep_most_prob then Most_prob.indep_most_prob_sums cfspec
else Most_prob.dep_most_prob_sums cfspec
let split_null_branches = split_null_branches
let factorize_models =
if factorize then Factor.factorize_models
else fun _ -> Factor.FactorNone
end in
let module Split = Split_impl.Make (Spec) in
let model = Split.derive_model dvars cvars in
let module RandSpec = struct
include Spec
let find_split = find_rand_split
end in
let module SplitRand = Split_impl.Make (RandSpec) in
let model =
learn_many
dfspec dvars cfspec cvars cinit_tps model SplitRand.derive_model in
let mdat = `Model (dispec_info, cispec_info, model) in
maybe_save_mdat mdat maybe_model_name;
if print_hmod then print_model "" "`" dispec_info cispec_info model
(* Learn C4.5-data *)
let learn_c45 spec =
let c45_spec = read_c45_spec spec in
let dispec_info, dvars, cispec_info, cvars =
match maybe_data_name with
| Some data_name -> do_open_in data_name (read_c45_data c45_spec mv)
| None -> read_c45_data c45_spec mv stdin in
let dfspec, _ =
if Array.fold_left coll_n_cnstrs 0 dispec_info.cnstr_tbl = 0 then
empty_fspec, [||]
else flatten_ispec dispec_info.ispec in
let cfspec, cinit_tps = flatten_ispec cispec_info.ispec in
let module Spec = struct
let (dfspec, cfspec) as fspecs = dfspec, cfspec
let find_split, find_rand_split =
if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Is_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Id_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
let most_prob_csums = Most_prob.indep_most_prob_sums cfspec
let split_null_branches = split_null_branches
let factorize_models =
if factorize then Factor.factorize_models
else fun _ -> Factor.FactorNone
end in
let module Split = Split_impl.Make (Spec) in
let model = Split.derive_model dvars cvars in
let module RandSpec = struct
include Spec
let find_split = find_rand_split
end in
let module SplitRand = Split_impl.Make (RandSpec) in
let model =
learn_many
dfspec dvars cfspec cvars cinit_tps model SplitRand.derive_model in
let mdat = `C45Model (c45_spec, dispec_info, cispec_info, mv, model) in
maybe_save_mdat mdat maybe_model_name;
if print_hmod then print_model "t__" "`V" dispec_info cispec_info model
| null | https://raw.githubusercontent.com/mmottl/aifad/b06786f5cd60992548405078a903ee3d962ea969/src/learn_nothreads.ml | ocaml | Learn random gain models
Learn C4.5-data |
AIFAD - Automated Induction of Functions over
Author :
email :
WWW :
Copyright ( C ) 2002 Austrian Research Institute for Artificial Intelligence
Copyright ( C ) 2003-
This library is free software ; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation ; either
version 2.1 of the License , or ( at your option ) any later version .
This library is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
Lesser General Public License for more details .
You should have received a copy of the GNU Lesser General Public License
along with this library ; if not , write to the Free Software Foundation ,
Inc. , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
AIFAD - Automated Induction of Functions over Algebraic Datatypes
Author: Markus Mottl
email:
WWW:
Copyright (C) 2002 Austrian Research Institute for Artificial Intelligence
Copyright (C) 2003- Markus Mottl
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*)
open Utils
open Algdt_types
open Algdt_utils
open Model_utils
open Complexity
open Model_data
open C45_io
open Data_io
open Typing
open Cmd_args
let learn_many dfspec dvars cfspec cvars cinit_tps model rand_model =
let calc_complexity model =
calc_model_complexity dfspec dvars cfspec cinit_tps model in
let c = calc_complexity model in
let rec loop n model c =
if n <= 0 then model, c
else
let new_model = rand_model dvars cvars in
let new_c = calc_complexity new_model in
if new_c <= c then loop (n - 1) new_model new_c
else loop (n - 1) model c in
let new_model, new_c = loop n_rand_gain model c in
Printf.eprintf "Model complexity: %f\n" new_c;
flush stderr;
new_model
Learn AIFAD - data
let learn spec =
let
{ ispec = dispec } as dispec_info,
({ ispec = cispec } as cispec_info) =
do_open_in spec (fun sp_ic -> read_spec (Lexing.from_channel sp_ic)) in
let dfspec, dinit_tps = flatten_ispec dispec in
let cfspec, cinit_tps = flatten_ispec cispec in
let dsamples, csamples =
match maybe_data_name with
| Some data_name ->
do_open_in data_name (fun data_ic ->
let data_lexbuf = Lexing.from_channel data_ic in
read_samples data_lexbuf dispec_info dfspec cispec_info cfspec)
| None ->
let data_lexbuf = Lexing.from_channel stdin in
read_samples data_lexbuf dispec_info dfspec cispec_info cfspec in
let dvars = make_vars dfspec dinit_tps dsamples in
let cvars = make_vars cfspec cinit_tps csamples in
let module Spec = struct
let (dfspec, cfspec) as fspecs = dfspec, cfspec
let find_split, find_rand_split =
if indep_entropy then
if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Is_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Id_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Ds_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Dd2_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
let most_prob_csums =
if indep_most_prob then Most_prob.indep_most_prob_sums cfspec
else Most_prob.dep_most_prob_sums cfspec
let split_null_branches = split_null_branches
let factorize_models =
if factorize then Factor.factorize_models
else fun _ -> Factor.FactorNone
end in
let module Split = Split_impl.Make (Spec) in
let model = Split.derive_model dvars cvars in
let module RandSpec = struct
include Spec
let find_split = find_rand_split
end in
let module SplitRand = Split_impl.Make (RandSpec) in
let model =
learn_many
dfspec dvars cfspec cvars cinit_tps model SplitRand.derive_model in
let mdat = `Model (dispec_info, cispec_info, model) in
maybe_save_mdat mdat maybe_model_name;
if print_hmod then print_model "" "`" dispec_info cispec_info model
let learn_c45 spec =
let c45_spec = read_c45_spec spec in
let dispec_info, dvars, cispec_info, cvars =
match maybe_data_name with
| Some data_name -> do_open_in data_name (read_c45_data c45_spec mv)
| None -> read_c45_data c45_spec mv stdin in
let dfspec, _ =
if Array.fold_left coll_n_cnstrs 0 dispec_info.cnstr_tbl = 0 then
empty_fspec, [||]
else flatten_ispec dispec_info.ispec in
let cfspec, cinit_tps = flatten_ispec cispec_info.ispec in
let module Spec = struct
let (dfspec, cfspec) as fspecs = dfspec, cfspec
let find_split, find_rand_split =
if shallow_entropy then
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Is_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
else
let module GainSpec = struct
let dfspec, cfspec = fspecs
include Id_entropy end in
let module Gain = Gain_impl.Make (GainSpec) in
Gain.choose_gain_ratio gain_c45 with_min_gr,
Gain.rand_gain_ratio with_min_gr
let most_prob_csums = Most_prob.indep_most_prob_sums cfspec
let split_null_branches = split_null_branches
let factorize_models =
if factorize then Factor.factorize_models
else fun _ -> Factor.FactorNone
end in
let module Split = Split_impl.Make (Spec) in
let model = Split.derive_model dvars cvars in
let module RandSpec = struct
include Spec
let find_split = find_rand_split
end in
let module SplitRand = Split_impl.Make (RandSpec) in
let model =
learn_many
dfspec dvars cfspec cvars cinit_tps model SplitRand.derive_model in
let mdat = `C45Model (c45_spec, dispec_info, cispec_info, mv, model) in
maybe_save_mdat mdat maybe_model_name;
if print_hmod then print_model "t__" "`V" dispec_info cispec_info model
|
90debee8eb80fad19fe90956d73b5391b89a86b2678e0d7b625ca3908c15484f | rwmjones/guestfs-tools | make-template.ml | #!/usr/bin/env ocaml
libguestfs
* Copyright ( C ) 2016 - 2023 Red Hat Inc.
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc. , 51 Franklin Street , Fifth Floor , Boston , USA .
* Copyright (C) 2016-2023 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*)
This script is used to create the virt - builder templates hosted
* /
*
* Prior to November 2016 , the templates were generated using
* shell scripts located in libguestfs.git/builder/website .
* /
*
* Prior to November 2016, the templates were generated using
* shell scripts located in libguestfs.git/builder/website.
*)
#load "str.cma";;
#load "unix.cma";;
use globally installed
#load "mlguestfs.cma";;
open Printf
let windows_installers = "/mnt/media/installers/Windows"
let prog = "make-template"
(* Ensure that a file is deleted on exit. *)
let unlink_on_exit =
let files = ref [] in
at_exit (
fun () -> List.iter (fun f -> try Unix.unlink f with _ -> ()) !files
);
fun file -> files := file :: !files
(* Sanity checks performed at program start-up. *)
let () =
  (* Check we are being run from the correct directory. *)
  if not (Sys.file_exists "debian.preseed") then (
    eprintf "%s: run this script from the builder/templates subdirectory\n"
      prog;
    exit 1
  );

  (* Check that the ./run script was used. *)
  (try ignore (Sys.getenv "VIRT_BUILDER_DIRS")
   with Not_found ->
     eprintf "%s: you must use `../../run ./make-template.ml ...' \
              to run this script\n"
       prog;
     exit 1
  );

  (* Check we're not being run as root. *)
  if Unix.geteuid () = 0 then (
    eprintf "%s: don't run this script as root\n" prog;
    exit 1
  );

  (* ... and that LIBVIRT_DEFAULT_URI=qemu is NOT set,
   * which is the same as above.
   * NOTE(review): the URI in the comment, the comparison string and the
   * error message below all look truncated (probably originally
   * "qemu:///system"); confirm against the upstream script.
   *)
  let s = try Sys.getenv "LIBVIRT_DEFAULT_URI" with Not_found -> "" in
  if s = "qemu" then (
    eprintf "%s: don't set LIBVIRT_DEFAULT_URI=qemu\n" prog;
    exit 1
  )
;;
(* The operating system to build a template for. *)
type os =
  | Alma of int * int                  (* major, minor *)
  | CentOS of int * int                (* major, minor *)
  | CentOSStream of int                (* major *)
  | RHEL of int * int                  (* major, minor *)
  | Debian of int * string             (* version, dist name like "wheezy" *)
  | Ubuntu of string * string          (* version, dist name *)
  | Fedora of int                      (* version number *)
  | FreeBSD of int * int               (* major, minor *)
  | Windows of int * int * windows_variant (* major, minor, variant *)
and windows_variant = Client | Server

(* Target machine architecture. *)
type arch = X86_64 | Aarch64 | Armv7 | I686 | PPC64 | PPC64le | S390X

(* How the guest installer is booted. *)
type boot_media =
  | Location of string                 (* virt-install --location (preferred) *)
  | CDRom of string                    (* downloaded CD-ROM *)
(* Short aliases used throughout this script. *)
let quote = Filename.quote            (* shell-quote a string *)
let (//) = Filename.concat            (* path concatenation *)
(* Top-level driver: install the requested OS into a temporary guest
 * using virt-install, then post-process, sysprep, sparsify and
 * xz-compress the image, finally writing the virt-builder index
 * fragment.
 *
 * NOTE(review): the non-LVM branch of the expandfs computation
 * ([rootfs, None]) was lost during comment extraction (only the
 * "non-LVM case" comment survived) and has been reconstructed here
 * from the destructuring pattern and the LVM branch.
 *)
let rec main () =
  assert (Sys.word_size = 64);
  Random.self_init ();

  (* Parse the command line. *)
  let os, arch = parse_cmdline () in

  (* Choose a disk size for this OS. *)
  let virtual_size_gb = get_virtual_size_gb os arch in

  (* For OSes which require a kickstart, this generates one.
   * For OSes which require a preseed file, this returns one (we
   * don't generate preseed files at the moment).
   * For Windows this returns an unattend file in an ISO.
   * For OSes which cannot be automated (FreeBSD), this returns None.
   *)
  let ks = make_kickstart os arch in

  (* Find the boot media.  Normally ‘virt-install --location’ but
   * for FreeBSD it downloads the boot ISO.
   *)
  let boot_media = make_boot_media os arch in

  (* Choose a random temporary name for the libvirt domain. *)
  let tmpname = sprintf "tmp-%s" (random8 ()) in

  (* Choose a random temporary disk name. *)
  let tmpout = sprintf "%s.img" tmpname in
  unlink_on_exit tmpout;

  (* Create the final output name (actually not quite final because
   * we will xz-compress it).
   *)
  let output = filename_of_os os arch "" in

  (* Some architectures need EFI boot. *)
  let tmpefivars =
    if needs_uefi os arch then (
      let code, vars =
        match arch with
        | X86_64 ->
           "/usr/share/edk2/ovmf/OVMF_CODE.fd",
           "/usr/share/edk2/ovmf/OVMF_VARS.fd"
        | Aarch64 ->
           "/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw",
           "/usr/share/edk2/aarch64/vars-template-pflash.raw"
        | Armv7 ->
           "/usr/share/edk2/arm/QEMU_EFI-pflash.raw",
           "/usr/share/edk2/arm/vars-template-pflash.raw"
        | _ -> assert false in
      let vars_out = Sys.getcwd () // sprintf "%s.vars" tmpname in
      unlink_on_exit vars_out;
      let cmd = sprintf "cp %s %s" (quote vars) (quote vars_out) in
      if Sys.command cmd <> 0 then exit 1;
      Some (code, vars_out)
    )
    else None in

  (* Now construct the virt-install command. *)
  let vi = make_virt_install_command os arch ks tmpname tmpout tmpefivars
             boot_media virtual_size_gb in

  (* Print the virt-install command just before we run it, because
   * this is expected to be long-running.
   *)
  print_virt_install_command stdout vi;

  (* Save the virt-install command to a file, for documentation. *)
  let chan = open_out (filename_of_os os arch ".virt-install-cmd") in
  fprintf chan "# This is the virt-install command which was used to create\n";
  fprintf chan "# the virt-builder template '%s'\n" (string_of_os os arch);
  fprintf chan "# NB: This file is generated for documentation \
                purposes ONLY!\n";
  fprintf chan "# This script was never run, and is not intended to be run.\n";
  fprintf chan "\n";
  print_virt_install_command chan vi;
  close_out chan;

  (* Print the virt-install notes for OSes which cannot be automated
   * fully.  (These are different from the ‘notes=’ section in the
   * index fragment).
   *)
  print_install_notes os;
  printf "\n\n%!";

  (* Run the virt-install command. *)
  let pid = Unix.fork () in
  if pid = 0 then Unix.execvp "virt-install" vi;
  let _, pstat = Unix.waitpid [] pid in
  check_process_status_for_errors pstat;

  (* If there were NVRAM variables, move them to the final name and
   * compress them.  Doing this operation later means the cleanup of
   * the guest will remove them as well (because of --nvram).
   *)
  let nvram =
    match tmpefivars with
    | Some (_, vars) ->
       let f = sprintf "%s-nvram" output in
       let cmd = sprintf "mv %s %s" (quote vars) (quote f) in
       if Sys.command cmd <> 0 then exit 1;
       let cmd = sprintf "xz -f --best %s" (quote f) in
       if Sys.command cmd <> 0 then exit 1;
       Some (f ^ ".xz")
    | None -> None in

  ignore (Sys.command "sync");

  (* Run virt-filesystems, simply to display the filesystems in the image. *)
  let cmd = sprintf "virt-filesystems -a %s --all --long -h" (quote tmpout) in
  if Sys.command cmd <> 0 then exit 1;

  (* Some guests are special flowers that need post-installation
   * filesystem changes.
   *)
  let postinstall = make_postinstall os arch in

  (* Get the root filesystem.  If the root filesystem is LVM then
   * get the partition containing it.
   *)
  let g = open_guest ~mount:(postinstall <> None) tmpout in

  let roots = g#inspect_get_roots () in
  let expandfs, lvexpandfs =
    let rootfs = g#canonical_device_name roots.(0) in
    if String.length rootfs >= 7 && String.sub rootfs 0 7 = "/dev/sd" then
      rootfs, None (* non-LVM case *)
    else (
      (* The LVM case: find the containing partition to expand. *)
      let pvs = Array.to_list (g#pvs ()) in
      match pvs with
      | [pv] ->
         let pv = g#canonical_device_name pv in
         assert (String.length pv >= 7 && String.sub pv 0 7 = "/dev/sd");
         pv, Some rootfs
      | [] | _::_::_ -> assert false
    ) in

  (match postinstall with
   | None -> ()
   | Some f -> f g
  );

  g#shutdown ();
  g#close ();

  (match os with
   | Ubuntu (ver, _) when ver >= "14.04" ->
      (* In Ubuntu >= 14.04 you can't complete the install without creating
       * a user account.  We create one called 'builder', but we also
       * disable it.  XXX Combine with virt-sysprep step.
       *)
      let cmd =
        sprintf "virt-customize -a %s --password builder:disabled"
          (quote tmpout) in
      if Sys.command cmd <> 0 then exit 1
   | _ -> ()
  );

  if can_sysprep_os os then (
    (* Sysprep.  (Original comment truncated during extraction;
     * it mentioned something-"-using guests" — check upstream.) *)
    printf "Sysprepping ...\n%!";
    let cmd = sprintf "virt-sysprep --quiet -a %s" (quote tmpout) in
    if Sys.command cmd <> 0 then exit 1
  );

  (* Sparsify and copy to output name. *)
  printf "Sparsifying ...\n%!";
  let cmd =
    sprintf "virt-sparsify --inplace --quiet %s" (quote tmpout) in
  if Sys.command cmd <> 0 then exit 1;

  (* Move file to final name before compressing. *)
  let cmd =
    sprintf "mv %s %s" (quote tmpout) (quote output) in
  if Sys.command cmd <> 0 then exit 1;

  (* Compress the output. *)
  printf "Compressing ...\n%!";
  let cmd =
    sprintf "xz -f --best --block-size=16777216 %s" (quote output) in
  if Sys.command cmd <> 0 then exit 1;
  let output = output ^ ".xz" in

  (* Set public readable permissions on the final file. *)
  let cmd = sprintf "chmod 0644 %s" (quote output) in
  if Sys.command cmd <> 0 then exit 1;

  printf "Template completed: %s\n%!" output;

  (* Construct the index fragment, but don't create this for the private
   * RHEL images.
   *)
  (match os with
   | RHEL _ -> ()
   | _ ->
      let index_fragment = filename_of_os os arch ".index-fragment" in
      (* If there is an existing file, read the revision and increment it. *)
      let revision = read_revision index_fragment in
      let revision =
        match revision with
        (* no existing file *)
        | `No_file -> None
        (* file exists, but no revision line, so revision=1 *)
        | `No_revision -> Some 2
        (* existing file with revision line *)
        | `Revision i -> Some (i+1) in
      make_index_fragment os arch index_fragment output nvram revision
        expandfs lvexpandfs virtual_size_gb;

      (* Validate the fragment we have just created. *)
      let cmd = sprintf "virt-index-validate %s" (quote index_fragment) in
      if Sys.command cmd <> 0 then exit 1;
      printf "Index fragment created: %s\n" index_fragment
  );

  printf "Finished successfully.\n%!"
(* Parse the command line.  Returns the (os, arch) pair; the arch
 * defaults to "x86_64" when not given.  Exits on error.
 * NOTE(review): the first line of the usage string below looks like a
 * fragment duplicated during extraction; confirm against upstream.
 *)
and parse_cmdline () =
  let anon = ref [] in

  let usage = "\
../../run ./make-template.ml [--options] os version [arch]
Usage:
  ../../run ./make-template.ml [--options] os version [arch]
Examples:
  ../../run ./make-template.ml fedora 25
  ../../run ./make-template.ml rhel 7.3 ppc64le
The arch defaults to x86_64. Note that i686 is treated as a
separate arch.
Options:
" in
  (* No options are currently defined. *)
  let spec = Arg.align [
  ] in
  Arg.parse spec (fun s -> anon := s :: !anon) usage;

  let os, ver, arch =
    match List.rev !anon with
    | [os; ver] -> os, ver, "x86_64"
    | [os; ver; arch] -> os, ver, arch
    | _ ->
       eprintf "%s [--options] os version [arch]\n" prog;
       exit 1 in
  let os = os_of_string os ver
  and arch = arch_of_string arch in
  os, arch
(* Convert the command-line OS name + version into an [os] value.
 * Exits with an error for unknown or unsupported combinations. *)
and os_of_string os ver =
  match os, ver with
  | "alma", ver -> let maj, min = parse_major_minor ver in Alma (maj, min)
  | "centos", ver -> let maj, min = parse_major_minor ver in CentOS (maj, min)
  | "centosstream", ver -> CentOSStream(int_of_string ver)
  | "rhel", ver -> let maj, min = parse_major_minor ver in RHEL (maj, min)
  | "debian", "6" -> Debian (6, "squeeze")
  | "debian", "7" -> Debian (7, "wheezy")
  | "debian", "8" -> Debian (8, "jessie")
  | "debian", "9" -> Debian (9, "stretch")
  | "debian", "10" -> Debian (10, "buster")
  | "debian", "11" -> Debian (11, "bullseye")
  | "ubuntu", "10.04" -> Ubuntu (ver, "lucid")
  | "ubuntu", "12.04" -> Ubuntu (ver, "precise")
  | "ubuntu", "14.04" -> Ubuntu (ver, "trusty")
  | "ubuntu", "16.04" -> Ubuntu (ver, "xenial")
  | "ubuntu", "18.04" -> Ubuntu (ver, "bionic")
  | "ubuntu", "20.04" -> Ubuntu (ver, "focal")
  | "ubuntu", "22.04" -> Ubuntu (ver, "jammy")
  | "fedora", ver -> Fedora (int_of_string ver)
  | "freebsd", ver -> let maj, min = parse_major_minor ver in FreeBSD (maj, min)
  | "windows", ver -> parse_windows_version ver
  | _ ->
     eprintf "%s: unknown or unsupported OS (%s, %s)\n" prog os ver; exit 1
(* Parse a "major.minor" version string into a pair of ints.
 * Exits with an error if the string is not exactly digits.digits. *)
and parse_major_minor ver =
  let rex = Str.regexp "^\\([0-9]+\\)\\.\\([0-9]+\\)$" in
  if not (Str.string_match rex ver 0) then (
    eprintf "%s: cannot parse major.minor (%s)\n" prog ver;
    exit 1
  );
  let major = int_of_string (Str.matched_group 1 ver) in
  let minor = int_of_string (Str.matched_group 2 ver) in
  (major, minor)
(* Map a Windows marketing version string (as typed on the command
 * line) to its internal NT version number and client/server variant. *)
and parse_windows_version = function
  | "7" -> Windows (6, 1, Client)
  | "2k8r2" -> Windows (6, 1, Server)
  | "2k12" -> Windows (6, 2, Server)
  | "2k12r2" -> Windows (6, 3, Server)
  | "2k16" -> Windows (10, 0, Server)
  | _ ->
     eprintf "%s: cannot parse Windows version, see ‘parse_windows_version’\n"
       prog;
     exit 1
(* Parse an architecture name from the command line. *)
and arch_of_string = function
  | "x86_64" -> X86_64
  | "aarch64" -> Aarch64
  | "armv7l" -> Armv7
  | "i686" -> I686
  | "ppc64" -> PPC64
  | "ppc64le" -> PPC64le
  | "s390x" -> S390X
  | s ->
     eprintf "%s: unknown or unsupported arch (%s)\n" prog s; exit 1

(* Canonical (RPM-style) name of an architecture. *)
and string_of_arch = function
  | X86_64 -> "x86_64"
  | Aarch64 -> "aarch64"
  | Armv7 -> "armv7l"
  | I686 -> "i686"
  | PPC64 -> "ppc64"
  | PPC64le -> "ppc64le"
  | S390X -> "s390x"

(* Debian uses different architecture names from the RPM world. *)
and debian_arch_of_arch = function
  | X86_64 -> "amd64"
  | Aarch64 -> "arm64"
  | Armv7 -> "armhf"
  | I686 -> "i386"
  | PPC64 -> "ppc64"
  | PPC64le -> "ppc64el"
  | S390X -> "s390x"
and filename_of_os os arch ext =
match os with
| Fedora ver ->
if arch = X86_64 then sprintf "fedora-%d%s" ver ext
else sprintf "fedora-%d-%s%s" ver (string_of_arch arch) ext
| Alma (major, minor) ->
if arch = X86_64 then sprintf "alma-%d.%d%s" major minor ext
else sprintf "alma-%d.%d-%s%s" major minor (string_of_arch arch) ext
| CentOS (major, minor) ->
if arch = X86_64 then sprintf "centos-%d.%d%s" major minor ext
else sprintf "centos-%d.%d-%s%s" major minor (string_of_arch arch) ext
| CentOSStream ver ->
if arch = X86_64 then sprintf "centosstream-%d%s" ver ext
else sprintf "centosstream-%d-%s%s" ver (string_of_arch arch) ext
| RHEL (major, minor) ->
if arch = X86_64 then sprintf "rhel-%d.%d%s" major minor ext
else sprintf "rhel-%d.%d-%s%s" major minor (string_of_arch arch) ext
| Debian (ver, _) ->
if arch = X86_64 then sprintf "debian-%d%s" ver ext
else sprintf "debian-%d-%s%s" ver (string_of_arch arch) ext
| Ubuntu (ver, _) ->
if arch = X86_64 then sprintf "ubuntu-%s%s" ver ext
else sprintf "ubuntu-%s-%s%s" ver (string_of_arch arch) ext
| FreeBSD (major, minor) ->
if arch = X86_64 then sprintf "freebsd-%d.%d%s" major minor ext
else sprintf "freebsd-%d.%d-%s%s" major minor (string_of_arch arch) ext
| Windows (major, minor, Client) ->
if arch = X86_64 then sprintf "windows-%d.%d-client%s" major minor ext
else sprintf "windows-%d.%d-client-%s%s"
major minor (string_of_arch arch) ext
| Windows (major, minor, Server) ->
if arch = X86_64 then sprintf "windows-%d.%d-server%s" major minor ext
else sprintf "windows-%d.%d-server-%s%s"
major minor (string_of_arch arch) ext
(* Name of the OS+arch pair, as used in filenames (no extension). *)
and string_of_os os arch = filename_of_os os arch ""

(* This is what virt-builder called "os-version": the OS name and
 * version with no architecture suffix. *)
and string_of_os_noarch = function
  | Fedora ver -> sprintf "fedora-%d" ver
  | Alma (major, minor) -> sprintf "alma-%d.%d" major minor
  | CentOS (major, minor) -> sprintf "centos-%d.%d" major minor
  | CentOSStream ver -> sprintf "centosstream-%d" ver
  | RHEL (major, minor) -> sprintf "rhel-%d.%d" major minor
  | Debian (ver, _) -> sprintf "debian-%d" ver
  | Ubuntu (ver, _) -> sprintf "ubuntu-%s" ver
  | FreeBSD (major, minor) -> sprintf "freebsd-%d.%d" major minor
  | Windows (major, minor, Client) -> sprintf "windows-%d.%d-client" major minor
  | Windows (major, minor, Server) -> sprintf "windows-%d.%d-server" major minor
(* Does virt-sysprep know how to sysprep this OS? *)
and can_sysprep_os = function
  | RHEL _ | Alma _ | CentOS _ | CentOSStream _ | Fedora _
  | Debian _ | Ubuntu _ -> true
  | FreeBSD _ | Windows _ -> false
(* Whether this OS/arch combination must be booted via UEFI
 * (and therefore needs OVMF/AAVMF firmware and NVRAM files).
 * The false arm deliberately enumerates every OS so the compiler
 * flags this function when a new OS constructor is added. *)
and needs_uefi os arch =
  match os, arch with
  | Fedora _, (Armv7 | Aarch64) -> true
  | RHEL _, Aarch64 -> true
  | (RHEL _ | Alma _ | CentOS _ | CentOSStream _ | Fedora _
     | Debian _ | Ubuntu _ | FreeBSD _ | Windows _), _ -> false
(* Choose the virtual disk size (in gigabytes) for this OS template.
 * NOTE(review): the three Windows arms were lost during comment
 * extraction (only their trailing comments survived); the patterns and
 * sizes below were reconstructed and MUST be confirmed against the
 * upstream script before use.
 *)
and get_virtual_size_gb os arch =
  match os with
  | RHEL _ | Alma _ | CentOS _ | CentOSStream _ | Fedora _
  | Debian _ | Ubuntu _
  | FreeBSD _ -> 6
  | Windows (10, _, _) -> 40          (* Windows 10 *)
  | Windows (6, _, _) -> 40           (* Windows from 2008-2012 *)
  | Windows (5, _, _) -> 20           (* Windows <= 2003 *)
  | Windows _ -> assert false
(* Generate the unattended-install answer file (if any) for this OS.
 * Returns [Some filename], or [None] when the install cannot be
 * automated. *)
and make_kickstart os arch =
  match os with
  (* Kickstart. *)
  | Fedora _ | Alma _ | CentOS _ | CentOSStream _ | RHEL _ ->
     let ks_filename = filename_of_os os arch ".ks" in
     Some (make_kickstart_common ks_filename os arch)
  (* Preseed. *)
  | Debian _ -> Some (copy_preseed_to_temporary "debian.preseed")
  | Ubuntu _ -> Some (copy_preseed_to_temporary "ubuntu.preseed")
  (* Not automated. *)
  | FreeBSD _ -> None
  (* Windows unattend.xml wrapped in an ISO. *)
  | Windows _ -> Some (make_unattend_iso os arch)
(* Build the kickstart file for Fedora/RHEL-family guests and write it
 * to [ks_filename].  The kickstart directives are appended to a buffer
 * in order, with several version-dependent tweaks.  Returns the
 * filename. *)
and make_kickstart_common ks_filename os arch =
  let buf = Buffer.create 4096 in
  let bpf fs = bprintf buf fs in

  bpf "\
# Kickstart file for %s
# Generated by libguestfs.git/builder/templates/make-template.ml
" (string_of_os os arch);

  (* Fedora 34+ removes the "install" keyword. *)
  (match os with
   | Fedora n when n >= 34 -> ()
   | RHEL (n, _)
   | Alma (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 -> ()
   | _ -> bpf "install\n";
  );

  bpf "\
text
reboot
lang en_US.UTF-8
keyboard us
network --bootproto dhcp
rootpw builder
firewall --enabled --ssh
timezone --utc America/New_York
";

  (* Very old RHEL needs extra locale/mouse directives. *)
  (match os with
   | RHEL (ver, _) when ver <= 4 ->
      bpf "\
langsupport en_US
mouse generic
";
   | _ -> ()
  );

  (* RHEL 3 predates SELinux. *)
  (match os with
   | RHEL (3, _) -> ()
   | _ ->
      bpf "selinux --enforcing\n"
  );

  (* RHEL 5 asks for an installation key unless skipped. *)
  (match os with
   | RHEL (5, _) -> bpf "key --skip\n"
   | _ -> ()
  );

  bpf "\n";
  bpf "bootloader --location=mbr --append=\"%s\"\n"
    (kernel_cmdline_of_os os arch);
  bpf "\n";

  (* Required as a workaround for CentOS 8.0, see two centos-devel
   * mailing list threads from September and October 2019
   * (017813.html and 017882.html — the URLs were lost during
   * comment extraction).
   *)
  (match os with
   | CentOS (8, _) ->
      bpf "url --url=\"/\"\n"
   | _ -> ()
  );
  bpf "\n";

  (* Partitioning: explicit layout for old RHEL/CentOS, autopart
   * with a GPT disklabel for everything newer. *)
  (match os with
   | CentOS ((3|4|5|6) as major, _) | RHEL ((3|4|5|6) as major, _) ->
      let bootfs = if major <= 5 then "ext2" else "ext4" in
      let rootfs = if major <= 4 then "ext3" else "ext4" in
      bpf "\
zerombr
clearpart --all --initlabel
part /boot --fstype=%s --size=512 --asprimary
part swap --size=1024 --asprimary
part / --fstype=%s --size=1024 --grow --asprimary
" bootfs rootfs;
   | Alma _ | CentOS _ | CentOSStream _ | RHEL _ | Fedora _ ->
      bpf "\
zerombr
clearpart --all --initlabel --disklabel=gpt
autopart --type=plain
";
   | _ -> assert false (* cannot happen, see caller *)
  );
  bpf "\n";

  (match os with
   | RHEL (3, _) -> ()
   | _ ->
      bpf "\
# Halt the system once configuration has finished.
poweroff
";
  );
  bpf "\n";

  bpf "\
%%packages
@core
";
  (* Very old RHEL does not understand %end. *)
  (match os with
   | RHEL ((3|4|5), _) -> ()
   | _ ->
      bpf "%%end\n"
  );
  bpf "\n";

  (* Generate the %post script section.  The previous scripts did
   * many different things here.  The current script tries to update
   * the packages and enable Xen drivers only.
   *)
  let regenerate_dracut () =
    bpf "\
# To make dracut config changes permanent, we need to rerun dracut.
# Rerun dracut for the installed kernel (not the running kernel).
# See commit 0fa52e4e45d80874bc5ea5f112f74be1d3f3472f and
# -June/thread.html#00045
KERNEL_VERSION=\"$(rpm -q kernel --qf '%%{version}-%%{release}.%%{arch}\\n' |
sort -V | tail -1)\"
dracut -f /boot/initramfs-$KERNEL_VERSION.img $KERNEL_VERSION
"
  in
  (match os with
   | Fedora _ ->
      bpf "%%post\n";
      bpf "\
# Ensure the installation is up-to-date.
dnf -y --best upgrade
# This required otherwise the kernel will not be bootable, see
#
# #c24
grub2-mkconfig -o %s
" (quote
     (if needs_uefi os arch then "/etc/grub2-efi.cfg"
      else "/etc/grub2.cfg"));

      let needs_regenerate_dracut = ref false in

      if arch = X86_64 then (
        bpf "\
# Enable Xen domU support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" xen:vbd xen:vif \"' > virt-builder-xen-drivers.conf
popd
";
        needs_regenerate_dracut := true
      );

      if arch = PPC64 || arch = PPC64le then (
        bpf "\
# Enable virtio-scsi support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" virtio-blk virtio-scsi \"' > virt-builder-virtio-scsi.conf
popd
";
        needs_regenerate_dracut := true
      );

      if !needs_regenerate_dracut then regenerate_dracut ();

      bpf "%%end\n\n"

   | RHEL (7,_) ->
      bpf "%%post\n";

      let needs_regenerate_dracut = ref false in

      if arch = PPC64 || arch = PPC64le then (
        bpf "\
# Enable virtio-scsi support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" virtio-blk virtio-scsi \"' > virt-builder-virtio-scsi.conf
popd
";
        needs_regenerate_dracut := true
      );

      if !needs_regenerate_dracut then regenerate_dracut ();

      bpf "%%end\n\n"

   | _ -> ()
  );

  bpf "# EOF\n";

  (* Write out the kickstart file. *)
  let chan = open_out (ks_filename ^ ".new") in
  Buffer.output_buffer chan buf;
  close_out chan;

  let cmd =
    sprintf "mv %s %s" (quote (ks_filename ^ ".new")) (quote ks_filename) in
  if Sys.command cmd <> 0 then exit 1;

  (* Return the kickstart filename. *)
  ks_filename
(* Copy a preseed file into a fresh temporary directory and return
 * the new path.  d-i only works if the file is literally called
 * "/preseed.cfg", so the copy always gets that basename. *)
and copy_preseed_to_temporary source =
  let dir = (Filename.get_temp_dir_name () // random8 ()) ^ ".tmp" in
  Unix.mkdir dir 0o700;
  let dest = dir // "preseed.cfg" in
  let cmd = sprintf "cp %s %s" (quote source) (quote dest) in
  if Sys.command cmd <> 0 then exit 1;
  dest
(* For Windows, see the blog post “unattended installation of Windows
 * Server 2012 on KVM” (the URL was lost during comment extraction).
 *)
(* Build the autounattend.xml answer file for a Windows install and
 * wrap it in an ISO, returning the ISO's absolute path.  Prompts
 * interactively for the Windows product key.
 *
 * Fix: the wcm XML namespace declaration was empty (the URI was
 * stripped during extraction); restored to the standard WMIConfig
 * namespace, without which Windows Setup rejects the wcm:action
 * attributes.
 *)
and make_unattend_iso os arch =
  printf "enter Windows product key: ";
  let product_key = read_line () in

  let output_iso =
    Sys.getcwd () // filename_of_os os arch "-unattend.iso" in
  unlink_on_exit output_iso;

  let d = Filename.get_temp_dir_name () // random8 () in
  Unix.mkdir d 0o700;
  let config_dir = d // "config" in
  Unix.mkdir config_dir 0o700;
  let f = config_dir // "autounattend.xml" in
  let chan = open_out f in
  let arch =
    match arch with
    | X86_64 -> "amd64"
    | I686 -> "x86"
    | _ ->
       eprintf "%s: Windows architecture %s not supported\n"
         prog (string_of_arch arch);
       exit 1 in

  (* Tip: If the install fails with a useless error "The answer file is
   * invalid", type Shift + F10 into the setup screen and look for a
   * file called \Windows\Panther\Setupact.log (NB:
   * not \Windows\Setupact.log)
   *)
  fprintf chan "
<unattend xmlns=\"urn:schemas-microsoft-com:unattend\"
          xmlns:ms=\"urn:schemas-microsoft-com:asm.v3\"
          xmlns:wcm=\"http://schemas.microsoft.com/WMIConfig/2002/State\">
  <settings pass=\"windowsPE\">
    <component name=\"Microsoft-Windows-Setup\"
               publicKeyToken=\"31bf3856ad364e35\"
               language=\"neutral\"
               versionScope=\"nonSxS\"
               processorArchitecture=\"%s\">
      <UserData>
        <AcceptEula>true</AcceptEula>
        <ProductKey>
          <Key>%s</Key>
          <WillShowUI>OnError</WillShowUI>
        </ProductKey>
      </UserData>
      <DiskConfiguration>
        <Disk wcm:action=\"add\">
          <DiskID>0</DiskID>
          <WillWipeDisk>true</WillWipeDisk>
          <CreatePartitions>
            <!-- System partition -->
            <CreatePartition wcm:action=\"add\">
              <Order>1</Order>
              <Type>Primary</Type>
              <Size>300</Size>
            </CreatePartition>
            <!-- Windows partition -->
            <CreatePartition wcm:action=\"add\">
              <Order>2</Order>
              <Type>Primary</Type>
              <Extend>true</Extend>
            </CreatePartition>
          </CreatePartitions>
          <ModifyPartitions>
            <!-- System partition -->
            <ModifyPartition wcm:action=\"add\">
              <Order>1</Order>
              <PartitionID>1</PartitionID>
              <Label>System</Label>
              <Format>NTFS</Format>
              <Active>true</Active>
            </ModifyPartition>
            <!-- Windows partition -->
            <ModifyPartition wcm:action=\"add\">
              <Order>2</Order>
              <PartitionID>2</PartitionID>
              <Label>Windows</Label>
              <Letter>C</Letter>
              <Format>NTFS</Format>
            </ModifyPartition>
          </ModifyPartitions>
        </Disk>
        <WillShowUI>OnError</WillShowUI>
      </DiskConfiguration>
      <ImageInstall>
        <OSImage>
          <WillShowUI>Never</WillShowUI>
          <InstallFrom>
            <MetaData>
              <Key>/IMAGE/INDEX</Key>
              <Value>1</Value>
            </MetaData>
          </InstallFrom>
          <InstallTo>
            <DiskID>0</DiskID>
            <PartitionID>2</PartitionID>
          </InstallTo>
        </OSImage>
      </ImageInstall>
    </component>
    <component name=\"Microsoft-Windows-International-Core-WinPE\"
               publicKeyToken=\"31bf3856ad364e35\"
               language=\"neutral\"
               versionScope=\"nonSxS\"
               processorArchitecture=\"%s\">
      <SetupUILanguage>
        <UILanguage>en-US</UILanguage>
      </SetupUILanguage>
      <SystemLocale>en-US</SystemLocale>
      <UILanguage>en-US</UILanguage>
      <UserLocale>en-US</UserLocale>
    </component>
  </settings>
</unattend>"
    arch product_key arch;
  close_out chan;

  let cmd = sprintf "cd %s && mkisofs -o %s -J -r config"
              (quote d) (quote output_iso) in
  if Sys.command cmd <> 0 then exit 1;
  let cmd = sprintf "rm -rf %s" (quote d) in
  if Sys.command cmd <> 0 then exit 1;

  (* Return the name of the unattend ISO. *)
  output_iso
(* Work out the installer boot media for this OS/arch: normally a
 * [Location] URL for ‘virt-install --location’, or a [CDRom] ISO
 * (downloaded for FreeBSD, locally mirrored for Windows).
 *
 * NOTE(review): many mirror URL prefixes inside the strings below were
 * stripped when this file's comments/URLs were removed; those strings
 * are kept byte-for-byte but must be restored from the upstream script
 * before this function will actually work.
 *
 * Fix: four match-arm pattern lines in the Windows ISO table were lost
 * during extraction (only their trailing comments survived); they have
 * been reconstructed from [parse_windows_version].
 *)
and make_boot_media os arch =
  match os, arch with
  | Alma (major, minor), X86_64 ->
     (* UK mirror *)
     Location (sprintf "/\
                        %d.%d/BaseOS/x86_64/kickstart/"
                 major minor)

  | CentOS (major, _), Aarch64 ->
     (* XXX This always points to the latest CentOS, so
      * effectively the minor number is always ignored.
      *)
     Location (sprintf "/"
                 major)

  | CentOS (7, _), X86_64 ->
     (* For 6.x we rebuild this every time there is a new 6.x release, and bump
      * the revision in the index.
      * For 7.x this always points to the latest CentOS, so
      * effectively the minor number is always ignored.
      *)
     Location "-7/7/os/x86_64/"

  | CentOS (8, _), X86_64 ->
     (* This is probably the last CentOS 8 release. *)
     Location "/"

  | CentOSStream 8, X86_64 ->
     Location (sprintf "-stream/BaseOS/\
                        x86_64/os")

  | CentOSStream ver, X86_64 ->
     Location (sprintf "-stream/BaseOS/\
                        x86_64/os" ver)

  | Debian (_, dist), arch ->
     Location (sprintf "-%s"
                 dist (debian_arch_of_arch arch))

  (* Fedora primary architectures. *)
  | Fedora ver, Armv7 ->
     Location (sprintf "/\
                        %d/Server/armhfp/os/" ver)
  | Fedora ver, X86_64 when ver < 21 ->
     Location (sprintf "/\
                        releases/%d/Fedora/x86_64/os/" ver)
  | Fedora ver, X86_64 ->
     Location (sprintf "/\
                        %d/Server/x86_64/os/" ver)
  | Fedora ver, Aarch64 ->
     Location (sprintf "/\
                        %d/Server/aarch64/os/" ver)

  (* Fedora secondary architectures.
   * By using dl.fedoraproject.org we avoid randomly using mirrors
   * which might have incomplete copies.
   *)
  | Fedora ver, I686 ->
     Location (sprintf "-secondary/\
                        releases/%d/Server/i386/os/" ver)
  | Fedora ver, PPC64 ->
     Location (sprintf "-secondary/\
                        releases/%d/Server/ppc64/os/" ver)
  | Fedora ver, PPC64le ->
     Location (sprintf "-secondary/\
                        releases/%d/Server/ppc64le/os/" ver)
  | Fedora ver, S390X ->
     Location (sprintf "-secondary/\
                        releases/%d/Server/s390x/os/" ver)

  | RHEL (3, minor), X86_64 ->
     Location (sprintf "-3/\
                        U%d/AS/x86_64/tree" minor)
  | RHEL (4, minor), X86_64 ->
     Location (sprintf "-4/\
                        U%d/AS/x86_64/tree" minor)
  | RHEL (5, minor), I686 ->
     Location (sprintf "/\
                        RHEL-5-Server/U%d/i386/os" minor)
  | RHEL (5, minor), X86_64 ->
     Location (sprintf "/\
                        RHEL-5-Server/U%d/x86_64/os" minor)
  | RHEL (6, minor), I686 ->
     Location (sprintf "/\
                        RHEL-6/6.%d/Server/i386/os" minor)
  | RHEL (6, minor), X86_64 ->
     Location (sprintf "/\
                        RHEL-6/6.%d/Server/x86_64/os" minor)
  | RHEL (7, minor), X86_64 ->
     Location (sprintf "/\
                        rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/x86_64/os" minor)
  | RHEL (7, minor), PPC64 ->
     Location (sprintf "/\
                        rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/ppc64/os" minor)
  | RHEL (7, minor), PPC64le ->
     Location (sprintf "/\
                        rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/ppc64le/os" minor)
  | RHEL (7, minor), S390X ->
     Location (sprintf "/\
                        rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/s390x/os" minor)
  | RHEL (7, minor), Aarch64 ->
     Location (sprintf "/\
                        RHEL-ALT-7/7.%d/Server/aarch64/os" minor)
  | RHEL (8, minor), arch ->
     Location (sprintf "/\
                        rhel-6-7-8/rhel-8/RHEL-8/8.%d.0/BaseOS/%s/os"
                 minor (string_of_arch arch))
  | RHEL (9, minor), arch ->
     Location (sprintf "/\
                        RHEL-9/9.%d.0/BaseOS/%s/os" minor (string_of_arch arch))

  | Ubuntu (_, dist), X86_64 ->
     Location (sprintf "/\
                        %s/main/installer-amd64" dist)
  | Ubuntu (_, dist), PPC64le ->
     Location (sprintf "-ports/dists/\
                        %s/main/installer-ppc64el" dist)

  | FreeBSD (major, minor), X86_64 ->
     (* Download and decompress the release ISO. *)
     let iso = sprintf "FreeBSD-%d.%d-RELEASE-amd64-disc1.iso"
                 major minor in
     let iso_xz = sprintf "ftp/\
                           amd64/amd64/ISO-IMAGES/%d.%d/%s.xz"
                    major minor iso in
     let cmd = sprintf "wget -nc %s" (quote iso_xz) in
     if Sys.command cmd <> 0 then exit 1;
     let cmd = sprintf "unxz -f --keep %s.xz" iso in
     if Sys.command cmd <> 0 then exit 1;
     CDRom iso

  | Windows (major, minor, variant), arch ->
     let iso_name =
       match major, minor, variant, arch with
       | 6, 1, Client, X86_64 ->           (* Windows 7 *)
          "en_windows_7_ultimate_with_sp1_x64_dvd_u_677332.iso"
       | 6, 1, Server, X86_64 ->           (* Windows 2008 R2 *)
          "en_windows_server_2008_r2_with_sp1_x64_dvd_617601.iso"
       | 6, 2, Server, X86_64 ->           (* Windows Server 2012 *)
          "en_windows_server_2012_x64_dvd_915478.iso"
       | 6, 3, Server, X86_64 ->           (* Windows Server 2012 R2 *)
          "en_windows_server_2012_r2_with_update_x64_dvd_6052708.iso"
       | 10, 0, Server, X86_64 ->          (* Windows Server 2016 *)
          "en_windows_server_2016_updated_feb_2018_x64_dvd_11636692.iso"
       | _ ->
          eprintf "%s: don't have an installer ISO for this version of \
                   Windows\n"
            prog;
          exit 1 in
     CDRom (windows_installers // iso_name)

  | _ ->
     eprintf "%s: don't know how to calculate the --location for this OS \
              and architecture\n" prog;
     exit 1
(* Print notes for installs that are not fully automated, telling the
 * operator what manual steps to expect. *)
and print_install_notes = function
  | Ubuntu _ ->
     printf "\
Some preseed functions are not automated. You may need to hit [Return]
a few times during the install.\n"
  | FreeBSD _ ->
     printf "\
The FreeBSD install is not automated. Select all defaults, except:
- root password: builder
- timezone: UTC
- do not add any user accounts\n"
  | _ -> ()
(* If the install is not automated and we need a graphical console. *)
and needs_graphics = function
  | Alma _ | CentOS _ | CentOSStream _ | RHEL _
  | Debian _ | Ubuntu _ | Fedora _ -> false
  | FreeBSD _ | Windows _ -> true
(* Build the argv array for virt-install.
 * NB: Arguments do not need to be quoted, because we pass them
 * directly to exec(2).
 *)
and make_virt_install_command os arch ks tmpname tmpout tmpefivars
                              boot_media virtual_size_gb =
  (* Arguments are accumulated in reverse and reversed at the end. *)
  let args = ref [] in
  let add arg = args := arg :: !args in

  add "virt-install";

  (* This ensures the libvirt domain will be automatically deleted
   * when virt-install exits.  However it doesn't work for certain
   * types of guest.
   *)
  (match os with
   | Windows _ ->
      printf "after Windows has installed, do:\n";
      printf " virsh shutdown %s\n virsh undefine %s\n%!" tmpname tmpname;
   | _ -> add "--transient"
  );

  (* Don't try relabelling everything.  This is particularly necessary
   * for the Windows install ISOs which are located on NFS.
   *)
  (match os with
   | Windows _ -> add "--security=type=none"
   | _ -> ()
  );

  add (sprintf "--name=%s" tmpname);
  (*add "--print-xml";*)
  add "--ram=4096";
  (match arch with
   | X86_64 ->
      add "--arch=x86_64";
      add "--cpu=host";
      add "--vcpus=4"
   | PPC64 ->
      add "--arch=ppc64";
      add "--machine=pseries";
      add "--cpu=power7";
      add "--vcpus=1"
   | PPC64le ->
      add "--arch=ppc64le";
      add "--machine=pseries";
      add "--cpu=power8";
      add "--vcpus=1"
   | Armv7 ->
      add "--arch=armv7l";
      (* RHBZ#1633328 — NOTE(review): the original comment here was
       * truncated during extraction and may have accompanied a lost
       * code line (e.g. an add "--machine=..." call); check upstream. *)
      add "--vcpus=1"
   | arch ->
      add (sprintf "--arch=%s" (string_of_arch arch));
      add "--vcpus=1"
  );
  add (sprintf "--os-variant=%s" (os_variant_of_os ~for_fedora:true os arch));

  (* UEFI firmware/NVRAM, when needed. *)
  (match tmpefivars with
   | Some (code, vars) ->
      add "--boot";
      add (sprintf "loader=%s,loader_ro=yes,loader_type=pflash,nvram=%s"
             code vars)
   | _ -> ()
  );

  (* --initrd-inject and --extra-args flags for Linux only. *)
  (match os with
   | Debian _ | Ubuntu _
   | Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _ ->
      let ks =
        match ks with None -> assert false | Some ks -> ks in
      add (sprintf "--initrd-inject=%s" ks);

      (* Kernel argument pointing the installer at the answer file;
       * the directive name varies by OS and version. *)
      let os_extra =
        match os with
        | Debian _ | Ubuntu _ -> "auto"
        | Fedora n when n >= 34 ->
           sprintf "inst.ks=file:/%s" (Filename.basename ks)
        | Alma (major, _) ->
           (* This is only required because of missing osinfo-db data.
            * Once this is fixed, do the same as CentOS below.
            * NOTE(review): the inst.repo= URL prefix was stripped
            * during extraction.
            *)
           sprintf "inst.ks=file:/%s inst.repo=/\
                    almalinux/%d/BaseOS/x86_64/os/"
             (Filename.basename ks) major
        | RHEL (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 ->
           sprintf "inst.ks=file:/%s" (Filename.basename ks)
        | Fedora _ | RHEL _ | CentOS _ | CentOSStream _ ->
           sprintf "ks=file:/%s" (Filename.basename ks)
        | FreeBSD _ | Windows _ -> assert false in

      (* Pass any local http_proxy through to the installer. *)
      let proxy =
        let p = try Some (Sys.getenv "http_proxy") with Not_found -> None in
        match p with
        | None ->
           (match os with
            | Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _
            | Ubuntu _ -> ""
            | Debian _ -> "mirror/http/proxy="
            | FreeBSD _ | Windows _ -> assert false
           )
        | Some p ->
           match os with
           | Fedora n when n >= 34 -> sprintf "inst.proxy=" ^ p
           | RHEL (n, _)
           | Alma (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 ->
              "inst.proxy=" ^ p
           | Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _ ->
              "proxy=" ^ p
           | Debian _ | Ubuntu _ -> "mirror/http/proxy=" ^ p
           | FreeBSD _ | Windows _ -> assert false in

      add (sprintf "--extra-args=%s %s %s" (* sic: does NOT need to be quoted *)
             os_extra proxy (kernel_cmdline_of_os os arch));

   (* doesn't need --initrd-inject *)
   | FreeBSD _ | Windows _ -> ()
  );

  add (sprintf "--disk=%s,size=%d,format=raw"
         (Sys.getcwd () // tmpout) virtual_size_gb);

  (match boot_media with
   | Location location -> add (sprintf "--location=%s" location)
   | CDRom iso -> add (sprintf "--disk=%s,device=cdrom,boot_order=1" iso)
  );

  (* Windows requires one or two extra CDs!
   * See the “unattended installation of Windows Server 2012 on KVM”
   * blog post (URL lost during extraction).
   *)
  (match os with
   | Windows _ ->
      let unattend_iso =
        match ks with None -> assert false | Some ks -> ks in
      (*add "--disk=/usr/share/virtio-win/virtio-win.iso,device=cdrom,boot_order=98";*)
      add (sprintf "--disk=%s,device=cdrom,boot_order=99" unattend_iso)
   | _ -> ()
  );

  add "--serial=pty";
  if not (needs_graphics os) then add "--nographics";

  (* Return the command line (list of arguments). *)
  Array.of_list (List.rev !args)
and print_virt_install_command chan vi =
Array.iter (
fun arg ->
if arg.[0] = '-' then fprintf chan "\\\n %s " (quote arg)
else fprintf chan "%s " (quote arg)
) vi;
fprintf chan "\n\n%!"
The optional [ ? for_fedora ] flag means that we only return
* data as currently supported by the latest version of
* Fedora .
*
* This is because if you try to use [ virt - install --os - variant= ... ]
* with an os - variant which the host does n't support , it wo n't work ,
* and I currently use , so whatever is supported there matters .
* libosinfo data as currently supported by the latest version of
* Fedora.
*
* This is because if you try to use [virt-install --os-variant=...]
* with an os-variant which the host doesn't support, it won't work,
* and I currently use Fedora, so whatever is supported there matters.
*)
and os_variant_of_os ?(for_fedora = false) os arch =
if not for_fedora then (
match os with
| Fedora ver -> sprintf "fedora%d" ver
| Alma (major, _) -> sprintf "almalinux%d" major
| CentOS (major, minor) -> sprintf "centos%d.%d" major minor
| CentOSStream ver -> sprintf "centosstream%d" ver
| RHEL (major, minor) -> sprintf "rhel%d.%d" major minor
| Debian (ver, _) -> sprintf "debian%d" ver
| Ubuntu (ver, _) -> sprintf "ubuntu%s" ver
| FreeBSD (major, minor) -> sprintf "freebsd%d.%d" major minor
| Windows (6, 1, Client) -> "win7"
| Windows (6, 1, Server) -> "win2k8r2"
| Windows (6, 2, Server) -> "win2k12"
| Windows (6, 3, Server) -> "win2k12r2"
| Windows (10, 0, Server) -> "win2k16"
| Windows _ -> assert false
)
else (
match os, arch with
This special case for / ppc64{,le } is needed to work
* around a bug in virt - install :
*
* around a bug in virt-install:
*
*)
| Fedora _, (PPC64|PPC64le) -> "fedora22"
| Fedora ver, _ when ver <= 23 ->
sprintf "fedora%d" ver
max version known in Fedora 34
| Alma (major, _), _ -> sprintf "almalinux%d" major
max version known in Fedora 36
| CentOS (major, minor), _ when (major, minor) <= (7,0) ->
sprintf "centos%d.%d" major minor
max version known in Fedora 31
max version known in Fedora 36
max version known in Fedora 36
| RHEL (6, minor), _ when minor <= 8 ->
sprintf "rhel6.%d" minor
max version known in Fedora 29
| RHEL (7, minor), _ when minor <= 4 ->
sprintf "rhel7.%d" minor
max version known in Fedora 29
max version known in Fedora 36
max version known in Fedora 37
| RHEL (major, minor), _ ->
sprintf "rhel%d.%d" major minor
| Debian (ver, _), _ when ver <= 8 -> sprintf "debian%d" ver
max version known in Fedora 26
| Ubuntu (ver, _), _ -> sprintf "ubuntu%s" ver
| FreeBSD (major, minor), _ -> sprintf "freebsd%d.%d" major minor
| Windows (6, 1, Client), _ -> "win7"
| Windows (6, 1, Server), _ -> "win2k8r2"
| Windows (6, 2, Server), _ -> "win2k12"
| Windows (6, 3, Server), _ -> "win2k12r2"
| Windows (10, 0, Server), _ -> "win2k16"
| Windows _, _ -> assert false
)
and kernel_cmdline_of_os os arch =
match os, arch with
| _, X86_64
| _, I686
| _, S390X ->
"console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"
| _, Aarch64 ->
"console=ttyAMA0 earlyprintk=pl011,0x9000000 ignore_loglevel \
no_timer_check printk.time=1 rd_NO_PLYMOUTH"
| _, Armv7 ->
"console=tty0 console=ttyAMA0,115200 rd_NO_PLYMOUTH"
| (Debian _|Fedora _|Ubuntu _), (PPC64|PPC64le) ->
"console=tty0 console=hvc0 rd_NO_PLYMOUTH"
| (RHEL _ | Alma _ | CentOS _ | CentOSStream _), PPC64
| (RHEL _ | Alma _ | CentOS _ | CentOSStream _), PPC64le ->
"console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"
| FreeBSD _, _ | Windows _, _ -> assert false
and make_postinstall os arch =
match os with
| Debian _ | Ubuntu _ ->
Some (
fun g ->
Remove apt proxy configuration ( thanks : ) .
g#rm_f "/etc/apt/apt.conf";
g#touch "/etc/apt/apt.conf"
)
| RHEL (major, minor) when major >= 5 ->
Some (
fun g ->
RHEL guests require alternate yum configuration pointing to
* Red Hat 's internal servers .
* Red Hat's internal servers.
*)
let yum_conf = make_rhel_yum_conf major minor arch in
g#write "/etc/yum.repos.d/download.devel.redhat.com.repo" yum_conf
)
| RHEL _ | Fedora _ | Alma _ | CentOS _ | CentOSStream _
| FreeBSD _ | Windows _ -> None
and make_rhel_yum_conf major minor arch =
let buf = Buffer.create 4096 in
let bpf fs = bprintf buf fs in
if major <= 9 then (
let baseurl, srpms, optional =
match major, arch with
| 5, (I686|X86_64) ->
let arch = match arch with I686 -> "i386" | _ -> string_of_arch arch in
let topurl =
sprintf "-5-Server/U%d"
minor in
sprintf "%s/%s/os/Server" topurl arch,
sprintf "%s/source/SRPMS" topurl,
None
| 6, (I686|X86_64) ->
let arch = match arch with I686 -> "i386" | _ -> string_of_arch arch in
let topurl =
sprintf "-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl arch,
sprintf "%s/source/SRPMS" topurl,
Some ("Optional",
sprintf "%s/Server/optional/%s/os" arch topurl,
sprintf "%s/Server/optional/source/SRPMS" topurl)
| 7, (X86_64|PPC64|PPC64le|S390X) ->
let topurl =
sprintf "-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server/source/tree" topurl,
Some ("Optional",
sprintf "%s/Server-optional/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server-optional/source/tree" topurl)
| 7, Aarch64 ->
let topurl =
sprintf "-ALT-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server/source/tree" topurl,
Some ("Optional",
sprintf "%s/Server-optional/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server-optional/source/tree" topurl)
| (8|9), arch ->
let topurl =
sprintf "-%d/%d.%d.0"
major major minor in
sprintf "%s/BaseOS/%s/os" topurl (string_of_arch arch),
sprintf "%s/BaseOS/source/tree" topurl,
Some ("AppStream",
sprintf "%s/AppStream/%s/os" topurl (string_of_arch arch),
sprintf "%s/AppStream/source/tree" topurl)
| _ -> assert false in
bpf "\
# Yum configuration pointing to Red Hat servers.
[rhel%d]
name=RHEL %d Server
baseurl=%s
enabled=1
gpgcheck=0
keepcache=0
[rhel%d-source]
name=RHEL %d Server Source
baseurl=%s
enabled=0
gpgcheck=0
keepcache=0
" major major baseurl major major srpms;
(match optional with
| None -> ()
| Some (name, optionalbaseurl, optionalsrpms) ->
let lc_name = String.lowercase_ascii name in
bpf "\
[rhel%d-%s]
name=RHEL %d Server %s
baseurl=%s
enabled=1
gpgcheck=0
keepcache=0
[rhel%d-%s-source]
name=RHEL %d Server %s
baseurl=%s
enabled=0
gpgcheck=0
keepcache=0
" major lc_name major lc_name optionalbaseurl
major lc_name major lc_name optionalsrpms
)
) else (
not implemented for RHEL major > = 10
);
Buffer.contents buf
and make_index_fragment os arch index_fragment output nvram revision
expandfs lvexpandfs virtual_size_gb =
let virtual_size = Int64.of_int virtual_size_gb in
let virtual_size = Int64.mul virtual_size 1024_L in
let virtual_size = Int64.mul virtual_size 1024_L in
let virtual_size = Int64.mul virtual_size 1024_L in
let chan = open_out (index_fragment ^ ".new") in
let fpf fs = fprintf chan fs in
fpf "[%s]\n" (string_of_os_noarch os);
fpf "name=%s\n" (long_name_of_os os arch);
fpf "osinfo=%s\n" (os_variant_of_os os arch);
fpf "arch=%s\n" (string_of_arch arch);
fpf "file=%s\n" output;
(match revision with
| None -> ()
| Some i -> fpf "revision=%d\n" i
);
fpf "checksum[sha512]=%s\n" (sha512sum_of_file output);
fpf "format=raw\n";
fpf "size=%Ld\n" virtual_size;
fpf "compressed_size=%d\n" (size_of_file output);
fpf "expand=%s\n" expandfs;
(match lvexpandfs with
| None -> ()
| Some fs -> fpf "lvexpand=%s\n" fs
);
let notes = notes_of_os os arch nvram in
(match notes with
| first :: notes ->
fpf "notes=%s\n" first;
List.iter (fpf " %s\n") notes
| [] -> assert false
);
fpf "\n";
close_out chan;
let cmd =
sprintf "mv %s %s"
(quote (index_fragment ^ ".new")) (quote index_fragment) in
if Sys.command cmd <> 0 then exit 1
and long_name_of_os os arch =
match os, arch with
| Alma (major, minor), X86_64 ->
sprintf "AlmaLinux %d.%d" major minor
| Alma (major, minor), arch ->
sprintf "AlmaLinux %d.%d (%s)" major minor (string_of_arch arch)
| CentOS (major, minor), X86_64 ->
sprintf "CentOS %d.%d" major minor
| CentOS (major, minor), arch ->
sprintf "CentOS %d.%d (%s)" major minor (string_of_arch arch)
| CentOSStream ver, X86_64 ->
sprintf "CentOS Stream %d" ver
| CentOSStream ver, arch ->
sprintf "CentOS Stream %d (%s)" ver (string_of_arch arch)
| Debian (ver, dist), X86_64 ->
sprintf "Debian %d (%s)" ver dist
| Debian (ver, dist), arch ->
sprintf "Debian %d (%s) (%s)" ver dist (string_of_arch arch)
| Fedora ver, X86_64 ->
sprintf "Fedora® %d Server" ver
| Fedora ver, arch ->
sprintf "Fedora® %d Server (%s)" ver (string_of_arch arch)
| RHEL (major, minor), X86_64 ->
sprintf "Red Hat Enterprise Linux® %d.%d" major minor
| RHEL (major, minor), arch ->
sprintf "Red Hat Enterprise Linux® %d.%d (%s)"
major minor (string_of_arch arch)
| Ubuntu (ver, dist), X86_64 ->
sprintf "Ubuntu %s (%s)" ver dist
| Ubuntu (ver, dist), arch ->
sprintf "Ubuntu %s (%s) (%s)" ver dist (string_of_arch arch)
| FreeBSD (major, minor), X86_64 ->
sprintf "FreeBSD %d.%d" major minor
| FreeBSD (major, minor), arch ->
sprintf "FreeBSD %d.%d (%s)" major minor (string_of_arch arch)
| Windows (6, 1, Client), arch ->
sprintf "Windows 7 (%s)" (string_of_arch arch)
| Windows (6, 1, Server), arch ->
sprintf "Windows Server 2008 R2 (%s)" (string_of_arch arch)
| Windows (6, 2, Server), arch ->
sprintf "Windows Server 2012 (%s)" (string_of_arch arch)
| Windows (6, 3, Server), arch ->
sprintf "Windows Server 2012 R2 (%s)" (string_of_arch arch)
| Windows (10, 0, Server), arch ->
sprintf "Windows Server 2016 (%s)" (string_of_arch arch)
| Windows _, _ -> assert false
and notes_of_os os arch nvram =
let args = ref [] in
let add arg = args := arg :: !args in
add (long_name_of_os os arch);
add "";
(match os with
| Alma _ ->
add "This AlmaLinux image contains only unmodified @Core group packages."
| CentOS _ ->
add "This CentOS image contains only unmodified @Core group packages."
| CentOSStream _ ->
add "This CentOS Stream image contains only unmodified @Core \
group packages."
| Debian _ ->
add "This is a minimal Debian install."
| Fedora _ ->
add "This Fedora image contains only unmodified @Core group packages.";
add "";
add "Fedora and the Infinity design logo are trademarks of Red Hat, Inc.";
add "Source and further information is available from \
/"
| RHEL _ -> assert false (* cannot happen, see caller *)
| Ubuntu _ ->
add "This is a minimal Ubuntu install."
| FreeBSD _ ->
add "This is an all-default FreeBSD install."
| Windows _ ->
add "This is an unattended Windows install.";
add "";
add "You must have an MSDN subscription to use this image."
);
add "";
(* Specific notes for particular versions. *)
let reconfigure_ssh_host_keys_debian () =
add "This image does not contain SSH host keys. To regenerate them use:";
add "";
add " --firstboot-command \"dpkg-reconfigure openssh-server\"";
add "";
in
let fix_serial_console_debian () =
add "The serial console is not working in this image. To enable it, do:";
add "";
add " --edit '/etc/default/grub:";
add " s/^GRUB_CMDLINE_LINUX_DEFAULT=.*/GRUB_CMDLINE_LINUX_DEFAULT=\"console=tty0 console=ttyS0,115200n8\"/' \\";
add " --run-command update-grub";
add ""
in
let builder_account_warning () =
add "IMPORTANT WARNING:";
add "It seems to be impossible to create an Ubuntu >= 14.04 image using";
add "preseed without creating a user account. Therefore this image";
add "contains a user account 'builder'. I have disabled it, so that";
add "people who don't read release notes don't get caught out, but you";
add "might still wish to delete it completely.";
add ""
in
(match os with
| CentOS (6, _) ->
add "‘virt-builder centos-6’ will always install the latest 6.x release.";
add ""
| Debian ((8|9), _) ->
reconfigure_ssh_host_keys_debian ();
| Debian _ ->
add "This image is so very minimal that it only includes an ssh server";
reconfigure_ssh_host_keys_debian ();
| Ubuntu ("16.04", _) ->
builder_account_warning ();
fix_serial_console_debian ();
reconfigure_ssh_host_keys_debian ();
| Ubuntu (ver, _) when ver >= "14.04" ->
builder_account_warning ();
reconfigure_ssh_host_keys_debian ();
| Ubuntu _ ->
reconfigure_ssh_host_keys_debian ();
| _ -> ()
);
(match nvram with
| Some vars ->
add "You will need to use the associated UEFI NVRAM variables file:";
add (sprintf " " vars);
add "";
| None -> ()
);
add "This template was generated by a script in the libguestfs source tree:";
add " builder/templates/make-template.ml";
add "Associated files used to prepare this template can be found in the";
add "same directory.";
List.rev !args
and read_revision filename =
match (try Some (open_in filename) with Sys_error _ -> None) with
| None -> `No_file
| Some chan ->
let r = ref `No_revision in
let rex = Str.regexp "^revision=\\([0-9]+\\)$" in
(try
let rec loop () =
let line = input_line chan in
if Str.string_match rex line 0 then (
r := `Revision (int_of_string (Str.matched_group 1 line));
raise End_of_file
);
loop ()
in
loop ()
with End_of_file -> ()
);
close_in chan;
!r
and sha512sum_of_file filename =
let cmd = sprintf "sha512sum %s | awk '{print $1}'" (quote filename) in
let chan = Unix.open_process_in cmd in
let line = input_line chan in
let pstat = Unix.close_process_in chan in
check_process_status_for_errors pstat;
line
and size_of_file filename = (Unix.stat filename).Unix.st_size
and open_guest ?(mount = false) filename =
let g = new Guestfs.guestfs () in
g#add_drive_opts ~format:"raw" filename;
g#launch ();
let roots = g#inspect_os () in
if Array.length roots = 0 then (
eprintf "%s: cannot inspect this guest - \
this may mean guest installation failed\n" prog;
exit 1
);
if mount then (
let root = roots.(0) in
let mps = g#inspect_get_mountpoints root in
let cmp (a,_) (b,_) = compare (String.length a) (String.length b) in
let mps = List.sort cmp mps in
List.iter (fun (mp, dev) -> g#mount dev mp) mps
);
g
and check_process_status_for_errors = function
| Unix.WEXITED 0 -> ()
| Unix.WEXITED i ->
eprintf "command exited with %d\n%!" i;
exit 1
| Unix.WSIGNALED i ->
eprintf "command killed by signal %d\n%!" i;
exit 1
| Unix.WSTOPPED i ->
eprintf "command stopped by signal %d\n%!" i;
exit 1
and random8 =
let chars = "abcdefghijklmnopqrstuvwxyz0123456789" in
fun () ->
String.concat "" (
List.map (
fun _ ->
let c = Random.int 36 in
let c = chars.[c] in
String.make 1 c
) [1;2;3;4;5;6;7;8]
)
let () = main ()
| null | https://raw.githubusercontent.com/rwmjones/guestfs-tools/57423d907270526ea664ff15601cce956353820e/builder/templates/make-template.ml | ocaml | Ensure that a file is deleted on exit.
Check we are being run from the correct directory.
Check that the ./run script was used.
Check we're not being run as root.
major, minor
major, minor
major
version, dist name like "wheezy"
version number
major, minor
major, minor, variant
virt-install --location (preferred)
downloaded CD-ROM
Choose a disk size for this OS.
Find the boot media. Normally ‘virt-install --location’ but
* for FreeBSD it downloads the boot ISO.
Choose a random temporary disk name.
Create the final output name (actually not quite final because
* we will xz-compress it).
Now construct the virt-install command.
Print the virt-install command just before we run it, because
* this is expected to be long-running.
Save the virt-install command to a file, for documentation.
Run the virt-install command.
Run virt-filesystems, simply to display the filesystems in the image.
Some guests are special flowers that need post-installation
* filesystem changes.
Sparsify and copy to output name.
Move file to final name before compressing.
Compress the output.
Set public readable permissions on the final file.
If there is an existing file, read the revision and increment it.
no existing file
file exists, but no revision line, so revision=1
existing file with revision line
This is what virt-builder called "os-version".
Does virt-sysprep know how to sysprep this OS?
Preseed.
Not automated.
Windows unattend.xml wrapped in an ISO.
Fedora 34+ removes the "install" keyword.
Required as a workaround for CentOS 8.0, see:
* -devel/2019-September/017813.html
* -devel/2019-October/017882.html
cannot happen, see caller
Write out the kickstart file.
Return the kickstart filename.
For Windows:
* -installation-of-windows-server-2012-on-kvm
Return the name of the unattend ISO.
XXX This always points to the latest CentOS, so
* effectively the minor number is always ignored.
Fedora primary architectures.
Fedora secondary architectures.
* By using dl.fedoraproject.org we avoid randomly using mirrors
* which might have incomplete copies.
Windows 2008 R2
If the install is not automated and we need a graphical console.
add "--print-xml";
This is only required because of missing osinfo-db data.
*
* Once this is fixed, do the same as CentOS below.
sic: does NOT need to be quoted
doesn't need --initrd-inject
add "--disk=/usr/share/virtio-win/virtio-win.iso,device=cdrom,boot_order=98";
Return the command line (list of arguments).
cannot happen, see caller
Specific notes for particular versions. | #!/usr/bin/env ocaml
libguestfs
* Copyright ( C ) 2016 - 2023 Red Hat Inc.
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc. , 51 Franklin Street , Fifth Floor , Boston , USA .
* Copyright (C) 2016-2023 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*)
This script is used to create the virt - builder templates hosted
* /
*
* Prior to November 2016 , the templates were generated using
* shell scripts located in libguestfs.git/builder/website .
* /
*
* Prior to November 2016, the templates were generated using
* shell scripts located in libguestfs.git/builder/website.
*)
#load "str.cma";;
#load "unix.cma";;
use globally installed
#load "mlguestfs.cma";;
open Printf
let windows_installers = "/mnt/media/installers/Windows"
let prog = "make-template"
let unlink_on_exit =
let files = ref [] in
at_exit (
fun () -> List.iter (fun f -> try Unix.unlink f with _ -> ()) !files
);
fun file -> files := file :: !files
let () =
if not (Sys.file_exists "debian.preseed") then (
eprintf "%s: run this script from the builder/templates subdirectory\n"
prog;
exit 1
);
(try ignore (Sys.getenv "VIRT_BUILDER_DIRS")
with Not_found ->
eprintf "%s: you must use `../../run ./make-template.ml ...' \
to run this script\n"
prog;
exit 1
);
if Unix.geteuid () = 0 then (
eprintf "%s: don't run this script as root\n" prog;
exit 1
);
... and that LIBVIRT_DEFAULT_URI = qemu is NOT set ,
* which is the same as above .
* which is the same as above.
*)
let s = try Sys.getenv "LIBVIRT_DEFAULT_URI" with Not_found -> "" in
if s = "qemu" then (
eprintf "%s: don't set LIBVIRT_DEFAULT_URI=qemu\n" prog;
exit 1
)
;;
type os =
| RHEL of int * int
| Ubuntu of string * string
and windows_variant = Client | Server
type arch = X86_64 | Aarch64 | Armv7 | I686 | PPC64 | PPC64le | S390X
type boot_media =
let quote = Filename.quote
let (//) = Filename.concat
let rec main () =
assert (Sys.word_size = 64);
Random.self_init ();
Parse the command line .
let os, arch = parse_cmdline () in
let virtual_size_gb = get_virtual_size_gb os arch in
For OSes which require a kickstart , this generates one .
* For OSes which require a preseed file , this returns one ( we
* do n't generate preseed files at the moment ) .
* For Windows this returns an unattend file in an ISO .
* For OSes which can not be automated ( FreeBSD ) , this returns None .
* For OSes which require a preseed file, this returns one (we
* don't generate preseed files at the moment).
* For Windows this returns an unattend file in an ISO.
* For OSes which cannot be automated (FreeBSD), this returns None.
*)
let ks = make_kickstart os arch in
let boot_media = make_boot_media os arch in
Choose a random temporary name for the libvirt domain .
let tmpname = sprintf "tmp-%s" (random8 ()) in
let tmpout = sprintf "%s.img" tmpname in
unlink_on_exit tmpout;
let output = filename_of_os os arch "" in
Some architectures need EFI boot .
let tmpefivars =
if needs_uefi os arch then (
let code, vars =
match arch with
| X86_64 ->
"/usr/share/edk2/ovmf/OVMF_CODE.fd",
"/usr/share/edk2/ovmf/OVMF_VARS.fd"
| Aarch64 ->
"/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw",
"/usr/share/edk2/aarch64/vars-template-pflash.raw"
| Armv7 ->
"/usr/share/edk2/arm/QEMU_EFI-pflash.raw",
"/usr/share/edk2/arm/vars-template-pflash.raw"
| _ -> assert false in
let vars_out = Sys.getcwd () // sprintf "%s.vars" tmpname in
unlink_on_exit vars_out;
let cmd = sprintf "cp %s %s" (quote vars) (quote vars_out) in
if Sys.command cmd <> 0 then exit 1;
Some (code, vars_out)
)
else None in
let vi = make_virt_install_command os arch ks tmpname tmpout tmpefivars
boot_media virtual_size_gb in
print_virt_install_command stdout vi;
let chan = open_out (filename_of_os os arch ".virt-install-cmd") in
fprintf chan "# This is the virt-install command which was used to create\n";
fprintf chan "# the virt-builder template '%s'\n" (string_of_os os arch);
fprintf chan "# NB: This file is generated for documentation \
purposes ONLY!\n";
fprintf chan "# This script was never run, and is not intended to be run.\n";
fprintf chan "\n";
print_virt_install_command chan vi;
close_out chan;
Print the virt - install notes for OSes which can not be automated
* fully . ( These are different from the ‘ notes= ’ section in the
* index fragment ) .
* fully. (These are different from the ‘notes=’ section in the
* index fragment).
*)
print_install_notes os;
printf "\n\n%!";
let pid = Unix.fork () in
if pid = 0 then Unix.execvp "virt-install" vi;
let _, pstat = Unix.waitpid [] pid in
check_process_status_for_errors pstat;
If there were NVRAM variables , move them to the final name and
* compress them . Doing this operation later means the cleanup of
* the guest will remove them as well ( because of --nvram ) .
* compress them. Doing this operation later means the cleanup of
* the guest will remove them as well (because of --nvram).
*)
let nvram =
match tmpefivars with
| Some (_, vars) ->
let f = sprintf "%s-nvram" output in
let cmd = sprintf "mv %s %s" (quote vars) (quote f) in
if Sys.command cmd <> 0 then exit 1;
let cmd = sprintf "xz -f --best %s" (quote f) in
if Sys.command cmd <> 0 then exit 1;
Some (f ^ ".xz")
| None -> None in
ignore (Sys.command "sync");
let cmd = sprintf "virt-filesystems -a %s --all --long -h" (quote tmpout) in
if Sys.command cmd <> 0 then exit 1;
let postinstall = make_postinstall os arch in
Get the root filesystem . If the root filesystem is then
* get the partition containing it .
* get the partition containing it.
*)
let g = open_guest ~mount:(postinstall <> None) tmpout in
let roots = g#inspect_get_roots () in
let expandfs, lvexpandfs =
let rootfs = g#canonical_device_name roots.(0) in
if String.length rootfs >= 7 && String.sub rootfs 0 7 = "/dev/sd" then
non - LVM case
else (
The case , find the containing partition to expand .
let pvs = Array.to_list (g#pvs ()) in
match pvs with
| [pv] ->
let pv = g#canonical_device_name pv in
assert (String.length pv >= 7 && String.sub pv 0 7 = "/dev/sd");
pv, Some rootfs
| [] | _::_::_ -> assert false
) in
(match postinstall with
| None -> ()
| Some f -> f g
);
g#shutdown ();
g#close ();
(match os with
| Ubuntu (ver, _) when ver >= "14.04" ->
In Ubuntu > = 14.04 you ca n't complete the install without creating
* a user account . We create one called ' builder ' , but we also
* disable it . XXX Combine with virt - sysprep step .
* a user account. We create one called 'builder', but we also
* disable it. XXX Combine with virt-sysprep step.
*)
let cmd =
sprintf "virt-customize -a %s --password builder:disabled"
(quote tmpout) in
if Sys.command cmd <> 0 then exit 1
| _ -> ()
);
if can_sysprep_os os then (
Sysprep . - using guests .
printf "Sysprepping ...\n%!";
let cmd = sprintf "virt-sysprep --quiet -a %s" (quote tmpout) in
if Sys.command cmd <> 0 then exit 1
);
printf "Sparsifying ...\n%!";
let cmd =
sprintf "virt-sparsify --inplace --quiet %s" (quote tmpout) in
if Sys.command cmd <> 0 then exit 1;
let cmd =
sprintf "mv %s %s" (quote tmpout) (quote output) in
if Sys.command cmd <> 0 then exit 1;
printf "Compressing ...\n%!";
let cmd =
sprintf "xz -f --best --block-size=16777216 %s" (quote output) in
if Sys.command cmd <> 0 then exit 1;
let output = output ^ ".xz" in
let cmd = sprintf "chmod 0644 %s" (quote output) in
if Sys.command cmd <> 0 then exit 1;
printf "Template completed: %s\n%!" output;
Construct the index fragment , but do n't create this for the private
* RHEL images .
* RHEL images.
*)
(match os with
| RHEL _ -> ()
| _ ->
let index_fragment = filename_of_os os arch ".index-fragment" in
let revision = read_revision index_fragment in
let revision =
match revision with
| `No_file -> None
| `No_revision -> Some 2
| `Revision i -> Some (i+1) in
make_index_fragment os arch index_fragment output nvram revision
expandfs lvexpandfs virtual_size_gb;
Validate the fragment we have just created .
let cmd = sprintf "virt-index-validate %s" (quote index_fragment) in
if Sys.command cmd <> 0 then exit 1;
printf "Index fragment created: %s\n" index_fragment
);
printf "Finished successfully.\n%!"
and parse_cmdline () =
let anon = ref [] in
let usage = "\
../../run ./make-template.ml [--options] os version [arch]
Usage:
../../run ./make-template.ml [--options] os version [arch]
Examples:
../../run ./make-template.ml fedora 25
../../run ./make-template.ml rhel 7.3 ppc64le
The arch defaults to x86_64. Note that i686 is treated as a
separate arch.
Options:
" in
let spec = Arg.align [
] in
Arg.parse spec (fun s -> anon := s :: !anon) usage;
let os, ver, arch =
match List.rev !anon with
| [os; ver] -> os, ver, "x86_64"
| [os; ver; arch] -> os, ver, arch
| _ ->
eprintf "%s [--options] os version [arch]\n" prog;
exit 1 in
let os = os_of_string os ver
and arch = arch_of_string arch in
os, arch
and os_of_string os ver =
match os, ver with
| "alma", ver -> let maj, min = parse_major_minor ver in Alma (maj, min)
| "centos", ver -> let maj, min = parse_major_minor ver in CentOS (maj, min)
| "centosstream", ver -> CentOSStream(int_of_string ver)
| "rhel", ver -> let maj, min = parse_major_minor ver in RHEL (maj, min)
| "debian", "6" -> Debian (6, "squeeze")
| "debian", "7" -> Debian (7, "wheezy")
| "debian", "8" -> Debian (8, "jessie")
| "debian", "9" -> Debian (9, "stretch")
| "debian", "10" -> Debian (10, "buster")
| "debian", "11" -> Debian (11, "bullseye")
| "ubuntu", "10.04" -> Ubuntu (ver, "lucid")
| "ubuntu", "12.04" -> Ubuntu (ver, "precise")
| "ubuntu", "14.04" -> Ubuntu (ver, "trusty")
| "ubuntu", "16.04" -> Ubuntu (ver, "xenial")
| "ubuntu", "18.04" -> Ubuntu (ver, "bionic")
| "ubuntu", "20.04" -> Ubuntu (ver, "focal")
| "ubuntu", "22.04" -> Ubuntu (ver, "jammy")
| "fedora", ver -> Fedora (int_of_string ver)
| "freebsd", ver -> let maj, min = parse_major_minor ver in FreeBSD (maj, min)
| "windows", ver -> parse_windows_version ver
| _ ->
eprintf "%s: unknown or unsupported OS (%s, %s)\n" prog os ver; exit 1
and parse_major_minor ver =
let rex = Str.regexp "^\\([0-9]+\\)\\.\\([0-9]+\\)$" in
if Str.string_match rex ver 0 then (
int_of_string (Str.matched_group 1 ver),
int_of_string (Str.matched_group 2 ver)
)
else (
eprintf "%s: cannot parse major.minor (%s)\n" prog ver;
exit 1
)
and parse_windows_version = function
| "7" -> Windows (6, 1, Client)
| "2k8r2" -> Windows (6, 1, Server)
| "2k12" -> Windows (6, 2, Server)
| "2k12r2" -> Windows (6, 3, Server)
| "2k16" -> Windows (10, 0, Server)
| _ ->
eprintf "%s: cannot parse Windows version, see ‘parse_windows_version’\n"
prog;
exit 1
and arch_of_string = function
| "x86_64" -> X86_64
| "aarch64" -> Aarch64
| "armv7l" -> Armv7
| "i686" -> I686
| "ppc64" -> PPC64
| "ppc64le" -> PPC64le
| "s390x" -> S390X
| s ->
eprintf "%s: unknown or unsupported arch (%s)\n" prog s; exit 1
and string_of_arch = function
| X86_64 -> "x86_64"
| Aarch64 -> "aarch64"
| Armv7 -> "armv7l"
| I686 -> "i686"
| PPC64 -> "ppc64"
| PPC64le -> "ppc64le"
| S390X -> "s390x"
and debian_arch_of_arch = function
| X86_64 -> "amd64"
| Aarch64 -> "arm64"
| Armv7 -> "armhf"
| I686 -> "i386"
| PPC64 -> "ppc64"
| PPC64le -> "ppc64el"
| S390X -> "s390x"
and filename_of_os os arch ext =
match os with
| Fedora ver ->
if arch = X86_64 then sprintf "fedora-%d%s" ver ext
else sprintf "fedora-%d-%s%s" ver (string_of_arch arch) ext
| Alma (major, minor) ->
if arch = X86_64 then sprintf "alma-%d.%d%s" major minor ext
else sprintf "alma-%d.%d-%s%s" major minor (string_of_arch arch) ext
| CentOS (major, minor) ->
if arch = X86_64 then sprintf "centos-%d.%d%s" major minor ext
else sprintf "centos-%d.%d-%s%s" major minor (string_of_arch arch) ext
| CentOSStream ver ->
if arch = X86_64 then sprintf "centosstream-%d%s" ver ext
else sprintf "centosstream-%d-%s%s" ver (string_of_arch arch) ext
| RHEL (major, minor) ->
if arch = X86_64 then sprintf "rhel-%d.%d%s" major minor ext
else sprintf "rhel-%d.%d-%s%s" major minor (string_of_arch arch) ext
| Debian (ver, _) ->
if arch = X86_64 then sprintf "debian-%d%s" ver ext
else sprintf "debian-%d-%s%s" ver (string_of_arch arch) ext
| Ubuntu (ver, _) ->
if arch = X86_64 then sprintf "ubuntu-%s%s" ver ext
else sprintf "ubuntu-%s-%s%s" ver (string_of_arch arch) ext
| FreeBSD (major, minor) ->
if arch = X86_64 then sprintf "freebsd-%d.%d%s" major minor ext
else sprintf "freebsd-%d.%d-%s%s" major minor (string_of_arch arch) ext
| Windows (major, minor, Client) ->
if arch = X86_64 then sprintf "windows-%d.%d-client%s" major minor ext
else sprintf "windows-%d.%d-client-%s%s"
major minor (string_of_arch arch) ext
| Windows (major, minor, Server) ->
if arch = X86_64 then sprintf "windows-%d.%d-server%s" major minor ext
else sprintf "windows-%d.%d-server-%s%s"
major minor (string_of_arch arch) ext
and string_of_os os arch = filename_of_os os arch ""
and string_of_os_noarch = function
| Fedora ver -> sprintf "fedora-%d" ver
| Alma (major, minor) -> sprintf "alma-%d.%d" major minor
| CentOS (major, minor) -> sprintf "centos-%d.%d" major minor
| CentOSStream ver -> sprintf "centosstream-%d" ver
| RHEL (major, minor) -> sprintf "rhel-%d.%d" major minor
| Debian (ver, _) -> sprintf "debian-%d" ver
| Ubuntu (ver, _) -> sprintf "ubuntu-%s" ver
| FreeBSD (major, minor) -> sprintf "freebsd-%d.%d" major minor
| Windows (major, minor, Client) -> sprintf "windows-%d.%d-client" major minor
| Windows (major, minor, Server) -> sprintf "windows-%d.%d-server" major minor
and can_sysprep_os = function
| RHEL _ | Alma _ | CentOS _ | CentOSStream _ | Fedora _
| Debian _ | Ubuntu _ -> true
| FreeBSD _ | Windows _ -> false
and needs_uefi os arch =
match os, arch with
| Fedora _, Armv7
| Fedora _, Aarch64
| RHEL _, Aarch64 -> true
| RHEL _, _ | Alma _, _ | CentOS _, _ | CentOSStream _, _ | Fedora _, _
| Debian _, _ | Ubuntu _, _
| FreeBSD _, _ | Windows _, _ -> false
and get_virtual_size_gb os arch =
match os with
| RHEL _ | Alma _ | CentOS _ | CentOSStream _ | Fedora _
| Debian _ | Ubuntu _
| FreeBSD _ -> 6
Windows 10
Windows from 2008 - 2012
Windows < = 2003
| Windows _ -> assert false
(* Create the answer file (kickstart, preseed or unattended-install
 * ISO) used to automate the install of this OS, returning its
 * filename, or None when the OS has no answer file (FreeBSD). *)
and make_kickstart os arch =
  match os with
  (* Kickstart. *)
  | Fedora _ | Alma _ | CentOS _ | CentOSStream _ | RHEL _ ->
    let ks_filename = filename_of_os os arch ".ks" in
    Some (make_kickstart_common ks_filename os arch)
  (* Debian-family preseed files. *)
  | Debian _ -> Some (copy_preseed_to_temporary "debian.preseed")
  | Ubuntu _ -> Some (copy_preseed_to_temporary "ubuntu.preseed")
  | FreeBSD _ -> None
  (* Windows unattended install via an autounattend.xml ISO. *)
  | Windows _ -> Some (make_unattend_iso os arch)
and make_kickstart_common ks_filename os arch =
let buf = Buffer.create 4096 in
let bpf fs = bprintf buf fs in
bpf "\
# Kickstart file for %s
# Generated by libguestfs.git/builder/templates/make-template.ml
" (string_of_os os arch);
(match os with
| Fedora n when n >= 34 -> ()
| RHEL (n, _)
| Alma (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 -> ()
| _ -> bpf "install\n";
);
bpf "\
text
reboot
lang en_US.UTF-8
keyboard us
network --bootproto dhcp
rootpw builder
firewall --enabled --ssh
timezone --utc America/New_York
";
(match os with
| RHEL (ver, _) when ver <= 4 ->
bpf "\
langsupport en_US
mouse generic
";
| _ -> ()
);
(match os with
| RHEL (3, _) -> ()
| _ ->
bpf "selinux --enforcing\n"
);
(match os with
| RHEL (5, _) -> bpf "key --skip\n"
| _ -> ()
);
bpf "\n";
bpf "bootloader --location=mbr --append=\"%s\"\n"
(kernel_cmdline_of_os os arch);
bpf "\n";
(match os with
| CentOS (8, _) ->
bpf "url --url=\"/\"\n"
| _ -> ()
);
bpf "\n";
(match os with
| CentOS ((3|4|5|6) as major, _) | RHEL ((3|4|5|6) as major, _) ->
let bootfs = if major <= 5 then "ext2" else "ext4" in
let rootfs = if major <= 4 then "ext3" else "ext4" in
bpf "\
zerombr
clearpart --all --initlabel
part /boot --fstype=%s --size=512 --asprimary
part swap --size=1024 --asprimary
part / --fstype=%s --size=1024 --grow --asprimary
" bootfs rootfs;
| Alma _ | CentOS _ | CentOSStream _ | RHEL _ | Fedora _ ->
bpf "\
zerombr
clearpart --all --initlabel --disklabel=gpt
autopart --type=plain
";
);
bpf "\n";
(match os with
| RHEL (3, _) -> ()
| _ ->
bpf "\
# Halt the system once configuration has finished.
poweroff
";
);
bpf "\n";
bpf "\
%%packages
@core
";
(match os with
| RHEL ((3|4|5), _) -> ()
| _ ->
bpf "%%end\n"
);
bpf "\n";
  (* Generate the %post script section.  The previous scripts did
   * many different things here.  The current script tries to update
   * the packages and enable Xen drivers only.
   *)
let regenerate_dracut () =
bpf "\
# To make dracut config changes permanent, we need to rerun dracut.
# Rerun dracut for the installed kernel (not the running kernel).
# See commit 0fa52e4e45d80874bc5ea5f112f74be1d3f3472f and
# -June/thread.html#00045
KERNEL_VERSION=\"$(rpm -q kernel --qf '%%{version}-%%{release}.%%{arch}\\n' |
sort -V | tail -1)\"
dracut -f /boot/initramfs-$KERNEL_VERSION.img $KERNEL_VERSION
"
in
(match os with
| Fedora _ ->
bpf "%%post\n";
bpf "\
# Ensure the installation is up-to-date.
dnf -y --best upgrade
# This required otherwise the kernel will not be bootable, see
#
# #c24
grub2-mkconfig -o %s
" (quote
(if needs_uefi os arch then "/etc/grub2-efi.cfg"
else "/etc/grub2.cfg"));
let needs_regenerate_dracut = ref false in
if arch = X86_64 then (
bpf "\
# Enable Xen domU support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" xen:vbd xen:vif \"' > virt-builder-xen-drivers.conf
popd
";
needs_regenerate_dracut := true
);
if arch = PPC64 || arch = PPC64le then (
bpf "\
# Enable virtio-scsi support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" virtio-blk virtio-scsi \"' > virt-builder-virtio-scsi.conf
popd
";
needs_regenerate_dracut := true
);
if !needs_regenerate_dracut then regenerate_dracut ();
bpf "%%end\n\n"
| RHEL (7,_) ->
bpf "%%post\n";
let needs_regenerate_dracut = ref false in
if arch = PPC64 || arch = PPC64le then (
bpf "\
# Enable virtio-scsi support.
pushd /etc/dracut.conf.d
echo 'add_drivers+=\" virtio-blk virtio-scsi \"' > virt-builder-virtio-scsi.conf
popd
";
needs_regenerate_dracut := true
);
if !needs_regenerate_dracut then regenerate_dracut ();
bpf "%%end\n\n"
| _ -> ()
);
bpf "# EOF\n";
let chan = open_out (ks_filename ^ ".new") in
Buffer.output_buffer chan buf;
close_out chan;
let cmd =
sprintf "mv %s %s" (quote (ks_filename ^ ".new")) (quote ks_filename) in
if Sys.command cmd <> 0 then exit 1;
ks_filename
(* Copy the named preseed file into a fresh temporary directory and
 * return the path of the copy.
 * d-i only works if the file is literally called "/preseed.cfg",
 * hence the fixed basename. *)
and copy_preseed_to_temporary source =
  let d = Filename.get_temp_dir_name () // random8 () ^ ".tmp" in
  let f = d // "preseed.cfg" in
  Unix.mkdir d 0o700;
  let cmd = sprintf "cp %s %s" (quote source) (quote f) in
  if Sys.command cmd <> 0 then exit 1;
  f
and make_unattend_iso os arch =
printf "enter Windows product key: ";
let product_key = read_line () in
let output_iso =
Sys.getcwd () // filename_of_os os arch "-unattend.iso" in
unlink_on_exit output_iso;
let d = Filename.get_temp_dir_name () // random8 () in
Unix.mkdir d 0o700;
let config_dir = d // "config" in
Unix.mkdir config_dir 0o700;
let f = config_dir // "autounattend.xml" in
let chan = open_out f in
let arch =
match arch with
| X86_64 -> "amd64"
| I686 -> "x86"
| _ ->
eprintf "%s: Windows architecture %s not supported\n"
prog (string_of_arch arch);
exit 1 in
  (* Tip: If the install fails with a useless error "The answer file is
   * invalid", type Shift + F10 into the setup screen and look for a
   * file called \Windows\Panther\Setupact.log (NB:
   * not \Windows\Setupact.log)
   *)
fprintf chan "
<unattend xmlns=\"urn:schemas-microsoft-com:unattend\"
xmlns:ms=\"urn:schemas-microsoft-com:asm.v3\"
xmlns:wcm=\"\">
<settings pass=\"windowsPE\">
<component name=\"Microsoft-Windows-Setup\"
publicKeyToken=\"31bf3856ad364e35\"
language=\"neutral\"
versionScope=\"nonSxS\"
processorArchitecture=\"%s\">
<UserData>
<AcceptEula>true</AcceptEula>
<ProductKey>
<Key>%s</Key>
<WillShowUI>OnError</WillShowUI>
</ProductKey>
</UserData>
<DiskConfiguration>
<Disk wcm:action=\"add\">
<DiskID>0</DiskID>
<WillWipeDisk>true</WillWipeDisk>
<CreatePartitions>
<!-- System partition -->
<CreatePartition wcm:action=\"add\">
<Order>1</Order>
<Type>Primary</Type>
<Size>300</Size>
</CreatePartition>
<!-- Windows partition -->
<CreatePartition wcm:action=\"add\">
<Order>2</Order>
<Type>Primary</Type>
<Extend>true</Extend>
</CreatePartition>
</CreatePartitions>
<ModifyPartitions>
<!-- System partition -->
<ModifyPartition wcm:action=\"add\">
<Order>1</Order>
<PartitionID>1</PartitionID>
<Label>System</Label>
<Format>NTFS</Format>
<Active>true</Active>
</ModifyPartition>
<!-- Windows partition -->
<ModifyPartition wcm:action=\"add\">
<Order>2</Order>
<PartitionID>2</PartitionID>
<Label>Windows</Label>
<Letter>C</Letter>
<Format>NTFS</Format>
</ModifyPartition>
</ModifyPartitions>
</Disk>
<WillShowUI>OnError</WillShowUI>
</DiskConfiguration>
<ImageInstall>
<OSImage>
<WillShowUI>Never</WillShowUI>
<InstallFrom>
<MetaData>
<Key>/IMAGE/INDEX</Key>
<Value>1</Value>
</MetaData>
</InstallFrom>
<InstallTo>
<DiskID>0</DiskID>
<PartitionID>2</PartitionID>
</InstallTo>
</OSImage>
</ImageInstall>
</component>
<component name=\"Microsoft-Windows-International-Core-WinPE\"
publicKeyToken=\"31bf3856ad364e35\"
language=\"neutral\"
versionScope=\"nonSxS\"
processorArchitecture=\"%s\">
<SetupUILanguage>
<UILanguage>en-US</UILanguage>
</SetupUILanguage>
<SystemLocale>en-US</SystemLocale>
<UILanguage>en-US</UILanguage>
<UserLocale>en-US</UserLocale>
</component>
</settings>
</unattend>"
arch product_key arch;
close_out chan;
let cmd = sprintf "cd %s && mkisofs -o %s -J -r config"
(quote d) (quote output_iso) in
if Sys.command cmd <> 0 then exit 1;
let cmd = sprintf "rm -rf %s" (quote d) in
if Sys.command cmd <> 0 then exit 1;
output_iso
and make_boot_media os arch =
match os, arch with
| Alma (major, minor), X86_64 ->
      (* UK mirror *)
Location (sprintf "/\
%d.%d/BaseOS/x86_64/kickstart/"
major minor)
| CentOS (major, _), Aarch64 ->
Location (sprintf "/"
major)
| CentOS (7, _), X86_64 ->
      (* For 6.x we rebuild this every time there is a new 6.x release, and bump
       * the revision in the index.
       * For 7.x this always points to the latest CentOS, so
       * effectively the minor number is always ignored.
       *)
Location "-7/7/os/x86_64/"
| CentOS (8, _), X86_64 ->
      (* This is probably the last CentOS 8 release. *)
Location "/"
| CentOSStream 8, X86_64 ->
Location (sprintf "-stream/BaseOS/\
x86_64/os")
| CentOSStream ver, X86_64 ->
Location (sprintf "-stream/BaseOS/\
x86_64/os" ver)
| Debian (_, dist), arch ->
Location (sprintf "-%s"
dist (debian_arch_of_arch arch))
| Fedora ver, Armv7 ->
Location (sprintf "/\
%d/Server/armhfp/os/" ver)
| Fedora ver, X86_64 when ver < 21 ->
Location (sprintf "/\
releases/%d/Fedora/x86_64/os/" ver)
| Fedora ver, X86_64 ->
Location (sprintf "/\
%d/Server/x86_64/os/" ver)
| Fedora ver, Aarch64 ->
Location (sprintf "/\
%d/Server/aarch64/os/" ver)
| Fedora ver, I686 ->
Location (sprintf "-secondary/\
releases/%d/Server/i386/os/" ver)
| Fedora ver, PPC64 ->
Location (sprintf "-secondary/\
releases/%d/Server/ppc64/os/" ver)
| Fedora ver, PPC64le ->
Location (sprintf "-secondary/\
releases/%d/Server/ppc64le/os/" ver)
| Fedora ver, S390X ->
Location (sprintf "-secondary/\
releases/%d/Server/s390x/os/" ver)
| RHEL (3, minor), X86_64 ->
Location (sprintf "-3/\
U%d/AS/x86_64/tree" minor)
| RHEL (4, minor), X86_64 ->
Location (sprintf "-4/\
U%d/AS/x86_64/tree" minor)
| RHEL (5, minor), I686 ->
Location (sprintf "/\
RHEL-5-Server/U%d/i386/os" minor)
| RHEL (5, minor), X86_64 ->
Location (sprintf "/\
RHEL-5-Server/U%d/x86_64/os" minor)
| RHEL (6, minor), I686 ->
Location (sprintf "/\
RHEL-6/6.%d/Server/i386/os" minor)
| RHEL (6, minor), X86_64 ->
Location (sprintf "/\
RHEL-6/6.%d/Server/x86_64/os" minor)
| RHEL (7, minor), X86_64 ->
Location (sprintf "/\
rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/x86_64/os" minor)
| RHEL (7, minor), PPC64 ->
Location (sprintf "/\
rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/ppc64/os" minor)
| RHEL (7, minor), PPC64le ->
Location (sprintf "/\
rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/ppc64le/os" minor)
| RHEL (7, minor), S390X ->
Location (sprintf "/\
rhel-6-7-8/rhel-7/RHEL-7/7.%d/Server/s390x/os" minor)
| RHEL (7, minor), Aarch64 ->
Location (sprintf "/\
RHEL-ALT-7/7.%d/Server/aarch64/os" minor)
| RHEL (8, minor), arch ->
Location (sprintf "/\
rhel-6-7-8/rhel-8/RHEL-8/8.%d.0/BaseOS/%s/os"
minor (string_of_arch arch))
| RHEL (9, minor), arch ->
Location (sprintf "/\
RHEL-9/9.%d.0/BaseOS/%s/os" minor (string_of_arch arch))
| Ubuntu (_, dist), X86_64 ->
Location (sprintf "/\
%s/main/installer-amd64" dist)
| Ubuntu (_, dist), PPC64le ->
Location (sprintf "-ports/dists/\
%s/main/installer-ppc64el" dist)
| FreeBSD (major, minor), X86_64 ->
let iso = sprintf "FreeBSD-%d.%d-RELEASE-amd64-disc1.iso"
major minor in
let iso_xz = sprintf "ftp/\
amd64/amd64/ISO-IMAGES/%d.%d/%s.xz"
major minor iso in
let cmd = sprintf "wget -nc %s" (quote iso_xz) in
if Sys.command cmd <> 0 then exit 1;
let cmd = sprintf "unxz -f --keep %s.xz" iso in
if Sys.command cmd <> 0 then exit 1;
CDRom iso
| Windows (major, minor, variant), arch ->
let iso_name =
match major, minor, variant, arch with
      (* Windows 7 *)
"en_windows_7_ultimate_with_sp1_x64_dvd_u_677332.iso"
"en_windows_server_2008_r2_with_sp1_x64_dvd_617601.iso"
      (* Windows Server 2012 *)
"en_windows_server_2012_x64_dvd_915478.iso"
      (* Windows Server 2012 R2 *)
"en_windows_server_2012_r2_with_update_x64_dvd_6052708.iso"
      (* Windows Server 2016 *)
"en_windows_server_2016_updated_feb_2018_x64_dvd_11636692.iso"
| _ ->
eprintf "%s: don't have an installer ISO for this version of \
Windows\n"
prog;
exit 1 in
CDRom (windows_installers // iso_name)
| _ ->
eprintf "%s: don't know how to calculate the --location for this OS \
and architecture\n" prog;
exit 1
(* Print manual steps the operator must perform during installs
 * that are not fully automated. *)
and print_install_notes = function
  | Ubuntu _ ->
    printf "\
Some preseed functions are not automated. You may need to hit [Return]
a few times during the install.\n"
  | FreeBSD _ ->
    printf "\
The FreeBSD install is not automated. Select all defaults, except:
- root password: builder
- timezone: UTC
- do not add any user accounts\n"
  | _ -> ()
(* Whether the installer needs a graphical console; when false we
 * pass --nographics to virt-install. *)
and needs_graphics = function
  | Alma _ | CentOS _ | CentOSStream _ | RHEL _
  | Debian _ | Ubuntu _ | Fedora _ -> false
  | FreeBSD _ | Windows _ -> true
(* NB: Arguments do not need to be quoted, because we pass them
 * directly to exec(2).
 *)
and make_virt_install_command os arch ks tmpname tmpout tmpefivars
boot_media virtual_size_gb =
let args = ref [] in
let add arg = args := arg :: !args in
add "virt-install";
  (* This ensures the libvirt domain will be automatically deleted
   * when virt-install exits.  However it doesn't work for certain
   * types of guest.
   *)
(match os with
| Windows _ ->
printf "after Windows has installed, do:\n";
printf " virsh shutdown %s\n virsh undefine %s\n%!" tmpname tmpname;
| _ -> add "--transient"
);
  (* Don't try relabelling everything.  This is particularly necessary
   * for the Windows install ISOs which are located on NFS.
   *)
(match os with
| Windows _ -> add "--security=type=none"
| _ -> ()
);
add (sprintf "--name=%s" tmpname);
add "--ram=4096";
(match arch with
| X86_64 ->
add "--arch=x86_64";
add "--cpu=host";
add "--vcpus=4"
| PPC64 ->
add "--arch=ppc64";
add "--machine=pseries";
add "--cpu=power7";
add "--vcpus=1"
| PPC64le ->
add "--arch=ppc64le";
add "--machine=pseries";
add "--cpu=power8";
add "--vcpus=1"
| Armv7 ->
add "--arch=armv7l";
    (* RHBZ#1633328 *)
add "--vcpus=1"
| arch ->
add (sprintf "--arch=%s" (string_of_arch arch));
add "--vcpus=1"
);
add (sprintf "--os-variant=%s" (os_variant_of_os ~for_fedora:true os arch));
(match tmpefivars with
| Some (code, vars) ->
add "--boot";
add (sprintf "loader=%s,loader_ro=yes,loader_type=pflash,nvram=%s"
code vars)
| _ -> ()
);
  (* --initrd-inject and --extra-args flags for Linux only. *)
(match os with
| Debian _ | Ubuntu _
| Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _ ->
let ks =
match ks with None -> assert false | Some ks -> ks in
add (sprintf "--initrd-inject=%s" ks);
let os_extra =
match os with
| Debian _ | Ubuntu _ -> "auto"
| Fedora n when n >= 34 ->
sprintf "inst.ks=file:/%s" (Filename.basename ks)
| Alma (major, _) ->
sprintf "inst.ks=file:/%s inst.repo=/\
almalinux/%d/BaseOS/x86_64/os/"
(Filename.basename ks) major
| RHEL (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 ->
sprintf "inst.ks=file:/%s" (Filename.basename ks)
| Fedora _ | RHEL _ | CentOS _ | CentOSStream _ ->
sprintf "ks=file:/%s" (Filename.basename ks)
| FreeBSD _ | Windows _ -> assert false in
let proxy =
let p = try Some (Sys.getenv "http_proxy") with Not_found -> None in
match p with
| None ->
(match os with
| Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _
| Ubuntu _ -> ""
| Debian _ -> "mirror/http/proxy="
| FreeBSD _ | Windows _ -> assert false
)
| Some p ->
match os with
| Fedora n when n >= 34 -> sprintf "inst.proxy=" ^ p
| RHEL (n, _)
| Alma (n, _) | CentOS (n, _) | CentOSStream n when n >= 9 ->
"inst.proxy=" ^ p
| Fedora _ | RHEL _ | Alma _ | CentOS _ | CentOSStream _ ->
"proxy=" ^ p
| Debian _ | Ubuntu _ -> "mirror/http/proxy=" ^ p
| FreeBSD _ | Windows _ -> assert false in
os_extra proxy (kernel_cmdline_of_os os arch));
| FreeBSD _ | Windows _ -> ()
);
add (sprintf "--disk=%s,size=%d,format=raw"
(Sys.getcwd () // tmpout) virtual_size_gb);
(match boot_media with
| Location location -> add (sprintf "--location=%s" location)
| CDRom iso -> add (sprintf "--disk=%s,device=cdrom,boot_order=1" iso)
);
  (* Windows requires one or two extra CDs!
   * See: ...-installation-of-windows-server-2012-on-kvm
   * (URL truncated in this copy of the file)
   *)
(match os with
| Windows _ ->
let unattend_iso =
match ks with None -> assert false | Some ks -> ks in
add (sprintf "--disk=%s,device=cdrom,boot_order=99" unattend_iso)
| _ -> ()
);
add "--serial=pty";
if not (needs_graphics os) then add "--nographics";
Array.of_list (List.rev !args)
(* Pretty-print the virt-install argv to [chan], starting a new
 * shell continuation line before each option, shell-quoted so the
 * printed command can be copied and re-run by hand. *)
and print_virt_install_command chan vi =
  Array.iter (
    fun arg ->
      (* Options (leading '-') begin a fresh continuation line. *)
      if arg.[0] = '-' then fprintf chan "\\\n %s " (quote arg)
      else fprintf chan "%s " (quote arg)
  ) vi;
  fprintf chan "\n\n%!"
(* The optional [?for_fedora] flag means that we only return
 * libosinfo data as currently supported by the latest version of
 * Fedora.
 *
 * This is because if you try to use [virt-install --os-variant=...]
 * with an os-variant which the host doesn't support, it won't work,
 * and I currently use Fedora, so whatever is supported there matters.
 *)
(* Map OS + architecture to the libosinfo os-variant string passed
 * to virt-install (see the explanatory comment above about the
 * [?for_fedora] flag). *)
and os_variant_of_os ?(for_fedora = false) os arch =
  if not for_fedora then (
    match os with
    | Fedora ver -> sprintf "fedora%d" ver
    | Alma (major, _) -> sprintf "almalinux%d" major
    | CentOS (major, minor) -> sprintf "centos%d.%d" major minor
    | CentOSStream ver -> sprintf "centosstream%d" ver
    | RHEL (major, minor) -> sprintf "rhel%d.%d" major minor
    | Debian (ver, _) -> sprintf "debian%d" ver
    | Ubuntu (ver, _) -> sprintf "ubuntu%s" ver
    | FreeBSD (major, minor) -> sprintf "freebsd%d.%d" major minor
    | Windows (6, 1, Client) -> "win7"
    | Windows (6, 1, Server) -> "win2k8r2"
    | Windows (6, 2, Server) -> "win2k12"
    | Windows (6, 3, Server) -> "win2k12r2"
    | Windows (10, 0, Server) -> "win2k16"
    | Windows _ -> assert false
  )
  else (
    match os, arch with
    (* This special case for Fedora/ppc64{,le} is needed to work
     * around a bug in virt-install:
     * (bug URL lost in this copy of the file)
     *)
    | Fedora _, (PPC64|PPC64le) -> "fedora22"
    | Fedora ver, _ when ver <= 23 ->
      sprintf "fedora%d" ver
    (* max version known in Fedora 34
     * NOTE(review): the match arm this comment annotated appears to
     * be missing from this copy of the file -- TODO restore it. *)
    | Alma (major, _), _ -> sprintf "almalinux%d" major
    (* max version known in Fedora 36
     * NOTE(review): its match arm also appears to be missing. *)
    | CentOS (major, minor), _ when (major, minor) <= (7,0) ->
      sprintf "centos%d.%d" major minor
    (* max version known in Fedora 31 *)
    (* max version known in Fedora 36 *)
    (* max version known in Fedora 36
     * NOTE(review): the arms annotated by the three comments above
     * appear to be missing -- newer CentOS/Stream values will raise
     * Match_failure here.  TODO restore from upstream. *)
    | RHEL (6, minor), _ when minor <= 8 ->
      sprintf "rhel6.%d" minor
    (* max version known in Fedora 29 *)
    | RHEL (7, minor), _ when minor <= 4 ->
      sprintf "rhel7.%d" minor
    (* max version known in Fedora 29 *)
    (* max version known in Fedora 36 *)
    (* max version known in Fedora 37 *)
    | RHEL (major, minor), _ ->
      sprintf "rhel%d.%d" major minor
    | Debian (ver, _), _ when ver <= 8 -> sprintf "debian%d" ver
    (* max version known in Fedora 26
     * NOTE(review): missing arm for Debian > 8 -- TODO confirm. *)
    | Ubuntu (ver, _), _ -> sprintf "ubuntu%s" ver
    | FreeBSD (major, minor), _ -> sprintf "freebsd%d.%d" major minor
    | Windows (6, 1, Client), _ -> "win7"
    | Windows (6, 1, Server), _ -> "win2k8r2"
    | Windows (6, 2, Server), _ -> "win2k12"
    | Windows (6, 3, Server), _ -> "win2k12r2"
    | Windows (10, 0, Server), _ -> "win2k16"
    | Windows _, _ -> assert false
  )
(* Kernel command line appended to the bootloader configuration,
 * chosen so the serial console works on each architecture. *)
and kernel_cmdline_of_os os arch =
  match os, arch with
  | _, X86_64
  | _, I686
  | _, S390X ->
    "console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"
  | _, Aarch64 ->
    "console=ttyAMA0 earlyprintk=pl011,0x9000000 ignore_loglevel \
no_timer_check printk.time=1 rd_NO_PLYMOUTH"
  | _, Armv7 ->
    "console=tty0 console=ttyAMA0,115200 rd_NO_PLYMOUTH"
  | (Debian _|Fedora _|Ubuntu _), (PPC64|PPC64le) ->
    "console=tty0 console=hvc0 rd_NO_PLYMOUTH"
  | (RHEL _ | Alma _ | CentOS _ | CentOSStream _), PPC64
  | (RHEL _ | Alma _ | CentOS _ | CentOSStream _), PPC64le ->
    "console=tty0 console=ttyS0,115200 rd_NO_PLYMOUTH"
  (* Callers never ask for a Linux kernel command line for these. *)
  | FreeBSD _, _ | Windows _, _ -> assert false
(* Return an optional post-install hook, run against the finished
 * guest through a libguestfs handle [g] after installation. *)
and make_postinstall os arch =
  match os with
  | Debian _ | Ubuntu _ ->
    Some (
      fun g ->
        (* Remove apt proxy configuration (contributor credit lost in
         * this copy of the file). *)
        g#rm_f "/etc/apt/apt.conf";
        g#touch "/etc/apt/apt.conf"
    )
  | RHEL (major, minor) when major >= 5 ->
    Some (
      fun g ->
        (* RHEL guests require alternate yum configuration pointing to
         * Red Hat's internal servers.
         *)
        let yum_conf = make_rhel_yum_conf major minor arch in
        g#write "/etc/yum.repos.d/download.devel.redhat.com.repo" yum_conf
    )
  | RHEL _ | Fedora _ | Alma _ | CentOS _ | CentOSStream _
  | FreeBSD _ | Windows _ -> None
and make_rhel_yum_conf major minor arch =
let buf = Buffer.create 4096 in
let bpf fs = bprintf buf fs in
if major <= 9 then (
let baseurl, srpms, optional =
match major, arch with
| 5, (I686|X86_64) ->
let arch = match arch with I686 -> "i386" | _ -> string_of_arch arch in
let topurl =
sprintf "-5-Server/U%d"
minor in
sprintf "%s/%s/os/Server" topurl arch,
sprintf "%s/source/SRPMS" topurl,
None
| 6, (I686|X86_64) ->
let arch = match arch with I686 -> "i386" | _ -> string_of_arch arch in
let topurl =
sprintf "-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl arch,
sprintf "%s/source/SRPMS" topurl,
Some ("Optional",
sprintf "%s/Server/optional/%s/os" arch topurl,
sprintf "%s/Server/optional/source/SRPMS" topurl)
| 7, (X86_64|PPC64|PPC64le|S390X) ->
let topurl =
sprintf "-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server/source/tree" topurl,
Some ("Optional",
sprintf "%s/Server-optional/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server-optional/source/tree" topurl)
| 7, Aarch64 ->
let topurl =
sprintf "-ALT-%d/%d.%d"
major major minor in
sprintf "%s/Server/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server/source/tree" topurl,
Some ("Optional",
sprintf "%s/Server-optional/%s/os" topurl (string_of_arch arch),
sprintf "%s/Server-optional/source/tree" topurl)
| (8|9), arch ->
let topurl =
sprintf "-%d/%d.%d.0"
major major minor in
sprintf "%s/BaseOS/%s/os" topurl (string_of_arch arch),
sprintf "%s/BaseOS/source/tree" topurl,
Some ("AppStream",
sprintf "%s/AppStream/%s/os" topurl (string_of_arch arch),
sprintf "%s/AppStream/source/tree" topurl)
| _ -> assert false in
bpf "\
# Yum configuration pointing to Red Hat servers.
[rhel%d]
name=RHEL %d Server
baseurl=%s
enabled=1
gpgcheck=0
keepcache=0
[rhel%d-source]
name=RHEL %d Server Source
baseurl=%s
enabled=0
gpgcheck=0
keepcache=0
" major major baseurl major major srpms;
(match optional with
| None -> ()
| Some (name, optionalbaseurl, optionalsrpms) ->
let lc_name = String.lowercase_ascii name in
bpf "\
[rhel%d-%s]
name=RHEL %d Server %s
baseurl=%s
enabled=1
gpgcheck=0
keepcache=0
[rhel%d-%s-source]
name=RHEL %d Server %s
baseurl=%s
enabled=0
gpgcheck=0
keepcache=0
" major lc_name major lc_name optionalbaseurl
major lc_name major lc_name optionalsrpms
)
) else (
    (* not implemented for RHEL major >= 10 *)
);
Buffer.contents buf
(* Write the virt-builder index fragment describing the finished
 * template image.  Written atomically: to <fragment>.new first,
 * then renamed over the final name. *)
and make_index_fragment os arch index_fragment output nvram revision
                        expandfs lvexpandfs virtual_size_gb =
  (* Virtual size in bytes = GB * 2^30, computed in Int64 to avoid
   * 32-bit overflow. *)
  let virtual_size = Int64.of_int virtual_size_gb in
  let virtual_size = Int64.mul virtual_size 1024_L in
  let virtual_size = Int64.mul virtual_size 1024_L in
  let virtual_size = Int64.mul virtual_size 1024_L in
  let chan = open_out (index_fragment ^ ".new") in
  let fpf fs = fprintf chan fs in
  fpf "[%s]\n" (string_of_os_noarch os);
  fpf "name=%s\n" (long_name_of_os os arch);
  fpf "osinfo=%s\n" (os_variant_of_os os arch);
  fpf "arch=%s\n" (string_of_arch arch);
  fpf "file=%s\n" output;
  (match revision with
   | None -> ()
   | Some i -> fpf "revision=%d\n" i
  );
  fpf "checksum[sha512]=%s\n" (sha512sum_of_file output);
  fpf "format=raw\n";
  fpf "size=%Ld\n" virtual_size;
  fpf "compressed_size=%d\n" (size_of_file output);
  fpf "expand=%s\n" expandfs;
  (match lvexpandfs with
   | None -> ()
   | Some fs -> fpf "lvexpand=%s\n" fs
  );
  (* Multi-line notes: first line after "notes=", the rest indented. *)
  let notes = notes_of_os os arch nvram in
  (match notes with
   | first :: notes ->
     fpf "notes=%s\n" first;
     List.iter (fpf " %s\n") notes
   | [] -> assert false
  );
  fpf "\n";
  close_out chan;
  let cmd =
    sprintf "mv %s %s"
      (quote (index_fragment ^ ".new")) (quote index_fragment) in
  if Sys.command cmd <> 0 then exit 1
(* Human-readable name of the OS, used in the index "name=" field.
 * The architecture is included in parentheses except for x86-64,
 * which is the unmarked default. *)
and long_name_of_os os arch =
  match os, arch with
  | Alma (major, minor), X86_64 ->
    sprintf "AlmaLinux %d.%d" major minor
  | Alma (major, minor), arch ->
    sprintf "AlmaLinux %d.%d (%s)" major minor (string_of_arch arch)
  | CentOS (major, minor), X86_64 ->
    sprintf "CentOS %d.%d" major minor
  | CentOS (major, minor), arch ->
    sprintf "CentOS %d.%d (%s)" major minor (string_of_arch arch)
  | CentOSStream ver, X86_64 ->
    sprintf "CentOS Stream %d" ver
  | CentOSStream ver, arch ->
    sprintf "CentOS Stream %d (%s)" ver (string_of_arch arch)
  | Debian (ver, dist), X86_64 ->
    sprintf "Debian %d (%s)" ver dist
  | Debian (ver, dist), arch ->
    sprintf "Debian %d (%s) (%s)" ver dist (string_of_arch arch)
  | Fedora ver, X86_64 ->
    sprintf "Fedora® %d Server" ver
  | Fedora ver, arch ->
    sprintf "Fedora® %d Server (%s)" ver (string_of_arch arch)
  | RHEL (major, minor), X86_64 ->
    sprintf "Red Hat Enterprise Linux® %d.%d" major minor
  | RHEL (major, minor), arch ->
    sprintf "Red Hat Enterprise Linux® %d.%d (%s)"
      major minor (string_of_arch arch)
  | Ubuntu (ver, dist), X86_64 ->
    sprintf "Ubuntu %s (%s)" ver dist
  | Ubuntu (ver, dist), arch ->
    sprintf "Ubuntu %s (%s) (%s)" ver dist (string_of_arch arch)
  | FreeBSD (major, minor), X86_64 ->
    sprintf "FreeBSD %d.%d" major minor
  | FreeBSD (major, minor), arch ->
    sprintf "FreeBSD %d.%d (%s)" major minor (string_of_arch arch)
  | Windows (6, 1, Client), arch ->
    sprintf "Windows 7 (%s)" (string_of_arch arch)
  | Windows (6, 1, Server), arch ->
    sprintf "Windows Server 2008 R2 (%s)" (string_of_arch arch)
  | Windows (6, 2, Server), arch ->
    sprintf "Windows Server 2012 (%s)" (string_of_arch arch)
  | Windows (6, 3, Server), arch ->
    sprintf "Windows Server 2012 R2 (%s)" (string_of_arch arch)
  | Windows (10, 0, Server), arch ->
    sprintf "Windows Server 2016 (%s)" (string_of_arch arch)
  | Windows _, _ -> assert false
(* Build the list of human-readable notes included in the index
 * fragment for this template.  Notes are accumulated in reverse
 * onto [args] via [add] and reversed at the end. *)
and notes_of_os os arch nvram =
  let args = ref [] in
  let add arg = args := arg :: !args in
  add (long_name_of_os os arch);
  add "";
  (* Per-distro description line(s). *)
  (match os with
   | Alma _ ->
     add "This AlmaLinux image contains only unmodified @Core group packages."
   | CentOS _ ->
     add "This CentOS image contains only unmodified @Core group packages."
   | CentOSStream _ ->
     add "This CentOS Stream image contains only unmodified @Core \
group packages."
   | Debian _ ->
     add "This is a minimal Debian install."
   | Fedora _ ->
     add "This Fedora image contains only unmodified @Core group packages.";
     add "";
     add "Fedora and the Infinity design logo are trademarks of Red Hat, Inc.";
     (* NOTE(review): the URL below was lost during extraction. *)
     add "Source and further information is available from \
/"
   | Ubuntu _ ->
     add "This is a minimal Ubuntu install."
   | FreeBSD _ ->
     add "This is an all-default FreeBSD install."
   | Windows _ ->
     add "This is an unattended Windows install.";
     add "";
     add "You must have an MSDN subscription to use this image."
  );
  add "";
  (* Reusable chunks of notes shared by several Debian-family images. *)
  let reconfigure_ssh_host_keys_debian () =
    add "This image does not contain SSH host keys. To regenerate them use:";
    add "";
    add " --firstboot-command \"dpkg-reconfigure openssh-server\"";
    add "";
  in
  let fix_serial_console_debian () =
    add "The serial console is not working in this image. To enable it, do:";
    add "";
    add " --edit '/etc/default/grub:";
    add " s/^GRUB_CMDLINE_LINUX_DEFAULT=.*/GRUB_CMDLINE_LINUX_DEFAULT=\"console=tty0 console=ttyS0,115200n8\"/' \\";
    add " --run-command update-grub";
    add ""
  in
  let builder_account_warning () =
    add "IMPORTANT WARNING:";
    add "It seems to be impossible to create an Ubuntu >= 14.04 image using";
    add "preseed without creating a user account. Therefore this image";
    add "contains a user account 'builder'. I have disabled it, so that";
    add "people who don't read release notes don't get caught out, but you";
    add "might still wish to delete it completely.";
    add ""
  in
  (* Version-specific caveats. *)
  (match os with
   | CentOS (6, _) ->
     add "‘virt-builder centos-6’ will always install the latest 6.x release.";
     add ""
   | Debian ((8|9), _) ->
     reconfigure_ssh_host_keys_debian ();
   | Debian _ ->
     add "This image is so very minimal that it only includes an ssh server";
     reconfigure_ssh_host_keys_debian ();
   | Ubuntu ("16.04", _) ->
     builder_account_warning ();
     fix_serial_console_debian ();
     reconfigure_ssh_host_keys_debian ();
   | Ubuntu (ver, _) when ver >= "14.04" ->
     builder_account_warning ();
     reconfigure_ssh_host_keys_debian ();
   | Ubuntu _ ->
     reconfigure_ssh_host_keys_debian ();
   | _ -> ()
  );
  (match nvram with
   | Some vars ->
     add "You will need to use the associated UEFI NVRAM variables file:";
     (* NOTE(review): the format string below lost its URL during
      * extraction -- (sprintf " " vars) does not type-check.
      * TODO restore the original download-URL format string. *)
     add (sprintf " " vars);
     add "";
   | None -> ()
  );
  add "This template was generated by a script in the libguestfs source tree:";
  add " builder/templates/make-template.ml";
  add "Associated files used to prepare this template can be found in the";
  add "same directory.";
  List.rev !args
(* Read the "revision=N" field from an existing index fragment.
 * Returns `No_file if the file cannot be opened, `No_revision if it
 * exists but contains no revision line, or `Revision n. *)
and read_revision filename =
  match (try Some (open_in filename) with Sys_error _ -> None) with
  | None -> `No_file
  | Some chan ->
    let r = ref `No_revision in
    let rex = Str.regexp "^revision=\\([0-9]+\\)$" in
    (try
       let rec loop () =
         let line = input_line chan in
         if Str.string_match rex line 0 then (
           r := `Revision (int_of_string (Str.matched_group 1 line));
           (* Found it: reuse End_of_file to break out of the loop. *)
           raise End_of_file
         );
         loop ()
       in
       loop ()
     with End_of_file -> ()
    );
    close_in chan;
    !r
(* Compute the SHA-512 checksum of a file by shelling out to
 * sha512sum(1); exits on subprocess failure. *)
and sha512sum_of_file filename =
  let cmd = sprintf "sha512sum %s | awk '{print $1}'" (quote filename) in
  let chan = Unix.open_process_in cmd in
  let line = input_line chan in
  let pstat = Unix.close_process_in chan in
  check_process_status_for_errors pstat;
  line
(* Size of a file in bytes. *)
and size_of_file filename = (Unix.stat filename).Unix.st_size
(* Open a guest image with libguestfs and inspect it.  If [mount] is
 * true, also mount the guest's filesystems at their inspected
 * mountpoints.  Exits if the image cannot be inspected (which
 * usually means the guest installation failed). *)
and open_guest ?(mount = false) filename =
  let g = new Guestfs.guestfs () in
  g#add_drive_opts ~format:"raw" filename;
  g#launch ();
  let roots = g#inspect_os () in
  if Array.length roots = 0 then (
    eprintf "%s: cannot inspect this guest - \
this may mean guest installation failed\n" prog;
    exit 1
  );
  if mount then (
    let root = roots.(0) in
    let mps = g#inspect_get_mountpoints root in
    (* Sort by path length so parents are mounted before children. *)
    let cmp (a,_) (b,_) = compare (String.length a) (String.length b) in
    let mps = List.sort cmp mps in
    List.iter (fun (mp, dev) -> g#mount dev mp) mps
  );
  g
(* Exit with a diagnostic unless the subprocess exited cleanly. *)
and check_process_status_for_errors = function
  | Unix.WEXITED 0 -> ()
  | Unix.WEXITED i ->
    eprintf "command exited with %d\n%!" i;
    exit 1
  | Unix.WSIGNALED i ->
    eprintf "command killed by signal %d\n%!" i;
    exit 1
  | Unix.WSTOPPED i ->
    eprintf "command stopped by signal %d\n%!" i;
    exit 1
(* Generate a random 8-character string of lowercase letters and
 * digits; used to make unique temporary file and domain names.
 * The alphabet is fixed once; each call draws 8 fresh characters. *)
and random8 =
  let alphabet = "abcdefghijklmnopqrstuvwxyz0123456789" in
  fun () ->
    String.init 8 (fun _ -> alphabet.[Random.int (String.length alphabet)])
let () = main ()
|
623935d634f2f19c99441beb9b82cf8004d7b454f5db9befc636d0246ad29519 | carl-eastlund/dracula | list-utilities.rkt | #lang racket/base
(require "../lang/dracula.rkt" "../lang/do-check.rkt")
(provide (all-defined-out))
(begin-below
(in-package "ACL2")
;;(include-book "arithmetic/top" :dir :system)
;;(include-book "arithmetic-2/floor-mod/floor-mod" :dir :system)
(set-compile-fns t)
;;==============================================================================
;;====== Function: (break-at-nth n xs) =========================================
;;
Returns list with two elements
( 1 ) the first n elements of xs
( 2 ) xs without its first n elements
;;
Pre : ( and ( integerp n )
;; (>= n 0)
;; (true-listp xs))
;; Post: (and (= (min n (length xs))
;; (length (car (break-at-nth n xs))))
;; (equal (append (car (break-at-nth n xs))
( cadr ( break - at - nth n xs ) ) )
;; xs))
;;==============================================================================
(defun break-at-nth (n xs)
(if (or (not (integerp n))
(<= n 0)
(not (consp xs)))
(list '() xs)
(let* ((first-thing (car xs))
(break-of-rest (break-at-nth (- n 1) (cdr xs)))
(prefix (car break-of-rest))
(suffix (cadr break-of-rest)))
(list (cons first-thing prefix) suffix))))
;;====== Function: (break-at-set delimiters xs) ================================
;;
Returns list with two elements
( 1 ) the portion of xs that precedes its first element that is a
;; member of the list delimiters
( 2 ) the portion of xs beginning with its first element that is a
;; member of the list delimiters
;;
;; Pre : (and (true-listp delimiters)
;; (true-listp xs))
;; Post: (and (equal (append (car (break-at-set ds xs))
( cadr ( break - at - set ds xs ) ) )
;; xs)
( implies ( consp ( cadr ( break - at - set ds xs ) ) )
( member - equal ( caadr ( break - at - set ds xs ) ) ds ) ) )
;; (implies (member-equal d ds)
;; (not (member-equal d (car (break-at-set ds xs))))))
;;==============================================================================
(defun break-at-set (delimiters xs)
(if (or (not (consp xs))
(member-equal (car xs) delimiters))
(list '() xs)
(let* ((first-thing (car xs))
(break-of-rest (break-at-set delimiters (cdr xs)))
(prefix (car break-of-rest))
(suffix (cadr break-of-rest)))
(list (cons first-thing prefix) suffix))))
;;====== Function: (break-at x xs) =============================================
;;
Returns list with two elements
( 1 ) the portion of xs that precedes the first element equal to x
( 2 ) the portion of xs beginning with the first element equal to x
;;
;; Pre : (true-listp xs)
Post : ( and ( equal ( append ( car ( break - at x xs ) ) ( cadr ( break - at x xs ) ) )
;; xs)
( implies ( consp ( cadr ( break - at d xs ) ) )
( member - equal ( caadr ( break - at - set d xs ) ) ds ) ) )
;; (implies (member-equal d ds)
;; (not (member-equal d (car (break-at d xs))))))
;;==============================================================================
(defun break-at (delimiter xs)
(break-at-set (list delimiter) xs))
;;====== Function: (take-to-set delimiters xs) =================================
;;
Returns the part of xs that precedes its first element that is a member
;; of the list delimiters
;;
;; Pre : (and (true-listp delimiters)
;; (true-listp xs))
;; Post: (and (true-listp (take-to-set delimiters xs))
;; (implies (member-equal d delimiters)
;; (not (member-equal d (take-to-set delimiters xs))))
;; (implies (and (equal (append (take-to-set delimiters xs)
;; ys)
;; xs)
;; (consp ys))
;; (member-equal (car ys) delimiters)))
;;==============================================================================
(defun take-to-set (delimiters xs)
(car (break-at-set delimiters xs)))
;;====== Function: (take-to x xs) ==============================================
;;
Returns the part of xs that precedes the first element equal to x
;;
;; Pre : (true-listp xs)
;; Post: (and (true-listp (take-to x xs))
;; (not (member-equal x (take-to x xs)))
;; (implies (member-equal x xs)
;; (equal (append (take-to x xs)
;; (list x)
;; (drop-past x xs))))
;; (implies (not (member-equal x xs))
;; (equal (take-to x xs) xs)))
;;==============================================================================
(defun take-to (x xs)
(take-to-set (list x) xs))
;;===== Function: (drop-set delimiters xs) =====================================
;;
;; Returns the part of xs that follows the maximal contiguous block of
delimiters beginning at the first element of xs
;;
;; Pre: (true-listp xs)
;; Post: (and (true-listp (drop-set delimiters xs))
;; (implies (consp (drop-set delimiters xs))
;; (not (member (car (drop-set delimiters xs))
;; delimiters)))
;; (implies (and (equal (append ds (drop-set delimiters xs))
;; xs)
;; (member d ds))
;; (member d delimiters)))
;;==============================================================================
(defun drop-set (delimiters xs)
(if (and (consp xs)
(member-equal (car xs) delimiters))
(drop-set delimiters (cdr xs))
xs))
;;===== Function: (drop-past delimiter xs) =====================================
;;
Returns the part of xs that follows the first element equal to x
;;
;; Pre: (true-listp xs)
;; Post: (and (true-listp (drop-past delimiter xs))
;; (implies (member-equal x xs)
;; (equal (append (take-to delimiter xs)
;; (list x)
;; (drop-past delimiter xs))))
;; (implies (not (member-equal delimiter xs))
;; (equal (drop-past delimiter xs) nil)))
;;==============================================================================
(defun drop-past (delimiter xs)
(cdr (member-equal delimiter xs)))
(defun drop-thru (delimiter xs) ; Butch's synonym
(drop-past delimiter xs))
;;====== Function: (drop-past-n-delimiters n delimiter xs) =====================
;;
Returns the part of xs that follows the first n occurances of x
;;
Pre : ( and ( integerp n ) ( > = n 0 ) ( true - listp xs ) )
;; Post: (true-listp (drop-past-n-delimiters n d xs)) ; and some other stuff
;;==============================================================================
(defun drop-past-n-delimiters (n delimiter xs)
(if (or (not (integerp n))
(<= n 0)
(not (consp xs)))
xs
(drop-past-n-delimiters (- n 1) delimiter (drop-past delimiter xs))))
(defun drop-thru-n-delimiters (n delimiter xs) ; Butch's synonym
(drop-past-n-delimiters n delimiter xs))
;;====== Function: (packets-set delimiters xs) ==================================
;;
;; Parcels a list into a list of lists
;; Contiguous sequences from a specified set of delimiters
;; marks separations between packets
;;
;; Pre : (and (true-listp delimiters) (true-listp xs))
;; Post: (true-list-listp (packets-set ds xs))
;; and a bunch of other stuff
;;==============================================================================
;;(defthm break-at-set-cadr-begins-with-delimiter
( implies ( consp ( cadr ( break - at - set delimiters xs ) ) )
( member - equal ( caadr ( break - at - set delimiters xs ) ) delimiters ) ) )
( defthm car - cdr - cons - identity
;; (implies (consp xs)
;; (equal (cons (car xs) (cdr xs))
;; xs)))
( - non - empty - lists
;; (implies (and (true-listp xs) (consp xs))
;; (< (length (cdr xs)) (length xs))))
;;(defthm drop-set-chops-if-list-starts-with-delimiter
;; (implies (and (true-listp xs)
;; (true-listp delimiters)
;; (consp xs)
;; (member-equal (car xs) delimiters))
;; (< (length (drop-set delimiters xs))
;; (length xs))))
(defun packets-set (delimiters xs)
(if (not (consp xs))
'(nil)
(let* ((split (break-at-set delimiters xs))
(first-packet (car split))
(beyond-first-packet (cadr split))
(other-packets (if (not (consp beyond-first-packet))
nil
(packets-set delimiters
(cdr beyond-first-packet)))))
(cons first-packet other-packets))))
;;====== Function: (packets delimiter xs) ======================================
;;
;; Parcels a list into a list of lists
;; A specified delimiter marks separations between packets in the given list
;;
;; Pre : (true-listp xs)
;; Post: (and (true-list-listp (packets d xs))
( implies ( and ( integerp n ) ( > = n 0 ) )
;; (equal (nth n (packets d xs))
;; (take-to d (drop-past-n-delimiters n d xs)))))
;;==============================================================================
(defun packets (delimiter xs)
(packets-set (list delimiter) xs))
;;====== Function: (tokens delimiters xs) ======================================
;;
;; Parcels a list into a list of tokens
;; Tokens are the sublists residing between maximally contiguous
;; sequences of delimiters
;;
;; Pre : (and (true-listp delimiters) (true-listp xs))
;; Post: (true-list-listp (tokens ds xs))
;; and a bunch of other stuff
;;==============================================================================
(defun tokens (delimiters xs)
(remove nil (packets-set delimiters xs)))
;;===== Function: (rep-append n x xs) ==========================================
;;
;; Appends xs to a list consisting of n copies of x
;;
Pre : ( and ( integerp n )
;; (>= n 0)
;; (true-listp xs))
;; Post: (and (implies (member-equal y (take n (rep-append n x)))
;; (equal y x))
;; (equal (length (rep-append n x xs))
;; (+ (length xs) n))
( equal ( n ( rep - append n x xs ) )
;; xs))
;;==============================================================================
(defun rep-append (n x xs)
(if (or (not (integerp n))
(<= n 0))
xs
(rep-append (- n 1) x (cons x xs))))
;;===== Function: (replicate n x) =================================================
;;
;; Delivers a list consisting of n copies of x
;;
Pre : ( and ( integerp n )
;; (>= n 0))
;; Post: (and (implies (member-equal y (replicate n x))
;; (equal y x))
;; (equal (length (replicate n x)) n))
;;==============================================================================
(defun replicate (n x)
(rep-append n x nil))
;;===== Function: (pad-left w p xs) ============================================
;;
Pads appends xs to copies of p to make the resulting list have w elements
;; Note: Delivers xs, as is, if xs has w or more elements
;;
Pre : ( and ( integerp w )
;; (>= w 0)
( listp xs ) )
;; Post: (and (equal (length (pad-left w p xs))
;; (max w (length xs)))
( implies ( member - equal x ( take ( max 0 ( - w ( length xs ) ) ) xs ) )
;; (equal x p)))
;;==============================================================================
(defun pad-left (w p xs)
(append (replicate (max 0 (- w (length xs))) p)
xs))
;;====== Function: (chrs->str chrs) ============================================
;;
;; Converts list of characters to string
;;
Pre : ( character - )
;; Post: (stringp (chrs->str chrs))
;;==============================================================================
(defun chrs->str (chrs)
(coerce chrs 'string))
= = = = = = Function : ( ) = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
;;
;; Converts string to list of characters
;;
;; Pre : (stringp str)
Post : ( character - listp ( ) )
;;==============================================================================
(defun str->chrs (str)
(coerce str 'list))
;;====== Function: (words str) =================================================
;;
;; Parcels a string into a list of words
;; Words are the sublists residing between maximally contiguous
;; spans of whitespace
;;
;; Pre : (stringp str)
;; Post: (string-listp (words str))
;; and a bunch of other stuff
;;==============================================================================
(defun chrs->str-all (list-of-lists-of-chrs)
(if (consp list-of-lists-of-chrs)
(cons (chrs->str (car list-of-lists-of-chrs))
(chrs->str-all (cdr list-of-lists-of-chrs)))
nil))
(defun words (str)
(let* ((whitespace (list (code-char 32) (code-char 10) (code-char 9)
(code-char 11) (code-char 12) (code-char 13)
(code-char 27))))
(chrs->str-all (remove nil (tokens whitespace (str->chrs str))))))
)
| null | https://raw.githubusercontent.com/carl-eastlund/dracula/a937f4b40463779246e3544e4021c53744a33847/teachpacks/list-utilities.rkt | racket | (include-book "arithmetic/top" :dir :system)
(include-book "arithmetic-2/floor-mod/floor-mod" :dir :system)
==============================================================================
====== Function: (break-at-nth n xs) =========================================
(>= n 0)
(true-listp xs))
Post: (and (= (min n (length xs))
(length (car (break-at-nth n xs))))
(equal (append (car (break-at-nth n xs))
xs))
==============================================================================
====== Function: (break-at-set delimiters xs) ================================
member of the list delimiters
member of the list delimiters
Pre : (and (true-listp delimiters)
(true-listp xs))
Post: (and (equal (append (car (break-at-set ds xs))
xs)
(implies (member-equal d ds)
(not (member-equal d (car (break-at-set ds xs))))))
==============================================================================
====== Function: (break-at x xs) =============================================
Pre : (true-listp xs)
xs)
(implies (member-equal d ds)
(not (member-equal d (car (break-at d xs))))))
==============================================================================
====== Function: (take-to-set delimiters xs) =================================
of the list delimiters
Pre : (and (true-listp delimiters)
(true-listp xs))
Post: (and (true-listp (take-to-set delimiters xs))
(implies (member-equal d delimiters)
(not (member-equal d (take-to-set delimiters xs))))
(implies (and (equal (append (take-to-set delimiters xs)
ys)
xs)
(consp ys))
(member-equal (car ys) delimiters)))
==============================================================================
====== Function: (take-to x xs) ==============================================
Pre : (true-listp xs)
Post: (and (true-listp (take-to x xs))
(not (member-equal x (take-to x xs)))
(implies (member-equal x xs)
(equal (append (take-to x xs)
(list x)
(drop-past x xs))))
(implies (not (member-equal x xs))
(equal (take-to x xs) xs)))
==============================================================================
===== Function: (drop-set delimiters xs) =====================================
Returns the part of xs that follows the maximal contiguous block of
Pre: (true-listp xs)
Post: (and (true-listp (drop-set delimiters xs))
(implies (consp (drop-set delimiters xs))
(not (member (car (drop-set delimiters xs))
delimiters)))
(implies (and (equal (append ds (drop-set delimiters xs))
xs)
(member d ds))
(member d delimiters)))
==============================================================================
===== Function: (drop-past delimiter xs) =====================================
Pre: (true-listp xs)
Post: (and (true-listp (drop-past delimiter xs))
(implies (member-equal x xs)
(equal (append (take-to delimiter xs)
(list x)
(drop-past delimiter xs))))
(implies (not (member-equal delimiter xs))
(equal (drop-past delimiter xs) nil)))
==============================================================================
Butch's synonym
====== Function: (drop-past-n-delimiters n delimiter xs) =====================
Post: (true-listp (drop-past-n-delimiters n d xs)) ; and some other stuff
==============================================================================
Butch's synonym
====== Function: (packets-set delimiters xs) ==================================
Parcels a list into a list of lists
Contiguous sequences from a specified set of delimiters
marks separations between packets
Pre : (and (true-listp delimiters) (true-listp xs))
Post: (true-list-listp (packets-set ds xs))
and a bunch of other stuff
==============================================================================
(defthm break-at-set-cadr-begins-with-delimiter
(implies (consp xs)
(equal (cons (car xs) (cdr xs))
xs)))
(implies (and (true-listp xs) (consp xs))
(< (length (cdr xs)) (length xs))))
(defthm drop-set-chops-if-list-starts-with-delimiter
(implies (and (true-listp xs)
(true-listp delimiters)
(consp xs)
(member-equal (car xs) delimiters))
(< (length (drop-set delimiters xs))
(length xs))))
====== Function: (packets delimiter xs) ======================================
Parcels a list into a list of lists
A specified delimiter marks separations between packets in the given list
Pre : (true-listp xs)
Post: (and (true-list-listp (packets d xs))
(equal (nth n (packets d xs))
(take-to d (drop-past-n-delimiters n d xs)))))
==============================================================================
====== Function: (tokens delimiters xs) ======================================
Parcels a list into a list of tokens
Tokens are the sublists residing between maximally contiguous
sequences of delimiters
Pre : (and (true-listp delimiters) (true-listp xs))
Post: (true-list-listp (tokens ds xs))
and a bunch of other stuff
==============================================================================
===== Function: (rep-append n x xs) ==========================================
Appends xs to a list consisting of n copies of x
(>= n 0)
(true-listp xs))
Post: (and (implies (member-equal y (take n (rep-append n x)))
(equal y x))
(equal (length (rep-append n x xs))
(+ (length xs) n))
xs))
==============================================================================
===== Function: (replicate n x) =================================================
Delivers a list consisting of n copies of x
(>= n 0))
Post: (and (implies (member-equal y (replicate n x))
(equal y x))
(equal (length (replicate n x)) n))
==============================================================================
===== Function: (pad-left w p xs) ============================================
Note: Delivers xs, as is, if xs has w or more elements
(>= w 0)
Post: (and (equal (length (pad-left w p xs))
(max w (length xs)))
(equal x p)))
==============================================================================
====== Function: (chrs->str chrs) ============================================
Converts list of characters to string
Post: (stringp (chrs->str chrs))
==============================================================================
Converts string to list of characters
Pre : (stringp str)
==============================================================================
====== Function: (words str) =================================================
Parcels a string into a list of words
Words are the sublists residing between maximally contiguous
spans of whitespace
Pre : (stringp str)
Post: (string-listp (words str))
and a bunch of other stuff
============================================================================== | #lang racket/base
(require "../lang/dracula.rkt" "../lang/do-check.rkt")
(provide (all-defined-out))
(begin-below
(in-package "ACL2")
(set-compile-fns t)
Returns list with two elements
( 1 ) the first n elements of xs
( 2 ) xs without its first n elements
Pre : ( and ( integerp n )
( cadr ( break - at - nth n xs ) ) )
(defun break-at-nth (n xs)
(if (or (not (integerp n))
(<= n 0)
(not (consp xs)))
(list '() xs)
(let* ((first-thing (car xs))
(break-of-rest (break-at-nth (- n 1) (cdr xs)))
(prefix (car break-of-rest))
(suffix (cadr break-of-rest)))
(list (cons first-thing prefix) suffix))))
Returns list with two elements
( 1 ) the portion of xs that precedes its first element that is a
( 2 ) the portion of xs beginning with its first element that is a
( cadr ( break - at - set ds xs ) ) )
( implies ( consp ( cadr ( break - at - set ds xs ) ) )
( member - equal ( caadr ( break - at - set ds xs ) ) ds ) ) )
(defun break-at-set (delimiters xs)
(if (or (not (consp xs))
(member-equal (car xs) delimiters))
(list '() xs)
(let* ((first-thing (car xs))
(break-of-rest (break-at-set delimiters (cdr xs)))
(prefix (car break-of-rest))
(suffix (cadr break-of-rest)))
(list (cons first-thing prefix) suffix))))
Returns list with two elements
( 1 ) the portion of xs that precedes the first element equal to x
( 2 ) the portion of xs beginning with the first element equal to x
Post : ( and ( equal ( append ( car ( break - at x xs ) ) ( cadr ( break - at x xs ) ) )
( implies ( consp ( cadr ( break - at d xs ) ) )
( member - equal ( caadr ( break - at - set d xs ) ) ds ) ) )
(defun break-at (delimiter xs)
(break-at-set (list delimiter) xs))
Returns the part of xs that precedes its first element that is a member
(defun take-to-set (delimiters xs)
(car (break-at-set delimiters xs)))
Returns the part of xs that precedes the first element equal to x
(defun take-to (x xs)
(take-to-set (list x) xs))
delimiters beginning at the first element of xs
(defun drop-set (delimiters xs)
(if (and (consp xs)
(member-equal (car xs) delimiters))
(drop-set delimiters (cdr xs))
xs))
Returns the part of xs that follows the first element equal to x
(defun drop-past (delimiter xs)
(cdr (member-equal delimiter xs)))
(drop-past delimiter xs))
Returns the part of xs that follows the first n occurances of x
Pre : ( and ( integerp n ) ( > = n 0 ) ( true - listp xs ) )
(defun drop-past-n-delimiters (n delimiter xs)
(if (or (not (integerp n))
(<= n 0)
(not (consp xs)))
xs
(drop-past-n-delimiters (- n 1) delimiter (drop-past delimiter xs))))
(drop-past-n-delimiters n delimiter xs))
( implies ( consp ( cadr ( break - at - set delimiters xs ) ) )
( member - equal ( caadr ( break - at - set delimiters xs ) ) delimiters ) ) )
( defthm car - cdr - cons - identity
( - non - empty - lists
(defun packets-set (delimiters xs)
(if (not (consp xs))
'(nil)
(let* ((split (break-at-set delimiters xs))
(first-packet (car split))
(beyond-first-packet (cadr split))
(other-packets (if (not (consp beyond-first-packet))
nil
(packets-set delimiters
(cdr beyond-first-packet)))))
(cons first-packet other-packets))))
( implies ( and ( integerp n ) ( > = n 0 ) )
(defun packets (delimiter xs)
(packets-set (list delimiter) xs))
(defun tokens (delimiters xs)
(remove nil (packets-set delimiters xs)))
Pre : ( and ( integerp n )
( equal ( n ( rep - append n x xs ) )
(defun rep-append (n x xs)
(if (or (not (integerp n))
(<= n 0))
xs
(rep-append (- n 1) x (cons x xs))))
Pre : ( and ( integerp n )
(defun replicate (n x)
(rep-append n x nil))
Pads appends xs to copies of p to make the resulting list have w elements
Pre : ( and ( integerp w )
( listp xs ) )
( implies ( member - equal x ( take ( max 0 ( - w ( length xs ) ) ) xs ) )
(defun pad-left (w p xs)
(append (replicate (max 0 (- w (length xs))) p)
xs))
Pre : ( character - )
(defun chrs->str (chrs)
(coerce chrs 'string))
= = = = = = Function : ( ) = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Post : ( character - listp ( ) )
(defun str->chrs (str)
(coerce str 'list))
(defun chrs->str-all (list-of-lists-of-chrs)
(if (consp list-of-lists-of-chrs)
(cons (chrs->str (car list-of-lists-of-chrs))
(chrs->str-all (cdr list-of-lists-of-chrs)))
nil))
(defun words (str)
(let* ((whitespace (list (code-char 32) (code-char 10) (code-char 9)
(code-char 11) (code-char 12) (code-char 13)
(code-char 27))))
(chrs->str-all (remove nil (tokens whitespace (str->chrs str))))))
)
|
093eed8a8fae6c73bd04e93e322b8f9b424efafcf67204233682d8380f9ce859 | Schol-R-LEA/Assiah | test-syntax-insertion.scm | #!r6rs
(import
(rnrs (6))
(rnrs base (6))
(rnrs syntax-case (6))
(rnrs io simple (6))
(rnrs mutable-pairs (6)))
(define-syntax clear!
(lambda (statement)
(syntax-case statement ()
((_ ?x)
#'(set! ?x 0)))))
(define a 42)
(clear! a)
(display a)
(newline)
| null | https://raw.githubusercontent.com/Schol-R-LEA/Assiah/3656f62a0868bb74b3f211741beaac2b9da82288/scheme-tests/scheme/test-syntax-insertion.scm | scheme | #!r6rs
(import
(rnrs (6))
(rnrs base (6))
(rnrs syntax-case (6))
(rnrs io simple (6))
(rnrs mutable-pairs (6)))
(define-syntax clear!
(lambda (statement)
(syntax-case statement ()
((_ ?x)
#'(set! ?x 0)))))
(define a 42)
(clear! a)
(display a)
(newline)
| |
9b56a62861febde91afcc03a8b555a4cb5f4207f8c5c701e8837a219af4c5866 | Innf107/polaris | driver.ml | open Syntax
open Rename
open Eval
let _tc_category, trace_driver = Trace.make ~flag:"driver" ~prefix:"Driver"
type driver_options = {
filename : string;
argv : string list;
print_ast : bool;
print_renamed : bool;
print_tokens : bool;
}
exception ParseError of loc * string
type specific_parse_error = Parserprelude.specific_parse_error
exception SpecificParseError = Parserprelude.SpecificParseError
let rec parse_rename_typecheck : driver_options
-> Lexing.lexbuf
-> RenameScope.t
-> ?check_or_infer_top_level : [`Check | `Infer]
-> Types.global_env
-> Typed.header * Typed.expr list * RenameScope.t * Types.global_env
= fun options lexbuf scope ?(check_or_infer_top_level = `Check) type_env ->
trace_driver (lazy ("Lexing with filename '" ^ options.filename));
Lexing.set_filename lexbuf options.filename;
if options.print_tokens then
let lex_state = Lexer.new_lex_state () in
let rec go () =
match Lexer.token lex_state lexbuf with
| Parser.EOF -> exit 0
| t -> print_endline (Parserutil.pretty_token t); go ()
in
go ()
else
();
trace_driver (lazy "Parsing...");
let header, ast =
let lex_state = Lexer.new_lex_state () in
match
Parser.main (Lexer.token lex_state) lexbuf
with
| exception Parser.Error ->
let start_pos = lexbuf.lex_start_p in
let end_pos = lexbuf.lex_curr_p in
raise (ParseError (Loc.from_pos start_pos end_pos, "Parse error"))
| res -> res
in
if options.print_ast then begin
print_endline "~~~~~~~~Parsed AST~~~~~~~~";
print_endline (Parsed.pretty_list ast);
print_endline "~~~~~~~~~~~~~~~~~~~~~~~~~~"
end
else ();
let imported_files = List.map (fun x -> (x, Util.path_relative_to options.filename x))
(List.concat_map (Modules.extract_import_paths) ast) in
trace_driver (lazy ("Importing modules from (" ^ String.concat ", " (List.map snd imported_files) ^ ")"));
let items_for_exports = List.map (fun (filename, path) ->
(filename, parse_rename_typecheck {options with filename=path} (Lexing.from_channel (open_in path)) RenameScope.empty Types.empty_env)
) imported_files in
let import_map = FilePathMap.of_seq
(Seq.map (fun (file, (header, ast, scope, env)) -> (file, (Modules.build_export_map header ast scope env, ast)))
(List.to_seq items_for_exports)) in
trace_driver (lazy "Renaming...");
let renamed_header, renamed, new_scope = Rename.rename_scope import_map scope header ast in
if options.print_renamed then begin
print_endline "~~~~~~~~Renamed AST~~~~~~~";
print_endline (Renamed.pretty_list renamed);
print_endline "~~~~~~~~~~~~~~~~~~~~~~~~~~"
end
else ();
trace_driver (lazy "Typechecking...");
let type_env, typed_header, typed_exprs = Types.typecheck check_or_infer_top_level renamed_header renamed type_env in
typed_header, typed_exprs, new_scope, type_env
let run_env : driver_options
-> Lexing.lexbuf
-> eval_env
-> RenameScope.t
-> ?check_or_infer_top_level : [`Check | `Infer]
-> Types.global_env
-> value * eval_env * RenameScope.t * Types.global_env
= fun options lexbuf env scope ?check_or_infer_top_level type_env ->
let renamed_header, renamed, new_scope, new_type_env = parse_rename_typecheck options lexbuf scope ?check_or_infer_top_level type_env in
trace_driver (lazy "Evaluating...");
let env = Eval.eval_header env renamed_header in
let res, new_env = Eval.eval_seq_state env renamed in
res, new_env, new_scope, new_type_env
let run_eval (options : driver_options) (lexbuf : Lexing.lexbuf) : value =
let res, _, _, _ = run_env options lexbuf (Eval.empty_eval_env options.argv) RenameScope.empty Types.empty_env in
res
let run (options : driver_options) (lexbuf : Lexing.lexbuf) : unit =
let _ = run_eval options lexbuf in
()
| null | https://raw.githubusercontent.com/Innf107/polaris/02b41afaf53f347ed84ac97048dd231214a8c03d/src/driver.ml | ocaml | open Syntax
open Rename
open Eval
let _tc_category, trace_driver = Trace.make ~flag:"driver" ~prefix:"Driver"
type driver_options = {
filename : string;
argv : string list;
print_ast : bool;
print_renamed : bool;
print_tokens : bool;
}
exception ParseError of loc * string
type specific_parse_error = Parserprelude.specific_parse_error
exception SpecificParseError = Parserprelude.SpecificParseError
let rec parse_rename_typecheck : driver_options
-> Lexing.lexbuf
-> RenameScope.t
-> ?check_or_infer_top_level : [`Check | `Infer]
-> Types.global_env
-> Typed.header * Typed.expr list * RenameScope.t * Types.global_env
= fun options lexbuf scope ?(check_or_infer_top_level = `Check) type_env ->
trace_driver (lazy ("Lexing with filename '" ^ options.filename));
Lexing.set_filename lexbuf options.filename;
if options.print_tokens then
let lex_state = Lexer.new_lex_state () in
let rec go () =
match Lexer.token lex_state lexbuf with
| Parser.EOF -> exit 0
| t -> print_endline (Parserutil.pretty_token t); go ()
in
go ()
else
();
trace_driver (lazy "Parsing...");
let header, ast =
let lex_state = Lexer.new_lex_state () in
match
Parser.main (Lexer.token lex_state) lexbuf
with
| exception Parser.Error ->
let start_pos = lexbuf.lex_start_p in
let end_pos = lexbuf.lex_curr_p in
raise (ParseError (Loc.from_pos start_pos end_pos, "Parse error"))
| res -> res
in
if options.print_ast then begin
print_endline "~~~~~~~~Parsed AST~~~~~~~~";
print_endline (Parsed.pretty_list ast);
print_endline "~~~~~~~~~~~~~~~~~~~~~~~~~~"
end
else ();
let imported_files = List.map (fun x -> (x, Util.path_relative_to options.filename x))
(List.concat_map (Modules.extract_import_paths) ast) in
trace_driver (lazy ("Importing modules from (" ^ String.concat ", " (List.map snd imported_files) ^ ")"));
let items_for_exports = List.map (fun (filename, path) ->
(filename, parse_rename_typecheck {options with filename=path} (Lexing.from_channel (open_in path)) RenameScope.empty Types.empty_env)
) imported_files in
let import_map = FilePathMap.of_seq
(Seq.map (fun (file, (header, ast, scope, env)) -> (file, (Modules.build_export_map header ast scope env, ast)))
(List.to_seq items_for_exports)) in
trace_driver (lazy "Renaming...");
let renamed_header, renamed, new_scope = Rename.rename_scope import_map scope header ast in
if options.print_renamed then begin
print_endline "~~~~~~~~Renamed AST~~~~~~~";
print_endline (Renamed.pretty_list renamed);
print_endline "~~~~~~~~~~~~~~~~~~~~~~~~~~"
end
else ();
trace_driver (lazy "Typechecking...");
let type_env, typed_header, typed_exprs = Types.typecheck check_or_infer_top_level renamed_header renamed type_env in
typed_header, typed_exprs, new_scope, type_env
let run_env : driver_options
-> Lexing.lexbuf
-> eval_env
-> RenameScope.t
-> ?check_or_infer_top_level : [`Check | `Infer]
-> Types.global_env
-> value * eval_env * RenameScope.t * Types.global_env
= fun options lexbuf env scope ?check_or_infer_top_level type_env ->
let renamed_header, renamed, new_scope, new_type_env = parse_rename_typecheck options lexbuf scope ?check_or_infer_top_level type_env in
trace_driver (lazy "Evaluating...");
let env = Eval.eval_header env renamed_header in
let res, new_env = Eval.eval_seq_state env renamed in
res, new_env, new_scope, new_type_env
let run_eval (options : driver_options) (lexbuf : Lexing.lexbuf) : value =
let res, _, _, _ = run_env options lexbuf (Eval.empty_eval_env options.argv) RenameScope.empty Types.empty_env in
res
let run (options : driver_options) (lexbuf : Lexing.lexbuf) : unit =
let _ = run_eval options lexbuf in
()
| |
27756854451706e9db2ede34d1120f69eade1b61f2134820ace64357b5532410 | hugoduncan/makejack | ns_tree.clj | (ns makejack.tasks.ns-tree
(:require
[babashka.fs :as fs]
[clojure.pprint :as pprint]
[makejack.defaults.api :as defaults]
[makejack.deps.api :as deps]
[makejack.files.api :as files]))
(defn ns-tree
"Return namespace tree info."
[params]
(let [basis (defaults/basis params)
info-map (->> basis
deps/lift-local-deps
defaults/paths
(mapv #(fs/relativize
(fs/absolutize (fs/path (:dir params ".")))
(fs/absolutize (fs/path %))))
(files/info-map params)
)]
(println "Unreferenced namespaces"
(files/top-level-nses info-map))
(pprint/pprint
(files/topo-namespaces info-map))))
| null | https://raw.githubusercontent.com/hugoduncan/makejack/a66cb5e37420872bfa870aaf7b022ce39b60b05b/bases/tasks/src/makejack/tasks/ns_tree.clj | clojure | (ns makejack.tasks.ns-tree
(:require
[babashka.fs :as fs]
[clojure.pprint :as pprint]
[makejack.defaults.api :as defaults]
[makejack.deps.api :as deps]
[makejack.files.api :as files]))
(defn ns-tree
"Return namespace tree info."
[params]
(let [basis (defaults/basis params)
info-map (->> basis
deps/lift-local-deps
defaults/paths
(mapv #(fs/relativize
(fs/absolutize (fs/path (:dir params ".")))
(fs/absolutize (fs/path %))))
(files/info-map params)
)]
(println "Unreferenced namespaces"
(files/top-level-nses info-map))
(pprint/pprint
(files/topo-namespaces info-map))))
| |
ef45a2d8160033a1a4c10db266dda1c67f51ed1d1dae15586a7bbf1a2c2d4e20 | janestreet/hardcaml | fifo.mli | (** Synchronous FIFO implementions with optional [showahead] functionality and pipelining
stages. *)
include Fifo_intf.S
| null | https://raw.githubusercontent.com/janestreet/hardcaml/4126f65f39048fef5853ba9b8d766143f678a9e4/src/fifo.mli | ocaml | * Synchronous FIFO implementions with optional [showahead] functionality and pipelining
stages. |
include Fifo_intf.S
|
230702f84a2304c43f23ad6bad69a563ebd985fa21fee24a44952856b8b52aa5 | discus-lang/ddc | Initialize.hs |
module DDC.Driver.LSP.Protocol.Data.Initialize where
import DDC.Driver.LSP.Protocol.Pack
import DDC.Driver.LSP.Protocol.Data.Base
import DDC.Driver.LSP.Protocol.Data.ClientCapabilities
import DDC.Driver.LSP.Protocol.Data.ServerCapabilities
---------------------------------------------------------------------------------------------------
| The initialize request is sent as the first request from the client to the server .
data InitializeParams
= InitializeParams
{ ipProcessId :: Maybe Int
, ipRootPath :: Maybe (Maybe String)
, ipRootUri :: Maybe DocumentUri
, ipInitOptions :: Maybe JSValue
, ipClientCapabilities :: ClientCapabilities
-- , ipTrace :: Maybe Trace
, ipWorkspaceFolders : : [ WorkspaceFolder ]
}
deriving Show
---------------------------------------------------------------------------------------------------
data Trace
= TraceOff
| TraceMessages
| TraceVerbose
deriving Show
---------------------------------------------------------------------------------------------------
data WorkspaceFolder
= WorkspaceFolder
{ wfUri :: String
, wfName :: String }
deriving Show
---------------------------------------------------------------------------------------------------
-- | The server responds to the initialize request with an initialize result.
data InitializeResult
= InitializeResult
{ irCapabilities :: ServerCapabilities }
deriving Show
instance Pack InitializeResult where
pack ir
= jobj
[ ("capabilities", pack $ irCapabilities ir) ]
| null | https://raw.githubusercontent.com/discus-lang/ddc/2baa1b4e2d43b6b02135257677671a83cb7384ac/src/s1/ddc-driver/DDC/Driver/LSP/Protocol/Data/Initialize.hs | haskell | -------------------------------------------------------------------------------------------------
, ipTrace :: Maybe Trace
-------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------
| The server responds to the initialize request with an initialize result. |
module DDC.Driver.LSP.Protocol.Data.Initialize where
import DDC.Driver.LSP.Protocol.Pack
import DDC.Driver.LSP.Protocol.Data.Base
import DDC.Driver.LSP.Protocol.Data.ClientCapabilities
import DDC.Driver.LSP.Protocol.Data.ServerCapabilities
| The initialize request is sent as the first request from the client to the server .
data InitializeParams
= InitializeParams
{ ipProcessId :: Maybe Int
, ipRootPath :: Maybe (Maybe String)
, ipRootUri :: Maybe DocumentUri
, ipInitOptions :: Maybe JSValue
, ipClientCapabilities :: ClientCapabilities
, ipWorkspaceFolders : : [ WorkspaceFolder ]
}
deriving Show
data Trace
= TraceOff
| TraceMessages
| TraceVerbose
deriving Show
data WorkspaceFolder
= WorkspaceFolder
{ wfUri :: String
, wfName :: String }
deriving Show
data InitializeResult
= InitializeResult
{ irCapabilities :: ServerCapabilities }
deriving Show
instance Pack InitializeResult where
pack ir
= jobj
[ ("capabilities", pack $ irCapabilities ir) ]
|
aa756a5efc1bb19247c17220ce93188a6b96918e2d78e52b1a19f30e980eae68 | futurice/haskell-mega-repo | RateMeter.hs | module Futurice.Metrics.RateMeter (mark, mark', values) where
import Control.Concurrent.STM
(TVar, atomically, newTVar, modifyTVar', newTVarIO, readTVar, writeTVar)
import Futurice.Prelude
import Prelude ()
import System.IO.Unsafe (unsafePerformIO)
-------------------------------------------------------------------------------
Interface
-------------------------------------------------------------------------------
mark :: Text -> IO ()
mark name = mark' name 1
mark' :: Text -> Word64 -> IO ()
mark' name value = atomically $ do
gm <- readTVar globalMap
case gm ^. at name of
Nothing -> do
rmTVar <- newTVar
$ markRateMeter value
$ zeroRateMeter
writeTVar globalMap $ gm
& at name ?~ rmTVar
Just tvar -> do
modifyTVar' tvar
$ markRateMeter value
values :: IO (Map Text Word64)
values = atomically $ readTVar globalMap >>= traverse readMeter
where
readMeter tvar = do
RateMeter value <- readTVar tvar
writeTVar tvar zeroRateMeter
return value
-------------------------------------------------------------------------------
RateMeter
-------------------------------------------------------------------------------
newtype RateMeter = RateMeter Word64
deriving Show
zeroRateMeter :: RateMeter
zeroRateMeter = RateMeter 0
markRateMeter :: Word64 -> RateMeter -> RateMeter
markRateMeter value (RateMeter value') = RateMeter (value + value')
-------------------------------------------------------------------------------
-- Internals
-------------------------------------------------------------------------------
-- todo change to be per capacity
globalMap :: TVar (Map Text (TVar RateMeter))
globalMap = unsafePerformIO $ newTVarIO mempty
# NOINLINE globalMap #
| null | https://raw.githubusercontent.com/futurice/haskell-mega-repo/2647723f12f5435e2edc373f6738386a9668f603/futurice-metrics/src/Futurice/Metrics/RateMeter.hs | haskell | -----------------------------------------------------------------------------
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
Internals
-----------------------------------------------------------------------------
todo change to be per capacity | module Futurice.Metrics.RateMeter (mark, mark', values) where
import Control.Concurrent.STM
(TVar, atomically, newTVar, modifyTVar', newTVarIO, readTVar, writeTVar)
import Futurice.Prelude
import Prelude ()
import System.IO.Unsafe (unsafePerformIO)
Interface
mark :: Text -> IO ()
mark name = mark' name 1
mark' :: Text -> Word64 -> IO ()
mark' name value = atomically $ do
gm <- readTVar globalMap
case gm ^. at name of
Nothing -> do
rmTVar <- newTVar
$ markRateMeter value
$ zeroRateMeter
writeTVar globalMap $ gm
& at name ?~ rmTVar
Just tvar -> do
modifyTVar' tvar
$ markRateMeter value
values :: IO (Map Text Word64)
values = atomically $ readTVar globalMap >>= traverse readMeter
where
readMeter tvar = do
RateMeter value <- readTVar tvar
writeTVar tvar zeroRateMeter
return value
RateMeter
newtype RateMeter = RateMeter Word64
deriving Show
zeroRateMeter :: RateMeter
zeroRateMeter = RateMeter 0
markRateMeter :: Word64 -> RateMeter -> RateMeter
markRateMeter value (RateMeter value') = RateMeter (value + value')
globalMap :: TVar (Map Text (TVar RateMeter))
globalMap = unsafePerformIO $ newTVarIO mempty
# NOINLINE globalMap #
|
b45f32b5cdee9741574383b657a1b87cd05367b64b716cc8c809bdc805b82e6e | schemedoc/implementation-metadata | scheme88.scm | (title "Scheme 88")
(tagline "re-implementation of Scheme 84 to Ibuki Common Lisp")
(based-on "scheme84")
(academy "Rice University")
| null | https://raw.githubusercontent.com/schemedoc/implementation-metadata/6280d9c4c73833dc5bd1c9bef9b45be6ea5beb68/schemes/scheme88.scm | scheme | (title "Scheme 88")
(tagline "re-implementation of Scheme 84 to Ibuki Common Lisp")
(based-on "scheme84")
(academy "Rice University")
| |
fdf237c14885ad38dd77550dd7756a3e123564ed1b5067de04f1474234e24bbe | haskell/stylish-haskell | FelixTests.hs | --------------------------------------------------------------------------------
| Tests contributed by as part of
-- <-haskell/pull/293>.
# LANGUAGE OverloadedLists #
module Language.Haskell.Stylish.Step.Imports.FelixTests
( tests
) where
--------------------------------------------------------------------------------
import Prelude hiding (lines)
import Test.Framework (Test, testGroup)
import Test.Framework.Providers.HUnit (testCase)
import Test.HUnit (Assertion)
--------------------------------------------------------------------------------
import Language.Haskell.Stylish.Step.Imports
import Language.Haskell.Stylish.Tests.Util (assertSnippet)
--------------------------------------------------------------------------------
tests :: Test
tests = testGroup "Language.Haskell.Stylish.Step.Imports.FelixTests"
[ testCase "Hello world" ex0
, testCase "Sorted simple" ex1
, testCase "Sorted import lists" ex2
, testCase "Sorted import lists and import decls" ex3
, testCase "Import constructor all" ex4
, testCase "Import constructor specific" ex5
, testCase "Import constructor specific sorted" ex6
, testCase "Imports step does not change rest of file" ex7
, testCase "Imports respect groups" ex8
, testCase "Imports respects whitespace between groups" ex9
, testCase "Doesn't add extra space after 'hiding'" ex10
, testCase "Should be able to format symbolic imports" ex11
, testCase "Able to merge equivalent imports" ex12
, testCase "Obeys max columns setting" ex13
, testCase "Obeys max columns setting with two in each" ex14
, testCase "Respects multiple groups" ex15
, testCase "Doesn't delete nullary imports" ex16
]
--------------------------------------------------------------------------------
ex0 :: Assertion
ex0 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A"
]
[ "import A"
, "import B"
]
ex1 :: Assertion
ex1 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A"
, "import C"
, "import qualified A"
, "import qualified B as X"
]
[ "import A"
, "import qualified A"
, "import B"
, "import qualified B as X"
, "import C"
]
ex2 :: Assertion
ex2 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "import A (X)"
, "import qualified A as Y (Y)"
, "import B"
, "import C"
]
ex3 :: Assertion
ex3 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A (X, Z, Y)"
, "import C"
, "import qualified A as A0 (b, Y, a)"
, "import qualified D as D0 (Y, b, a)"
, "import qualified E as E0 (b, a, Y)"
]
[ "import A (X, Y, Z)"
, "import qualified A as A0 (Y, a, b)"
, "import B"
, "import C"
, "import qualified D as D0 (Y, a, b)"
, "import qualified E as E0 (Y, a, b)"
]
ex4 :: Assertion
ex4 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(..), Y)"
]
[ "import A (X, Y, Z (..))"
]
ex5 :: Assertion
ex5 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(Z), Y)"
]
[ "import A (X, Y, Z (Z))"
]
ex6 :: Assertion
ex6 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(X, Z, Y), Y)"
]
[ "import A (X, Y, Z (X, Y, Z))"
]
ex7 :: Assertion
ex7 = assertSnippet (step Nothing felixOptions)
[ "module Foo (tests) where"
, "import B"
, "import A (X, Z, Y)"
, "import C"
, "import qualified A as A0 (b, Y, a)"
, "import qualified D as D0 (Y, b, a)"
, "import qualified E as E0 (b, a, Y)"
, "-- hello"
, "foo :: Int"
, "foo = 1"
]
[ "module Foo (tests) where"
, "import A (X, Y, Z)"
, "import qualified A as A0 (Y, a, b)"
, "import B"
, "import C"
, "import qualified D as D0 (Y, a, b)"
, "import qualified E as E0 (Y, a, b)"
, "-- hello"
, "foo :: Int"
, "foo = 1"
]
ex8 :: Assertion
ex8 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "-- Group divisor"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "import B"
, "-- Group divisor"
, "import A (X)"
, "import qualified A as Y (Y)"
, "import C"
]
ex9 :: Assertion
ex9 = assertSnippet (step Nothing felixOptions)
[ "--------"
, "import B"
, ""
, "-- Group divisor"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "--------"
, "import B"
, ""
, "-- Group divisor"
, "import A (X)"
, "import qualified A as Y (Y)"
, "import C"
]
ex10 :: Assertion
ex10 = assertSnippet (step Nothing felixOptions)
[ "import B hiding (X)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import B hiding (X)"
]
ex11 :: Assertion
ex11 = assertSnippet (step Nothing felixOptions)
[ "import Data.Aeson ((.=))"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Data.Aeson ((.=))"
]
ex12 :: Assertion
ex12 = assertSnippet (step Nothing felixOptions)
[ "import Data.Aeson ((.=))"
, "import Data.Aeson ((.=))"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Data.Aeson ((.=))"
]
ex13 :: Assertion
ex13 = assertSnippet (step (Just 10) felixOptions)
[ "import Foo (A, B, C, D)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Foo (A)"
, "import Foo (B)"
, "import Foo (C)"
, "import Foo (D)"
]
ex14 :: Assertion
ex14 = assertSnippet (step (Just 27) felixOptions)
[ "import Foo (A, B, C, D)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Foo (A, B)"
, "import Foo (C, D)"
]
ex15 :: Assertion
ex15 = assertSnippet (step (Just 100) felixOptions)
[ "module Custom.Prelude"
, " ( LazyByteString"
, " , UUID"
, " , decodeUtf8Lenient"
, " , error"
, " , headMay"
, " , module X"
, " , nextRandomUUID"
, " , onChars"
, " , proxyOf"
, " , show"
, " , showStr"
, " , toLazyByteString"
, " , toStrictByteString"
, " , type (~>)"
, " , uuidToText"
, " ) where"
, ""
, "--------------------------------------------------------------------------------"
, "import Prelude as X hiding ((!!), appendFile, error, foldl, head, putStrLn, readFile, show, tail, take, unlines, unwords, words, writeFile)"
, "import qualified Prelude"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Lens as X ((%~), (&), (.~), (?~), (^.), (^?), _Left, _Right, iat, over, preview, sans, set, to, view)"
, "import Control.Lens.Extras as X (is)"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Applicative as X ((<|>))"
, "import Control.Monad as X ((<=<), (>=>), guard, unless, when)"
, "import Control.Monad.Except as X (ExceptT (..), MonadError (..), liftEither, runExceptT, withExceptT)"
, "import Control.Monad.IO.Unlift as X"
, "import Control.Monad.Reader as X (MonadReader (..), ReaderT (..), asks)"
, "import Control.Monad.Trans.Class as X (MonadTrans (lift))"
, "--------------------------------------------------------------------------------"
]
[ "module Custom.Prelude"
, " ( LazyByteString"
, " , UUID"
, " , decodeUtf8Lenient"
, " , error"
, " , headMay"
, " , module X"
, " , nextRandomUUID"
, " , onChars"
, " , proxyOf"
, " , show"
, " , showStr"
, " , toLazyByteString"
, " , toStrictByteString"
, " , type (~>)"
, " , uuidToText"
, " ) where"
, ""
, "--------------------------------------------------------------------------------"
, "import Prelude as X hiding (appendFile, error, foldl, head, putStrLn, readFile, show, tail, take, unlines, unwords, words, writeFile, (!!))"
, "import qualified Prelude"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Lens as X (_Left, _Right, iat, over, preview, sans, set, to)"
, "import Control.Lens as X (view, (%~), (&), (.~), (?~), (^.), (^?))"
, "import Control.Lens.Extras as X (is)"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Applicative as X ((<|>))"
, "import Control.Monad as X (guard, unless, when, (<=<), (>=>))"
, "import Control.Monad.Except as X (ExceptT (..), MonadError (..), liftEither)"
, "import Control.Monad.Except as X (runExceptT, withExceptT)"
, "import Control.Monad.IO.Unlift as X"
, "import Control.Monad.Reader as X (MonadReader (..), ReaderT (..), asks)"
, "import Control.Monad.Trans.Class as X (MonadTrans (lift))"
, "--------------------------------------------------------------------------------"
]
ex16 :: Assertion
ex16 = assertSnippet (step Nothing felixOptions)
[ "module Foo where"
, ""
, "import B ()"
, "import A ()"
]
[ "module Foo where"
, ""
, "import A ()"
, "import B ()"
]
felixOptions :: Options
felixOptions = defaultOptions
{ listAlign = Repeat
}
| null | https://raw.githubusercontent.com/haskell/stylish-haskell/be4814e4380613567b7055830cd596a580a5ad6d/tests/Language/Haskell/Stylish/Step/Imports/FelixTests.hs | haskell | ------------------------------------------------------------------------------
<-haskell/pull/293>.
------------------------------------------------------------------------------
------------------------------------------------------------------------------
------------------------------------------------------------------------------
------------------------------------------------------------------------------ | | Tests contributed by as part of
# LANGUAGE OverloadedLists #
module Language.Haskell.Stylish.Step.Imports.FelixTests
( tests
) where
import Prelude hiding (lines)
import Test.Framework (Test, testGroup)
import Test.Framework.Providers.HUnit (testCase)
import Test.HUnit (Assertion)
import Language.Haskell.Stylish.Step.Imports
import Language.Haskell.Stylish.Tests.Util (assertSnippet)
tests :: Test
tests = testGroup "Language.Haskell.Stylish.Step.Imports.FelixTests"
[ testCase "Hello world" ex0
, testCase "Sorted simple" ex1
, testCase "Sorted import lists" ex2
, testCase "Sorted import lists and import decls" ex3
, testCase "Import constructor all" ex4
, testCase "Import constructor specific" ex5
, testCase "Import constructor specific sorted" ex6
, testCase "Imports step does not change rest of file" ex7
, testCase "Imports respect groups" ex8
, testCase "Imports respects whitespace between groups" ex9
, testCase "Doesn't add extra space after 'hiding'" ex10
, testCase "Should be able to format symbolic imports" ex11
, testCase "Able to merge equivalent imports" ex12
, testCase "Obeys max columns setting" ex13
, testCase "Obeys max columns setting with two in each" ex14
, testCase "Respects multiple groups" ex15
, testCase "Doesn't delete nullary imports" ex16
]
ex0 :: Assertion
ex0 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A"
]
[ "import A"
, "import B"
]
ex1 :: Assertion
ex1 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A"
, "import C"
, "import qualified A"
, "import qualified B as X"
]
[ "import A"
, "import qualified A"
, "import B"
, "import qualified B as X"
, "import C"
]
ex2 :: Assertion
ex2 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "import A (X)"
, "import qualified A as Y (Y)"
, "import B"
, "import C"
]
ex3 :: Assertion
ex3 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "import A (X, Z, Y)"
, "import C"
, "import qualified A as A0 (b, Y, a)"
, "import qualified D as D0 (Y, b, a)"
, "import qualified E as E0 (b, a, Y)"
]
[ "import A (X, Y, Z)"
, "import qualified A as A0 (Y, a, b)"
, "import B"
, "import C"
, "import qualified D as D0 (Y, a, b)"
, "import qualified E as E0 (Y, a, b)"
]
ex4 :: Assertion
ex4 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(..), Y)"
]
[ "import A (X, Y, Z (..))"
]
ex5 :: Assertion
ex5 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(Z), Y)"
]
[ "import A (X, Y, Z (Z))"
]
ex6 :: Assertion
ex6 = assertSnippet (step Nothing felixOptions)
[ "import A (X, Z(X, Z, Y), Y)"
]
[ "import A (X, Y, Z (X, Y, Z))"
]
ex7 :: Assertion
ex7 = assertSnippet (step Nothing felixOptions)
[ "module Foo (tests) where"
, "import B"
, "import A (X, Z, Y)"
, "import C"
, "import qualified A as A0 (b, Y, a)"
, "import qualified D as D0 (Y, b, a)"
, "import qualified E as E0 (b, a, Y)"
, "-- hello"
, "foo :: Int"
, "foo = 1"
]
[ "module Foo (tests) where"
, "import A (X, Y, Z)"
, "import qualified A as A0 (Y, a, b)"
, "import B"
, "import C"
, "import qualified D as D0 (Y, a, b)"
, "import qualified E as E0 (Y, a, b)"
, "-- hello"
, "foo :: Int"
, "foo = 1"
]
ex8 :: Assertion
ex8 = assertSnippet (step Nothing felixOptions)
[ "import B"
, "-- Group divisor"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "import B"
, "-- Group divisor"
, "import A (X)"
, "import qualified A as Y (Y)"
, "import C"
]
ex9 :: Assertion
ex9 = assertSnippet (step Nothing felixOptions)
[ "--------"
, "import B"
, ""
, "-- Group divisor"
, "import A (X)"
, "import C"
, "import qualified A as Y (Y)"
]
[ "--------"
, "import B"
, ""
, "-- Group divisor"
, "import A (X)"
, "import qualified A as Y (Y)"
, "import C"
]
ex10 :: Assertion
ex10 = assertSnippet (step Nothing felixOptions)
[ "import B hiding (X)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import B hiding (X)"
]
ex11 :: Assertion
ex11 = assertSnippet (step Nothing felixOptions)
[ "import Data.Aeson ((.=))"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Data.Aeson ((.=))"
]
ex12 :: Assertion
ex12 = assertSnippet (step Nothing felixOptions)
[ "import Data.Aeson ((.=))"
, "import Data.Aeson ((.=))"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Data.Aeson ((.=))"
]
ex13 :: Assertion
ex13 = assertSnippet (step (Just 10) felixOptions)
[ "import Foo (A, B, C, D)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Foo (A)"
, "import Foo (B)"
, "import Foo (C)"
, "import Foo (D)"
]
ex14 :: Assertion
ex14 = assertSnippet (step (Just 27) felixOptions)
[ "import Foo (A, B, C, D)"
, "import A hiding (X)"
]
[ "import A hiding (X)"
, "import Foo (A, B)"
, "import Foo (C, D)"
]
ex15 :: Assertion
ex15 = assertSnippet (step (Just 100) felixOptions)
[ "module Custom.Prelude"
, " ( LazyByteString"
, " , UUID"
, " , decodeUtf8Lenient"
, " , error"
, " , headMay"
, " , module X"
, " , nextRandomUUID"
, " , onChars"
, " , proxyOf"
, " , show"
, " , showStr"
, " , toLazyByteString"
, " , toStrictByteString"
, " , type (~>)"
, " , uuidToText"
, " ) where"
, ""
, "--------------------------------------------------------------------------------"
, "import Prelude as X hiding ((!!), appendFile, error, foldl, head, putStrLn, readFile, show, tail, take, unlines, unwords, words, writeFile)"
, "import qualified Prelude"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Lens as X ((%~), (&), (.~), (?~), (^.), (^?), _Left, _Right, iat, over, preview, sans, set, to, view)"
, "import Control.Lens.Extras as X (is)"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Applicative as X ((<|>))"
, "import Control.Monad as X ((<=<), (>=>), guard, unless, when)"
, "import Control.Monad.Except as X (ExceptT (..), MonadError (..), liftEither, runExceptT, withExceptT)"
, "import Control.Monad.IO.Unlift as X"
, "import Control.Monad.Reader as X (MonadReader (..), ReaderT (..), asks)"
, "import Control.Monad.Trans.Class as X (MonadTrans (lift))"
, "--------------------------------------------------------------------------------"
]
[ "module Custom.Prelude"
, " ( LazyByteString"
, " , UUID"
, " , decodeUtf8Lenient"
, " , error"
, " , headMay"
, " , module X"
, " , nextRandomUUID"
, " , onChars"
, " , proxyOf"
, " , show"
, " , showStr"
, " , toLazyByteString"
, " , toStrictByteString"
, " , type (~>)"
, " , uuidToText"
, " ) where"
, ""
, "--------------------------------------------------------------------------------"
, "import Prelude as X hiding (appendFile, error, foldl, head, putStrLn, readFile, show, tail, take, unlines, unwords, words, writeFile, (!!))"
, "import qualified Prelude"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Lens as X (_Left, _Right, iat, over, preview, sans, set, to)"
, "import Control.Lens as X (view, (%~), (&), (.~), (?~), (^.), (^?))"
, "import Control.Lens.Extras as X (is)"
, ""
, "--------------------------------------------------------------------------------"
, "import Control.Applicative as X ((<|>))"
, "import Control.Monad as X (guard, unless, when, (<=<), (>=>))"
, "import Control.Monad.Except as X (ExceptT (..), MonadError (..), liftEither)"
, "import Control.Monad.Except as X (runExceptT, withExceptT)"
, "import Control.Monad.IO.Unlift as X"
, "import Control.Monad.Reader as X (MonadReader (..), ReaderT (..), asks)"
, "import Control.Monad.Trans.Class as X (MonadTrans (lift))"
, "--------------------------------------------------------------------------------"
]
ex16 :: Assertion
ex16 = assertSnippet (step Nothing felixOptions)
[ "module Foo where"
, ""
, "import B ()"
, "import A ()"
]
[ "module Foo where"
, ""
, "import A ()"
, "import B ()"
]
felixOptions :: Options
felixOptions = defaultOptions
{ listAlign = Repeat
}
|
8cc2322bead5c532f2575a91ad5aca1d4afdf86191730f25f11df583b7e2556f | chr15m/sitefox | html.cljs | (ns sitefox.html
"Functions for wrangling HTML and rendering Reagent components into selectors."
(:require
[clojure.test :refer [is]]
[applied-science.js-interop :as j]
[reagent.dom.server :refer [render-to-static-markup] :rename {render-to-static-markup r}]
[sitefox.deps :refer [parse-html]]))
(defn parse "Shorthand for [`node-html-parser`'s `parse` function](-html-parser#usage).
Returns a dom-like document object (HTMLElement) that can be manipulated as in the browser."
[html-string] (parse-html html-string))
(defn $ "Shorthand for CSS style `querySelector` on parsed HTML `element`
such as the `document` returned by the `parse` function or a sub-element."
[element selector] (.querySelector element selector))
(defn $$ "Shorthand for CSS style `querySelectorAll` on parsed HTML `element`
such as the `document` returned by the `parse` function or a sub-element."
[element selector] (.querySelectorAll element selector))
(defn render "Shorthand for Reagent's `render-to-static-markup`." [form] (r form))
(defn render-anything
"Render anything to HTML.
If `source` is a Reagent form, `render-to-static-markup` is used.
If `source` is a jsdom HTMLElement or other type of object `.toString` is used.
If `source` is a fn it will be called with any args that were passed.
If `source` is already a string it is passed through with no change."
{:test (fn []
(let [string-html "<div id=\"thing\">Hi</div>"
el-html (parse string-html)
reagent-html [:div {:id "thing"} "Hi"]]
(is (= (render-anything string-html) string-html))
(is (= (render-anything el-html) string-html))
(is (= (render-anything reagent-html) string-html))))}
[source & args]
(cond
(vector? source) (render source)
(string? source) source
(fn? source) (apply source args)
:else (.toString source)))
(defn select-apply
"Parse `template` if it is a string and then run each of selector-applications on it.
If it is already a `document`-like object it won't be parsed first.
The `selector-applications` should each be an array like: `[selector document-method-name ...arguments]`.
For each one the selector will be run and then the method run on the result, with arguments passed to the method.
The special 'method' `setHTML` expects a Reagent form which will be rendered and `innerHTML` will be set to the result."
{:test (fn []
(let [html-string "<html><body><div id='app'></div><span id=one></span><span id=two></span></body></html>"]
(is (= (select-apply html-string ["#app" :remove])
"<html><body><span id=one></span><span id=two></span></body></html>"))
(is (= (select-apply html-string ["#app" :setHTML [:p "My message."]])
"<html><body><div id='app'><p>My message.</p></div><span id=one></span><span id=two></span></body></html>"))
(is (= (select-apply html-string ["span" :setHTML "In span."] ["#app" :remove])
"<html><body><span id=one>In span.</span><span id=two>In span.</span></body></html>"))
(is (= (select-apply html-string ["span" :setAttribute "data-thing" 42] ["#app" :remove])
"<html><body><span id=\"one\" data-thing=\"42\"></span><span id=\"two\" data-thing=\"42\"></span></body></html>"))
(is html-string)))}
[template & selector-application-pairs]
(let [string-template (= (type template) js/String)
document (if string-template (parse-html template) template)]
(doseq [[selector method-name & args] selector-application-pairs]
(doseq [el ($$ document selector)]
(if (= (keyword method-name) :setHTML)
(j/assoc! el :innerHTML (render (first args)))
(j/apply el method-name (clj->js args)))))
(if string-template
(j/call document :toString)
document)))
(defn render-into
"Render a Reagent component into the chosen element of an HTML document.
* `html-string` is the HTML document to be modified.
* `selector` is a CSS-style selector such as `#app` or `main`.
* `reagent-forms` is a valid Reagent component."
{:test (fn []
(let [html-string "<html><body><div id='app'></div></body></html>"]
(is (render-into html-string "body" [:div "Hello, world!"]))
(is (= (render-into html-string "#app" [:div "Hello, world!"])
"<html><body><div id='app'><div>Hello, world!</div></div></body></html>"))
(is (= (render-into html-string "body" [:main "Hello, world!"])
"<html><body><main>Hello, world!</main></body></html>"))
(is (thrown-with-msg?
js/Error #"HTML element not found"
(render-into html-string "#bad" [:div "Hello, world!"])))))}
[html-string selector reagent-forms]
(let [t (parse-html html-string)
el ($ t selector)
rendered (r reagent-forms)]
(when (not el) (throw (js/Error. (str "HTML element not found: \"" selector "\""))))
(j/call el :set_content rendered)
(.toString t)))
(defn direct-to-template
"Render `selector` `component` Reagent pairs into the HTML `template` string and use the express `res` to send the resulting HTML to the client."
[res template & selector-component-pairs]
(.send res
(reduce
(fn [html [selector component]]
(render-into html selector component))
template
(partition 2 selector-component-pairs))))
| null | https://raw.githubusercontent.com/chr15m/sitefox/39c7d80886d11e2459e56a1b9e2976000cde3be2/src/sitefox/html.cljs | clojure | (ns sitefox.html
"Functions for wrangling HTML and rendering Reagent components into selectors."
(:require
[clojure.test :refer [is]]
[applied-science.js-interop :as j]
[reagent.dom.server :refer [render-to-static-markup] :rename {render-to-static-markup r}]
[sitefox.deps :refer [parse-html]]))
(defn parse "Shorthand for [`node-html-parser`'s `parse` function](-html-parser#usage).
Returns a dom-like document object (HTMLElement) that can be manipulated as in the browser."
[html-string] (parse-html html-string))
(defn $ "Shorthand for CSS style `querySelector` on parsed HTML `element`
such as the `document` returned by the `parse` function or a sub-element."
[element selector] (.querySelector element selector))
(defn $$ "Shorthand for CSS style `querySelectorAll` on parsed HTML `element`
such as the `document` returned by the `parse` function or a sub-element."
[element selector] (.querySelectorAll element selector))
(defn render "Shorthand for Reagent's `render-to-static-markup`." [form] (r form))
(defn render-anything
"Render anything to HTML.
If `source` is a Reagent form, `render-to-static-markup` is used.
If `source` is a jsdom HTMLElement or other type of object `.toString` is used.
If `source` is a fn it will be called with any args that were passed.
If `source` is already a string it is passed through with no change."
{:test (fn []
(let [string-html "<div id=\"thing\">Hi</div>"
el-html (parse string-html)
reagent-html [:div {:id "thing"} "Hi"]]
(is (= (render-anything string-html) string-html))
(is (= (render-anything el-html) string-html))
(is (= (render-anything reagent-html) string-html))))}
[source & args]
(cond
(vector? source) (render source)
(string? source) source
(fn? source) (apply source args)
:else (.toString source)))
(defn select-apply
"Parse `template` if it is a string and then run each of selector-applications on it.
If it is already a `document`-like object it won't be parsed first.
The `selector-applications` should each be an array like: `[selector document-method-name ...arguments]`.
For each one the selector will be run and then the method run on the result, with arguments passed to the method.
The special 'method' `setHTML` expects a Reagent form which will be rendered and `innerHTML` will be set to the result."
{:test (fn []
(let [html-string "<html><body><div id='app'></div><span id=one></span><span id=two></span></body></html>"]
(is (= (select-apply html-string ["#app" :remove])
"<html><body><span id=one></span><span id=two></span></body></html>"))
(is (= (select-apply html-string ["#app" :setHTML [:p "My message."]])
"<html><body><div id='app'><p>My message.</p></div><span id=one></span><span id=two></span></body></html>"))
(is (= (select-apply html-string ["span" :setHTML "In span."] ["#app" :remove])
"<html><body><span id=one>In span.</span><span id=two>In span.</span></body></html>"))
(is (= (select-apply html-string ["span" :setAttribute "data-thing" 42] ["#app" :remove])
"<html><body><span id=\"one\" data-thing=\"42\"></span><span id=\"two\" data-thing=\"42\"></span></body></html>"))
(is html-string)))}
[template & selector-application-pairs]
(let [string-template (= (type template) js/String)
document (if string-template (parse-html template) template)]
(doseq [[selector method-name & args] selector-application-pairs]
(doseq [el ($$ document selector)]
(if (= (keyword method-name) :setHTML)
(j/assoc! el :innerHTML (render (first args)))
(j/apply el method-name (clj->js args)))))
(if string-template
(j/call document :toString)
document)))
(defn render-into
"Render a Reagent component into the chosen element of an HTML document.
* `html-string` is the HTML document to be modified.
* `selector` is a CSS-style selector such as `#app` or `main`.
* `reagent-forms` is a valid Reagent component."
{:test (fn []
(let [html-string "<html><body><div id='app'></div></body></html>"]
(is (render-into html-string "body" [:div "Hello, world!"]))
(is (= (render-into html-string "#app" [:div "Hello, world!"])
"<html><body><div id='app'><div>Hello, world!</div></div></body></html>"))
(is (= (render-into html-string "body" [:main "Hello, world!"])
"<html><body><main>Hello, world!</main></body></html>"))
(is (thrown-with-msg?
js/Error #"HTML element not found"
(render-into html-string "#bad" [:div "Hello, world!"])))))}
[html-string selector reagent-forms]
(let [t (parse-html html-string)
el ($ t selector)
rendered (r reagent-forms)]
(when (not el) (throw (js/Error. (str "HTML element not found: \"" selector "\""))))
(j/call el :set_content rendered)
(.toString t)))
(defn direct-to-template
"Render `selector` `component` Reagent pairs into the HTML `template` string and use the express `res` to send the resulting HTML to the client."
[res template & selector-component-pairs]
(.send res
(reduce
(fn [html [selector component]]
(render-into html selector component))
template
(partition 2 selector-component-pairs))))
| |
f735f10c0183a97838e7ba999aa26f4904a7ddae1c694282c46b78965b4ccb73 | Eduap-com/WordMat | zungql.lisp | ;;; Compiled by f2cl version:
( " f2cl1.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl2.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl3.l , v 96616d88fb7e 2008/02/22 22:19:34 rtoy $ "
" f2cl4.l , v 96616d88fb7e 2008/02/22 22:19:34 rtoy $ "
" f2cl5.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl6.l , v 1d5cbacbb977 2008/08/24 00:56:27 rtoy $ "
" macros.l , v 1409c1352feb 2013/03/24 20:44:50 toy $ " )
;;; Using Lisp CMU Common Lisp snapshot-2013-11 (20E Unicode)
;;;
;;; Options: ((:prune-labels nil) (:auto-save t) (:relaxed-array-decls t)
;;; (:coerce-assigns :as-needed) (:array-type ':array)
;;; (:array-slicing t) (:declare-common nil)
;;; (:float-format single-float))
(in-package "LAPACK")
(let* ((zero (f2cl-lib:cmplx 0.0d0 0.0d0)))
(declare (type (f2cl-lib:complex16) zero) (ignorable zero))
(defun zungql (m n k a lda tau work lwork info)
(declare (type (array f2cl-lib:complex16 (*)) work tau a)
(type (f2cl-lib:integer4) info lwork lda k n m))
(f2cl-lib:with-multi-array-data
((a f2cl-lib:complex16 a-%data% a-%offset%)
(tau f2cl-lib:complex16 tau-%data% tau-%offset%)
(work f2cl-lib:complex16 work-%data% work-%offset%))
(prog ((i 0) (ib 0) (iinfo 0) (iws 0) (j 0) (kk 0) (l 0) (ldwork 0)
(lwkopt 0) (nb 0) (nbmin 0) (nx 0) (lquery nil))
(declare (type (f2cl-lib:integer4) i ib iinfo iws j kk l ldwork lwkopt
nb nbmin nx)
(type f2cl-lib:logical lquery))
(setf info 0)
(setf lquery (coerce (= lwork -1) 'f2cl-lib:logical))
(cond
((< m 0)
(setf info -1))
((or (< n 0) (> n m))
(setf info -2))
((or (< k 0) (> k n))
(setf info -3))
((< lda (max (the f2cl-lib:integer4 1) (the f2cl-lib:integer4 m)))
(setf info -5)))
(cond
((= info 0)
(cond
((= n 0)
(setf lwkopt 1))
(t
(setf nb (ilaenv 1 "ZUNGQL" " " m n k -1))
(setf lwkopt (f2cl-lib:int-mul n nb))))
(setf (f2cl-lib:fref work-%data% (1) ((1 *)) work-%offset%)
(coerce lwkopt 'f2cl-lib:complex16))
(cond
((and
(< lwork
(max (the f2cl-lib:integer4 1) (the f2cl-lib:integer4 n)))
(not lquery))
(setf info -8)))))
(cond
((/= info 0)
(xerbla "ZUNGQL" (f2cl-lib:int-sub info))
(go end_label))
(lquery
(go end_label)))
(cond
((<= n 0)
(go end_label)))
(setf nbmin 2)
(setf nx 0)
(setf iws n)
(cond
((and (> nb 1) (< nb k))
(setf nx
(max (the f2cl-lib:integer4 0)
(the f2cl-lib:integer4
(ilaenv 3 "ZUNGQL" " " m n k -1))))
(cond
((< nx k)
(setf ldwork n)
(setf iws (f2cl-lib:int-mul ldwork nb))
(cond
((< lwork iws)
(setf nb (the f2cl-lib:integer4 (truncate lwork ldwork)))
(setf nbmin
(max (the f2cl-lib:integer4 2)
(the f2cl-lib:integer4
(ilaenv 2 "ZUNGQL" " " m n k -1))))))))))
(cond
((and (>= nb nbmin) (< nb k) (< nx k))
(setf kk
(min k
(*
(the f2cl-lib:integer4
(truncate (- (+ (- k nx) nb) 1) nb))
nb)))
(f2cl-lib:fdo (j 1 (f2cl-lib:int-add j 1))
((> j (f2cl-lib:int-add n (f2cl-lib:int-sub kk))) nil)
(tagbody
(f2cl-lib:fdo (i (f2cl-lib:int-add m (f2cl-lib:int-sub kk) 1)
(f2cl-lib:int-add i 1))
((> i m) nil)
(tagbody
(setf (f2cl-lib:fref a-%data%
(i j)
((1 lda) (1 *))
a-%offset%)
zero)
label10))
label20)))
(t
(setf kk 0)))
(multiple-value-bind (var-0 var-1 var-2 var-3 var-4 var-5 var-6 var-7)
(zung2l (f2cl-lib:int-sub m kk) (f2cl-lib:int-sub n kk)
(f2cl-lib:int-sub k kk) a lda tau work iinfo)
(declare (ignore var-0 var-1 var-2 var-3 var-5 var-6))
(setf lda var-4)
(setf iinfo var-7))
(cond
((> kk 0)
(f2cl-lib:fdo (i (f2cl-lib:int-add k (f2cl-lib:int-sub kk) 1)
(f2cl-lib:int-add i nb))
((> i k) nil)
(tagbody
(setf ib
(min (the f2cl-lib:integer4 nb)
(the f2cl-lib:integer4
(f2cl-lib:int-add (f2cl-lib:int-sub k i) 1))))
(cond
((> (f2cl-lib:int-add n (f2cl-lib:int-sub k) i) 1)
(zlarft "Backward" "Columnwise"
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda
(f2cl-lib:array-slice tau-%data%
f2cl-lib:complex16
(i)
((1 *))
tau-%offset%)
work ldwork)
(zlarfb "Left" "No transpose" "Backward" "Columnwise"
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub n k) i)
1)
ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda work ldwork a lda
(f2cl-lib:array-slice work-%data%
f2cl-lib:complex16
((+ ib 1))
((1 *))
work-%offset%)
ldwork)))
(multiple-value-bind
(var-0 var-1 var-2 var-3 var-4 var-5 var-6 var-7)
(zung2l
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
ib ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda
(f2cl-lib:array-slice tau-%data%
f2cl-lib:complex16
(i)
((1 *))
tau-%offset%)
work iinfo)
(declare (ignore var-0 var-1 var-2 var-3 var-5 var-6))
(setf lda var-4)
(setf iinfo var-7))
(f2cl-lib:fdo (j (f2cl-lib:int-add n (f2cl-lib:int-sub k) i)
(f2cl-lib:int-add j 1))
((> j
(f2cl-lib:int-add n
(f2cl-lib:int-sub k)
i
ib
(f2cl-lib:int-sub 1)))
nil)
(tagbody
(f2cl-lib:fdo (l
(f2cl-lib:int-add m
(f2cl-lib:int-sub k)
i
ib)
(f2cl-lib:int-add l 1))
((> l m) nil)
(tagbody
(setf (f2cl-lib:fref a-%data%
(l j)
((1 lda) (1 *))
a-%offset%)
zero)
label30))
label40))
label50))))
(setf (f2cl-lib:fref work-%data% (1) ((1 *)) work-%offset%)
(coerce iws 'f2cl-lib:complex16))
(go end_label)
end_label
(return (values nil nil nil nil lda nil nil nil info))))))
(in-package #-gcl #:cl-user #+gcl "CL-USER")
#+#.(cl:if (cl:find-package '#:f2cl) '(and) '(or))
(eval-when (:load-toplevel :compile-toplevel :execute)
(setf (gethash 'fortran-to-lisp::zungql
fortran-to-lisp::*f2cl-function-info*)
(fortran-to-lisp::make-f2cl-finfo
:arg-types '((fortran-to-lisp::integer4) (fortran-to-lisp::integer4)
(fortran-to-lisp::integer4)
(array fortran-to-lisp::complex16 (*))
(fortran-to-lisp::integer4)
(array fortran-to-lisp::complex16 (*))
(array fortran-to-lisp::complex16 (*))
(fortran-to-lisp::integer4)
(fortran-to-lisp::integer4))
:return-values '(nil nil nil nil fortran-to-lisp::lda nil nil nil
fortran-to-lisp::info)
:calls '(fortran-to-lisp::zlarfb fortran-to-lisp::zlarft
fortran-to-lisp::zung2l fortran-to-lisp::xerbla
fortran-to-lisp::ilaenv))))
| null | https://raw.githubusercontent.com/Eduap-com/WordMat/83c9336770067f54431cc42c7147dc6ed640a339/Windows/ExternalPrograms/maxima-5.45.1/share/maxima/5.45.1/share/lapack/lapack/zungql.lisp | lisp | Compiled by f2cl version:
Using Lisp CMU Common Lisp snapshot-2013-11 (20E Unicode)
Options: ((:prune-labels nil) (:auto-save t) (:relaxed-array-decls t)
(:coerce-assigns :as-needed) (:array-type ':array)
(:array-slicing t) (:declare-common nil)
(:float-format single-float)) | ( " f2cl1.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl2.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl3.l , v 96616d88fb7e 2008/02/22 22:19:34 rtoy $ "
" f2cl4.l , v 96616d88fb7e 2008/02/22 22:19:34 rtoy $ "
" f2cl5.l , v 95098eb54f13 2013/04/01 00:45:16 toy $ "
" f2cl6.l , v 1d5cbacbb977 2008/08/24 00:56:27 rtoy $ "
" macros.l , v 1409c1352feb 2013/03/24 20:44:50 toy $ " )
(in-package "LAPACK")
(let* ((zero (f2cl-lib:cmplx 0.0d0 0.0d0)))
(declare (type (f2cl-lib:complex16) zero) (ignorable zero))
(defun zungql (m n k a lda tau work lwork info)
(declare (type (array f2cl-lib:complex16 (*)) work tau a)
(type (f2cl-lib:integer4) info lwork lda k n m))
(f2cl-lib:with-multi-array-data
((a f2cl-lib:complex16 a-%data% a-%offset%)
(tau f2cl-lib:complex16 tau-%data% tau-%offset%)
(work f2cl-lib:complex16 work-%data% work-%offset%))
(prog ((i 0) (ib 0) (iinfo 0) (iws 0) (j 0) (kk 0) (l 0) (ldwork 0)
(lwkopt 0) (nb 0) (nbmin 0) (nx 0) (lquery nil))
(declare (type (f2cl-lib:integer4) i ib iinfo iws j kk l ldwork lwkopt
nb nbmin nx)
(type f2cl-lib:logical lquery))
(setf info 0)
(setf lquery (coerce (= lwork -1) 'f2cl-lib:logical))
(cond
((< m 0)
(setf info -1))
((or (< n 0) (> n m))
(setf info -2))
((or (< k 0) (> k n))
(setf info -3))
((< lda (max (the f2cl-lib:integer4 1) (the f2cl-lib:integer4 m)))
(setf info -5)))
(cond
((= info 0)
(cond
((= n 0)
(setf lwkopt 1))
(t
(setf nb (ilaenv 1 "ZUNGQL" " " m n k -1))
(setf lwkopt (f2cl-lib:int-mul n nb))))
(setf (f2cl-lib:fref work-%data% (1) ((1 *)) work-%offset%)
(coerce lwkopt 'f2cl-lib:complex16))
(cond
((and
(< lwork
(max (the f2cl-lib:integer4 1) (the f2cl-lib:integer4 n)))
(not lquery))
(setf info -8)))))
(cond
((/= info 0)
(xerbla "ZUNGQL" (f2cl-lib:int-sub info))
(go end_label))
(lquery
(go end_label)))
(cond
((<= n 0)
(go end_label)))
(setf nbmin 2)
(setf nx 0)
(setf iws n)
(cond
((and (> nb 1) (< nb k))
(setf nx
(max (the f2cl-lib:integer4 0)
(the f2cl-lib:integer4
(ilaenv 3 "ZUNGQL" " " m n k -1))))
(cond
((< nx k)
(setf ldwork n)
(setf iws (f2cl-lib:int-mul ldwork nb))
(cond
((< lwork iws)
(setf nb (the f2cl-lib:integer4 (truncate lwork ldwork)))
(setf nbmin
(max (the f2cl-lib:integer4 2)
(the f2cl-lib:integer4
(ilaenv 2 "ZUNGQL" " " m n k -1))))))))))
(cond
((and (>= nb nbmin) (< nb k) (< nx k))
(setf kk
(min k
(*
(the f2cl-lib:integer4
(truncate (- (+ (- k nx) nb) 1) nb))
nb)))
(f2cl-lib:fdo (j 1 (f2cl-lib:int-add j 1))
((> j (f2cl-lib:int-add n (f2cl-lib:int-sub kk))) nil)
(tagbody
(f2cl-lib:fdo (i (f2cl-lib:int-add m (f2cl-lib:int-sub kk) 1)
(f2cl-lib:int-add i 1))
((> i m) nil)
(tagbody
(setf (f2cl-lib:fref a-%data%
(i j)
((1 lda) (1 *))
a-%offset%)
zero)
label10))
label20)))
(t
(setf kk 0)))
(multiple-value-bind (var-0 var-1 var-2 var-3 var-4 var-5 var-6 var-7)
(zung2l (f2cl-lib:int-sub m kk) (f2cl-lib:int-sub n kk)
(f2cl-lib:int-sub k kk) a lda tau work iinfo)
(declare (ignore var-0 var-1 var-2 var-3 var-5 var-6))
(setf lda var-4)
(setf iinfo var-7))
(cond
((> kk 0)
(f2cl-lib:fdo (i (f2cl-lib:int-add k (f2cl-lib:int-sub kk) 1)
(f2cl-lib:int-add i nb))
((> i k) nil)
(tagbody
(setf ib
(min (the f2cl-lib:integer4 nb)
(the f2cl-lib:integer4
(f2cl-lib:int-add (f2cl-lib:int-sub k i) 1))))
(cond
((> (f2cl-lib:int-add n (f2cl-lib:int-sub k) i) 1)
(zlarft "Backward" "Columnwise"
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda
(f2cl-lib:array-slice tau-%data%
f2cl-lib:complex16
(i)
((1 *))
tau-%offset%)
work ldwork)
(zlarfb "Left" "No transpose" "Backward" "Columnwise"
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub n k) i)
1)
ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda work ldwork a lda
(f2cl-lib:array-slice work-%data%
f2cl-lib:complex16
((+ ib 1))
((1 *))
work-%offset%)
ldwork)))
(multiple-value-bind
(var-0 var-1 var-2 var-3 var-4 var-5 var-6 var-7)
(zung2l
(f2cl-lib:int-sub
(f2cl-lib:int-add (f2cl-lib:int-sub m k) i ib)
1)
ib ib
(f2cl-lib:array-slice a-%data%
f2cl-lib:complex16
(1
(f2cl-lib:int-add
(f2cl-lib:int-sub n k)
i))
((1 lda) (1 *))
a-%offset%)
lda
(f2cl-lib:array-slice tau-%data%
f2cl-lib:complex16
(i)
((1 *))
tau-%offset%)
work iinfo)
(declare (ignore var-0 var-1 var-2 var-3 var-5 var-6))
(setf lda var-4)
(setf iinfo var-7))
(f2cl-lib:fdo (j (f2cl-lib:int-add n (f2cl-lib:int-sub k) i)
(f2cl-lib:int-add j 1))
((> j
(f2cl-lib:int-add n
(f2cl-lib:int-sub k)
i
ib
(f2cl-lib:int-sub 1)))
nil)
(tagbody
(f2cl-lib:fdo (l
(f2cl-lib:int-add m
(f2cl-lib:int-sub k)
i
ib)
(f2cl-lib:int-add l 1))
((> l m) nil)
(tagbody
(setf (f2cl-lib:fref a-%data%
(l j)
((1 lda) (1 *))
a-%offset%)
zero)
label30))
label40))
label50))))
(setf (f2cl-lib:fref work-%data% (1) ((1 *)) work-%offset%)
(coerce iws 'f2cl-lib:complex16))
(go end_label)
end_label
(return (values nil nil nil nil lda nil nil nil info))))))
(in-package #-gcl #:cl-user #+gcl "CL-USER")
#+#.(cl:if (cl:find-package '#:f2cl) '(and) '(or))
(eval-when (:load-toplevel :compile-toplevel :execute)
(setf (gethash 'fortran-to-lisp::zungql
fortran-to-lisp::*f2cl-function-info*)
(fortran-to-lisp::make-f2cl-finfo
:arg-types '((fortran-to-lisp::integer4) (fortran-to-lisp::integer4)
(fortran-to-lisp::integer4)
(array fortran-to-lisp::complex16 (*))
(fortran-to-lisp::integer4)
(array fortran-to-lisp::complex16 (*))
(array fortran-to-lisp::complex16 (*))
(fortran-to-lisp::integer4)
(fortran-to-lisp::integer4))
:return-values '(nil nil nil nil fortran-to-lisp::lda nil nil nil
fortran-to-lisp::info)
:calls '(fortran-to-lisp::zlarfb fortran-to-lisp::zlarft
fortran-to-lisp::zung2l fortran-to-lisp::xerbla
fortran-to-lisp::ilaenv))))
|
7fe917257622b7a8863bf921f03c8eed82af7d2830a4b8d2a26f6bbb787e6db1 | xxyzz/SICP | Exercise_2_41.rkt | #lang racket/base
(define (accumulate op initial sequence)
(if (null? sequence)
initial
(op (car sequence)
(accumulate op initial (cdr sequence)))))
(define (enumerate-interval low high)
(if (> low high)
null
(cons low (enumerate-interval (+ low 1) high))))
(define (flatmap proc seq)
(accumulate append null (map proc seq)))
(define (unique-triples n)
(flatmap (lambda (i)
(flatmap (lambda (j)
(map (lambda (k) (list i j k))
(enumerate-interval 1 (sub1 j))))
(enumerate-interval 1 (sub1 i))))
(enumerate-interval 1 n)))
(define (equal-sum-pairs n s)
(filter (lambda (triple)
(= (accumulate + 0 triple) s))
(unique-triples n)))
(unique-triples 4)
' ( ( 3 2 1 ) ( 4 2 1 ) ( 4 3 1 ) ( 4 3 2 ) )
(equal-sum-pairs 4 6)
' ( ( 3 2 1 ) )
| null | https://raw.githubusercontent.com/xxyzz/SICP/e26aea1c58fd896297dbf5406f7fcd32bb4f8f78/2_Building_Abstractions_with_Data/2.2_Hierarchical_Data_and_the_Closure_Property/Exercise_2_41.rkt | racket | #lang racket/base
(define (accumulate op initial sequence)
(if (null? sequence)
initial
(op (car sequence)
(accumulate op initial (cdr sequence)))))
(define (enumerate-interval low high)
(if (> low high)
null
(cons low (enumerate-interval (+ low 1) high))))
(define (flatmap proc seq)
(accumulate append null (map proc seq)))
(define (unique-triples n)
(flatmap (lambda (i)
(flatmap (lambda (j)
(map (lambda (k) (list i j k))
(enumerate-interval 1 (sub1 j))))
(enumerate-interval 1 (sub1 i))))
(enumerate-interval 1 n)))
(define (equal-sum-pairs n s)
(filter (lambda (triple)
(= (accumulate + 0 triple) s))
(unique-triples n)))
(unique-triples 4)
' ( ( 3 2 1 ) ( 4 2 1 ) ( 4 3 1 ) ( 4 3 2 ) )
(equal-sum-pairs 4 6)
' ( ( 3 2 1 ) )
| |
139fd68a1f02b14c36045b322ee3171628dc7bcdbaa692c37c9c259008677350 | kmi/irs | swift-services-datatypes.lisp | Mode : Lisp ; Package :
File created in WebOnto
(in-package "OCML")
(in-ontology swift-services-datatypes)
2 ) ChangeDetailsOfCitizenInterfaceOut -- SWIFTDB
(def-class change-details-of-citizen-request-type ()
((has-address-key :type integer)
(has-new-address :type new-address)))
(def-class new-address ()
((has-postcode :type string)
(has-premise-number :type string)
(has-premise-name :type string)
(has-street :type string)
(has-locality :type string)
(has-town :type string)))
(def-class change-details-of-citizen-response-type ()
((has-response :type string)))
3 ) CitizenAddressByCodeInterfaceOut -- SWIFTDB
(def-class citizen-address-by-citizen-code-request-type ()
((has-citizen-key :type integer)))
(def-class citizen-address-response-type ()
((has-citizen-address :type citizen-address)))
(def-class citizen-address ()
((has-address-key :type integer)
(has-postcode :type post-code-string)
(has-premise-number :type integer) ;;; type positive-integer ??
(has-premise-name :type string)
(has-street :type string)
(has-locality :type string)
(has-town :type string)))
postcode constraint : its length is 8 characters maximum
:constraint (< (length ?x) 9))
4 ) CitizenAddressByNameInterfaceOut -- SWIFTDB
(def-class citizen-address-by-name-request-type ()
((has-family-name :type string)
(has-first-names :type string)))
(def-class citizen-address-response-type ()
already defined in 3 )
5 ) CitizenDataByCitizenCodeInterfaceOut -- SWIFTDB
(def-class citizen-data-by-citizen-code-request-type ()
((has-citizen-key :type integer)))
(def-class citizen-data-response-type ()
((has-citizen-data :type citizen-data)))
(def-class citizen-data ()
((has-citizen-key :type integer)
(has-gender-code :type integer)
(has-marital-status-code :type integer)
(has-title-code :type integer)
(has-ethnicity-code :type integer)
(has-family-name :type string)
(has-speech-impairment :type boolean) ;;; value defined as boolean in the DB, and as a string with length=1 and Y or N values in WSDL description
(has-hearing-impairment :type boolean) ;;; value defined as boolean in the DB, and as a string with length=1 and Y or N values in WSDL description
(has-first-names :type string)
(has-initials :type string)
(has-date-of-birth :type date) ;;; class date defined below
(has-date-of-death :type date)
(has-approx-date-of-birth :type date)
(has-age :type integer)
(has-expected-date-of-birth :type date)
))
(def-class date ()
((has-date :type string))
)
6 ) CitizenDataByNameInterfaceOut -- SWIFTDB
(def-class citizen-data-by-name-request-type ()
((has-family-name :type string)
(has-first-names :type string)))
(def-class citizen-data-response-type ()
already defined in 5 )
7 ) CreateCitizenRecordInterfaceOut -- SWIFTDB
(def-class create-citizen-record-response-type ()
((has-response :type string)))
(def-class create-citizen-record-request-type ()
already defined in 5 )
8) -- SWIFTDB
(def-class create-new-assessment-for-client-request-out ()
((has-citizen-key :type integer)))
(def-class create-new-assessment-for-client-response-type ()
((has-assessment-key :type integer)))
9 ) EthnicityByCodeInterfaceOut -- SWIFTDB
(def-class ethnicity-response-type ()
((has-ethnicity :type ethnicity)))
(def-class ethnicity ()
((has-ethnicity-code :type integer)
(has-ethnicity-display :type string)
(has-ethnicity-description :type string)))
(def-class ethnicity-by-code-request-type ()
((has-ethnicity-code :type integer)))
10 ) FinalizeServiceInterfaceOut -- SWIFTDB
(def-class finalize-service-response-type ()
((has-response :type string)))
(def-class finalize-service-request-type ()
((has-citizen-key :type integer)
(has-care-item-code :type integer)))
11 ) GenderByCodeInterfaceOut -- SWIFTDB
(def-class gender-response-type ()
((has-gender :type gender)))
(def-class gender ()
((has-gender-code :type integer)
(has-gender-description :type string)
(has-gender-display :type string)))
(def-class gender-by-code-request-type ()
((has-gender-code :type integer)))
12 ) NotifyCitizenDeceasedInterfaceOut -- SWIFT
(def-class notify-citizen-deceased-response-type ()
((has-response :type string)))
(def-class notify-citizen-deceased-request-type ()
((has-citizen-key :type integer)
(has-date-of-death :type date))) ;;; type date already defined as string))
13 ) OrderServiceForAssessmentInterfaceOut -- SWIFT
(def-class order-service-for-assessment-request-type ()
((has-referral-key :type integer)
(has-care-item-code :type integer)))
(def-class order-service-for-assessment-response-type ()
((has-response :type string)))
16 ) TitleByCodeInterfaceOut -- SWIFT
(def-class title-response-type ()
((has-title :type title)))
(def-class title ()
((has-title-code :type integer)
(has-title-display :type string)
(has-title-description :type string)))
(def-class title-by-code-request-type ()
((has-title-code :type integer)))
| null | https://raw.githubusercontent.com/kmi/irs/e1b8d696f61c6b6878c0e92d993ed549fee6e7dd/ontologies/domains/swift-services-datatypes/swift-services-datatypes.lisp | lisp | Package :
type positive-integer ??
value defined as boolean in the DB, and as a string with length=1 and Y or N values in WSDL description
value defined as boolean in the DB, and as a string with length=1 and Y or N values in WSDL description
class date defined below
type date already defined as string)) |
File created in WebOnto
(in-package "OCML")
(in-ontology swift-services-datatypes)
2 ) ChangeDetailsOfCitizenInterfaceOut -- SWIFTDB
(def-class change-details-of-citizen-request-type ()
((has-address-key :type integer)
(has-new-address :type new-address)))
(def-class new-address ()
((has-postcode :type string)
(has-premise-number :type string)
(has-premise-name :type string)
(has-street :type string)
(has-locality :type string)
(has-town :type string)))
(def-class change-details-of-citizen-response-type ()
((has-response :type string)))
3 ) CitizenAddressByCodeInterfaceOut -- SWIFTDB
(def-class citizen-address-by-citizen-code-request-type ()
((has-citizen-key :type integer)))
(def-class citizen-address-response-type ()
((has-citizen-address :type citizen-address)))
(def-class citizen-address ()
((has-address-key :type integer)
(has-postcode :type post-code-string)
(has-premise-name :type string)
(has-street :type string)
(has-locality :type string)
(has-town :type string)))
postcode constraint : its length is 8 characters maximum
:constraint (< (length ?x) 9))
4 ) CitizenAddressByNameInterfaceOut -- SWIFTDB
(def-class citizen-address-by-name-request-type ()
((has-family-name :type string)
(has-first-names :type string)))
(def-class citizen-address-response-type ()
already defined in 3 )
5 ) CitizenDataByCitizenCodeInterfaceOut -- SWIFTDB
(def-class citizen-data-by-citizen-code-request-type ()
((has-citizen-key :type integer)))
(def-class citizen-data-response-type ()
((has-citizen-data :type citizen-data)))
(def-class citizen-data ()
((has-citizen-key :type integer)
(has-gender-code :type integer)
(has-marital-status-code :type integer)
(has-title-code :type integer)
(has-ethnicity-code :type integer)
(has-family-name :type string)
(has-first-names :type string)
(has-initials :type string)
(has-date-of-death :type date)
(has-approx-date-of-birth :type date)
(has-age :type integer)
(has-expected-date-of-birth :type date)
))
(def-class date ()
((has-date :type string))
)
6 ) CitizenDataByNameInterfaceOut -- SWIFTDB
(def-class citizen-data-by-name-request-type ()
((has-family-name :type string)
(has-first-names :type string)))
(def-class citizen-data-response-type ()
already defined in 5 )
7 ) CreateCitizenRecordInterfaceOut -- SWIFTDB
(def-class create-citizen-record-response-type ()
((has-response :type string)))
(def-class create-citizen-record-request-type ()
already defined in 5 )
8) -- SWIFTDB
(def-class create-new-assessment-for-client-request-out ()
((has-citizen-key :type integer)))
(def-class create-new-assessment-for-client-response-type ()
((has-assessment-key :type integer)))
9 ) EthnicityByCodeInterfaceOut -- SWIFTDB
(def-class ethnicity-response-type ()
((has-ethnicity :type ethnicity)))
(def-class ethnicity ()
((has-ethnicity-code :type integer)
(has-ethnicity-display :type string)
(has-ethnicity-description :type string)))
(def-class ethnicity-by-code-request-type ()
((has-ethnicity-code :type integer)))
10 ) FinalizeServiceInterfaceOut -- SWIFTDB
(def-class finalize-service-response-type ()
((has-response :type string)))
(def-class finalize-service-request-type ()
((has-citizen-key :type integer)
(has-care-item-code :type integer)))
11 ) GenderByCodeInterfaceOut -- SWIFTDB
(def-class gender-response-type ()
((has-gender :type gender)))
(def-class gender ()
((has-gender-code :type integer)
(has-gender-description :type string)
(has-gender-display :type string)))
(def-class gender-by-code-request-type ()
((has-gender-code :type integer)))
12 ) NotifyCitizenDeceasedInterfaceOut -- SWIFT
(def-class notify-citizen-deceased-response-type ()
((has-response :type string)))
(def-class notify-citizen-deceased-request-type ()
((has-citizen-key :type integer)
13 ) OrderServiceForAssessmentInterfaceOut -- SWIFT
(def-class order-service-for-assessment-request-type ()
((has-referral-key :type integer)
(has-care-item-code :type integer)))
(def-class order-service-for-assessment-response-type ()
((has-response :type string)))
16 ) TitleByCodeInterfaceOut -- SWIFT
(def-class title-response-type ()
((has-title :type title)))
(def-class title ()
((has-title-code :type integer)
(has-title-display :type string)
(has-title-description :type string)))
(def-class title-by-code-request-type ()
((has-title-code :type integer)))
|
7aac074659d47048f7038b77d82dab88ebbd33d5fcc3f149bea88690d2df1a36 | protosens/monorepo.cljc | print.clj | (ns protosens.bb.help.print
"Default printers.
Used by [[protosens.bb.help/print]] unless overwritten by the user."
(:require [clojure.string :as string]))
;;;;;;;;;;
(defn no-task
"When no task has been provided as input.
Prints available tasks (documented ones)."
[data]
(println "These tasks have extra documentation:")
(println)
(doseq [task (sort-by string/lower-case
(keys (data :task+)))]
(println (str " "
task))))
(defn no-task+
"When the `bb.edn` file does not have any task."
[_data]
(println "No tasks declared in that BB file."))
(defn not-found
"When the given task does not exist.
Also prints `:no-task` ([[no-task]] by default)."
[data]
(println "Task not found.")
(println)
((get-in data
[:printer+
:no-task])
data))
(defn task
"When the given task has been found.
Prints its docstring and `:protosens/doc` (if any)."
[data]
(when-some [docstring (data :doc)]
(println docstring)
(println)
(println "---")
(println))
(println (or (data :body)
"No extra documentation found for this task")))
(defn undocumented-task+
"Prints undocumented tasks."
[data]
(if-some [task+ (not-empty (data :task+))]
(do
(println "These tasks do not have extra documentation:")
(println)
(doseq [task task+]
(println (str " "
task))))
(println "All tasks have extra documentation.")))
| null | https://raw.githubusercontent.com/protosens/monorepo.cljc/1c7cc00cbfb7c7484521146bf998438d2867552f/module/bb.help/src/main/clj/protosens/bb/help/print.clj | clojure | (ns protosens.bb.help.print
"Default printers.
Used by [[protosens.bb.help/print]] unless overwritten by the user."
(:require [clojure.string :as string]))
(defn no-task
"When no task has been provided as input.
Prints available tasks (documented ones)."
[data]
(println "These tasks have extra documentation:")
(println)
(doseq [task (sort-by string/lower-case
(keys (data :task+)))]
(println (str " "
task))))
(defn no-task+
"When the `bb.edn` file does not have any task."
[_data]
(println "No tasks declared in that BB file."))
(defn not-found
"When the given task does not exist.
Also prints `:no-task` ([[no-task]] by default)."
[data]
(println "Task not found.")
(println)
((get-in data
[:printer+
:no-task])
data))
(defn task
"When the given task has been found.
Prints its docstring and `:protosens/doc` (if any)."
[data]
(when-some [docstring (data :doc)]
(println docstring)
(println)
(println "---")
(println))
(println (or (data :body)
"No extra documentation found for this task")))
(defn undocumented-task+
"Prints undocumented tasks."
[data]
(if-some [task+ (not-empty (data :task+))]
(do
(println "These tasks do not have extra documentation:")
(println)
(doseq [task task+]
(println (str " "
task))))
(println "All tasks have extra documentation.")))
| |
a740623c7c9c8f346a94ebb0dd00f7175ff5c7b9be0ae68977e14461db8905c4 | ml-in-barcelona/server-reason-react | belt_List.ml | type 'a t = 'a list
module A = Belt_Array
external mutableCell : 'a -> 'a t -> 'a t = "belt_makemutablelist"
let unsafeMutateTail a b = Obj.set_field (Obj.repr a) 1 (Obj.repr b)
let unsafeTail a = Obj.obj (Obj.field (Obj.repr a) 1)
let head x = match x with [] -> None | x :: _ -> Some x
let headExn x =
match x with
| [] -> Js.Exn.raiseError "File \"\", line 94, characters 12-18"
| x :: _ -> x
let tail x = match x with [] -> None | _ :: xs -> Some xs
let tailExn x =
match x with
| [] -> Js.Exn.raiseError "File \"\", line 104, characters 12-18"
| _ :: t -> t
let add xs x = x :: xs
let rec nthAux x n =
match x with
| h :: t -> if n = 0 then Some h else nthAux t (n - 1)
| _ -> None
let rec nthAuxAssert x n =
match x with
| h :: t -> if n = 0 then h else nthAuxAssert t (n - 1)
| _ -> Js.Exn.raiseError "File \"\", line 118, characters 11-17"
let get x n = if n < 0 then None else nthAux x n
let getExn x n =
if n < 0 then Js.Exn.raiseError "File \"\", line 125, characters 18-24"
else nthAuxAssert x n
let rec partitionAux p cell precX precY =
match cell with
| [] -> ()
| h :: t ->
let next = mutableCell h [] in
if p h then (
unsafeMutateTail precX next;
partitionAux p t next precY)
else (
unsafeMutateTail precY next;
partitionAux p t precX next)
let rec splitAux cell precX precY =
match cell with
| [] -> ()
| (a, b) :: t ->
let nextA = mutableCell a [] in
let nextB = mutableCell b [] in
unsafeMutateTail precX nextA;
unsafeMutateTail precY nextB;
splitAux t nextA nextB
let rec copyAuxCont cellX prec =
match cellX with
| [] -> prec
| h :: t ->
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxCont t next
let rec copyAuxWitFilter f cellX prec =
match cellX with
| [] -> ()
| h :: t ->
if f h then (
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxWitFilter f t next)
else copyAuxWitFilter f t prec
let rec copyAuxWitFilterMap f cellX prec =
match cellX with
| [] -> ()
| h :: t -> (
match f h with
| Some h ->
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxWitFilterMap f t next
| None -> copyAuxWitFilterMap f t prec)
let rec removeAssocAuxWithMap cellX x prec f =
match cellX with
| [] -> false
| ((a, _) as h) :: t ->
if f a x then (
unsafeMutateTail prec t;
true)
else
let next = mutableCell h [] in
unsafeMutateTail prec next;
removeAssocAuxWithMap t x next f
let rec setAssocAuxWithMap cellX x k prec eq =
match cellX with
| [] -> false
| ((a, _) as h) :: t ->
if eq a x then (
unsafeMutateTail prec ((x, k) :: t);
true)
else
let next = mutableCell h [] in
unsafeMutateTail prec next;
setAssocAuxWithMap t x k next eq
let rec copyAuxWithMap cellX prec f =
match cellX with
| [] -> ()
| h :: t ->
let next = mutableCell (f h) [] in
unsafeMutateTail prec next;
copyAuxWithMap t next f
let rec zipAux cellX cellY prec =
match (cellX, cellY) with
| h1 :: t1, h2 :: t2 ->
let next = mutableCell (h1, h2) [] in
unsafeMutateTail prec next;
zipAux t1 t2 next
| [], _ | _, [] -> ()
let rec copyAuxWithMap2 f cellX cellY prec =
match (cellX, cellY) with
| h1 :: t1, h2 :: t2 ->
let next = mutableCell (f h1 h2) [] in
unsafeMutateTail prec next;
copyAuxWithMap2 f t1 t2 next
| [], _ | _, [] -> ()
let rec copyAuxWithMapI f i cellX prec =
match cellX with
| h :: t ->
let next = mutableCell (f i h) [] in
unsafeMutateTail prec next;
copyAuxWithMapI f (i + 1) t next
| [] -> ()
let rec takeAux n cell prec =
if n = 0 then true
else
match cell with
| [] -> false
| x :: xs ->
let cell = mutableCell x [] in
unsafeMutateTail prec cell;
takeAux (n - 1) xs cell
let rec splitAtAux n cell prec =
if n = 0 then Some cell
else
match cell with
| [] -> None
| x :: xs ->
let cell = mutableCell x [] in
unsafeMutateTail prec cell;
splitAtAux (n - 1) xs cell
let take lst n =
if n < 0 then None
else if n = 0 then Some []
else
match lst with
| [] -> None
| x :: xs ->
let cell = mutableCell x [] in
let has = takeAux (n - 1) xs cell in
if has then Some cell else None
let rec dropAux l n =
if n = 0 then Some l
else match l with _ :: tl -> dropAux tl (n - 1) | [] -> None
let drop lst n = if n < 0 then None else dropAux lst n
let splitAt lst n =
if n < 0 then None
else if n = 0 then Some ([], lst)
else
match lst with
| [] -> None
| x :: xs -> (
let cell = mutableCell x [] in
let rest = splitAtAux (n - 1) xs cell in
match rest with Some rest -> Some (cell, rest) | None -> None)
let concat xs ys =
match xs with
| [] -> ys
| h :: t ->
let cell = mutableCell h [] in
unsafeMutateTail (copyAuxCont t cell) ys;
cell
let mapU xs f =
match xs with
| [] -> []
| h :: t ->
let cell = mutableCell (f h) [] in
copyAuxWithMap t cell f;
cell
let map xs f = mapU xs (fun x -> f x)
let zipByU l1 l2 f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 ->
let cell = mutableCell (f a1 a2) [] in
copyAuxWithMap2 f l1 l2 cell;
cell
| [], _ | _, [] -> []
let zipBy l1 l2 f = zipByU l1 l2 (fun x y -> f x y)
let mapWithIndexU xs f =
match xs with
| [] -> []
| h :: t ->
let cell = mutableCell (f 0 h) [] in
copyAuxWithMapI f 1 t cell;
cell
let mapWithIndex xs f = mapWithIndexU xs (fun i x -> f i x)
let makeByU n f =
if n <= 0 then []
else
let headX = mutableCell (f 0) [] in
let cur = ref headX in
let i = ref 1 in
while !i < n do
let v = mutableCell (f !i) [] in
unsafeMutateTail !cur v;
cur := v;
incr i
done;
headX
let makeBy n f = makeByU n (fun x -> f x)
let make n v =
if n <= 0 then []
else
let headX = mutableCell v [] in
let cur = ref headX in
let i = ref 1 in
while !i < n do
let v = mutableCell v [] in
unsafeMutateTail !cur v;
cur := v;
incr i
done;
headX
let rec lengthAux x acc =
match x with [] -> acc | _ :: t -> lengthAux t (acc + 1)
let length xs = lengthAux xs 0
let size = length
let rec fillAux arr i x =
match x with
| [] -> ()
| h :: t ->
A.setUnsafe arr i h;
fillAux arr (i + 1) t
let rec fromArrayAux a i res =
if i < 0 then res else fromArrayAux a (i - 1) (A.getUnsafe a i :: res)
let fromArray a = fromArrayAux a (A.length a - 1) []
let toArray (x : _ t) =
let len = length x in
let arr =
match x with x :: _ -> A.makeUninitializedUnsafe len x | _ -> [||]
in
fillAux arr 0 x;
arr
let shuffle xs =
let v = toArray xs in
A.shuffleInPlace v;
fromArray v
let rec fillAuxMap arr i x f =
match x with
| [] -> ()
| h :: t ->
A.setUnsafe arr i (f h);
fillAuxMap arr (i + 1) t f
let rec reverseConcat l1 l2 =
match l1 with [] -> l2 | a :: l -> reverseConcat l (a :: l2)
let reverse l = reverseConcat l []
let rec flattenAux prec xs =
match xs with
| [] -> unsafeMutateTail prec []
| h :: r -> flattenAux (copyAuxCont h prec) r
let rec flatten xs =
match xs with
| [] -> []
| [] :: xs -> flatten xs
| (h :: t) :: r ->
let cell = mutableCell h [] in
flattenAux (copyAuxCont t cell) r;
cell
let concatMany xs =
match xs with
| [||] -> []
| [| x |] -> x
| _ ->
let len = A.length xs in
let v = ref (A.getUnsafe xs (len - 1)) in
for i = len - 2 downto 0 do
v := concat (A.getUnsafe xs i) !v
done;
!v
let rec mapRevAux f accu xs =
match xs with [] -> accu | a :: l -> mapRevAux f (f a :: accu) l
let mapReverseU l f = mapRevAux f [] l
let mapReverse l f = mapReverseU l (fun x -> f x)
let rec forEachU xs f =
match xs with
| [] -> ()
| a :: l ->
f a;
forEachU l f
let forEach xs f = forEachU xs (fun x -> f x)
let rec iteri xs i f =
match xs with
| [] -> ()
| a :: l ->
f i a;
iteri l (i + 1) f
let forEachWithIndexU l f = iteri l 0 f
let forEachWithIndex l f = forEachWithIndexU l (fun i x -> f i x)
let rec reduceU l accu f =
match l with [] -> accu | a :: l -> reduceU l (f accu a) f
let reduce l accu f = reduceU l accu (fun acc x -> f acc x)
let rec reduceReverseUnsafeU l accu f =
match l with [] -> accu | a :: l -> f (reduceReverseUnsafeU l accu f) a
let reduceReverseU (type a b) (l : a list) (acc : b) f =
let len = length l in
if len < 1000 then reduceReverseUnsafeU l acc f
else A.reduceReverseU (toArray l) acc f
let reduceReverse l accu f = reduceReverseU l accu (fun a b -> f a b)
let rec mapRevAux2 l1 l2 accu f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 -> mapRevAux2 l1 l2 (f a1 a2 :: accu) f
| _, [] | [], _ -> accu
let mapReverse2U l1 l2 f = mapRevAux2 l1 l2 [] f
let mapReverse2 l1 l2 f = mapReverse2U l1 l2 (fun a b -> f a b)
let rec forEach2U l1 l2 f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 ->
f a1 a2;
forEach2U l1 l2 f
| [], _ | _, [] -> ()
let forEach2 l1 l2 f = forEach2U l1 l2 (fun a b -> f a b)
let rec reduce2U l1 l2 accu f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 -> reduce2U l1 l2 (f accu a1 a2) f
| [], _ | _, [] -> accu
let reduce2 l1 l2 acc f = reduce2U l1 l2 acc (fun a b c -> f a b c)
let rec reduceReverse2UnsafeU l1 l2 accu f =
match (l1, l2) with
| [], [] -> accu
| a1 :: l1, a2 :: l2 -> f (reduceReverse2UnsafeU l1 l2 accu f) a1 a2
| _, [] | [], _ -> accu
let reduceReverse2U (type a b c) (l1 : a list) (l2 : b list) (acc : c) f =
let len = length l1 in
if len < 1000 then reduceReverse2UnsafeU l1 l2 acc f
else A.reduceReverse2U (toArray l1) (toArray l2) acc f
let reduceReverse2 l1 l2 acc f =
reduceReverse2U l1 l2 acc (fun a b c -> f a b c)
let rec everyU xs p = match xs with [] -> true | a :: l -> p a && everyU l p
let every xs p = everyU xs (fun x -> p x)
let rec someU xs p = match xs with [] -> false | a :: l -> p a || someU l p
let some xs p = someU xs (fun x -> p x)
let rec every2U l1 l2 p =
match (l1, l2) with
| _, [] | [], _ -> true
| a1 :: l1, a2 :: l2 -> p a1 a2 && every2U l1 l2 p
let every2 l1 l2 p = every2U l1 l2 (fun a b -> p a b)
let rec cmpByLength l1 l2 =
match (l1, l2) with
| [], [] -> 0
| _, [] -> 1
| [], _ -> -1
| _ :: l1s, _ :: l2s -> cmpByLength l1s l2s
let rec cmpU l1 l2 p =
match (l1, l2) with
| [], [] -> 0
| _, [] -> 1
| [], _ -> -1
| a1 :: l1, a2 :: l2 ->
let c = p a1 a2 in
if c = 0 then cmpU l1 l2 p else c
let cmp l1 l2 f = cmpU l1 l2 (fun x y -> f x y)
let rec eqU l1 l2 p =
match (l1, l2) with
| [], [] -> true
| _, [] | [], _ -> false
| a1 :: l1, a2 :: l2 -> if p a1 a2 then eqU l1 l2 p else false
let eq l1 l2 f = eqU l1 l2 (fun x y -> f x y)
let rec some2U l1 l2 p =
match (l1, l2) with
| [], _ | _, [] -> false
| a1 :: l1, a2 :: l2 -> p a1 a2 || some2U l1 l2 p
let some2 l1 l2 p = some2U l1 l2 (fun a b -> p a b)
let rec hasU xs x eq =
match xs with [] -> false | a :: l -> eq a x || hasU l x eq
let has xs x eq = hasU xs x (fun a b -> eq a b)
let rec getAssocU xs x eq =
match xs with
| [] -> None
| (a, b) :: l -> if eq a x then Some b else getAssocU l x eq
let getAssoc xs x eq = getAssocU xs x (fun a b -> eq a b)
let rec hasAssocU xs x eq =
match xs with [] -> false | (a, b) :: l -> eq a x || hasAssocU l x eq
let hasAssoc xs x eq = hasAssocU xs x (fun a b -> eq a b)
let removeAssocU xs x eq =
match xs with
| [] -> []
| ((a, _) as pair) :: l ->
if eq a x then l
else
let cell = mutableCell pair [] in
let removed = removeAssocAuxWithMap l x cell eq in
if removed then cell else xs
let removeAssoc xs x eq = removeAssocU xs x (fun a b -> eq a b)
let setAssocU xs x k eq =
match xs with
| [] -> [ (x, k) ]
| ((a, _) as pair) :: l ->
if eq a x then (x, k) :: l
else
let cell = mutableCell pair [] in
let replaced = setAssocAuxWithMap l x k cell eq in
if replaced then cell else (x, k) :: xs
let setAssoc xs x k eq = setAssocU xs x k (fun a b -> eq a b)
let sortU xs cmp =
let arr = toArray xs in
Belt_SortArray.stableSortInPlaceByU arr cmp;
fromArray arr
let sort xs cmp = sortU xs (fun x y -> cmp x y)
let rec getByU xs p =
match xs with [] -> None | x :: l -> if p x then Some x else getByU l p
let getBy xs p = getByU xs (fun a -> p a)
let rec keepU xs p =
match xs with
| [] -> []
| h :: t ->
if p h then (
let cell = mutableCell h [] in
copyAuxWitFilter p t cell;
cell)
else keepU t p
let keep xs p = keepU xs (fun x -> p x)
let rec keepMapU xs p =
match xs with
| [] -> []
| h :: t -> (
match p h with
| Some h ->
let cell = mutableCell h [] in
copyAuxWitFilterMap p t cell;
cell
| None -> keepMapU t p)
let keepMap xs p = keepMapU xs (fun x -> p x)
let partitionU l p =
match l with
| [] -> ([], [])
| h :: t ->
let nextX = mutableCell h [] in
let nextY = mutableCell h [] in
let b = p h in
partitionAux p t nextX nextY;
if b then (nextX, unsafeTail nextY) else (unsafeTail nextX, nextY)
let partition l p = partitionU l (fun x -> p x)
let rec unzip xs =
match xs with
| [] -> ([], [])
| (x, y) :: l ->
let cellX = mutableCell x [] in
let cellY = mutableCell y [] in
splitAux l cellX cellY;
(cellX, cellY)
let rec zip l1 l2 =
match (l1, l2) with
| _, [] | [], _ -> []
| a1 :: l1, a2 :: l2 ->
let cell = mutableCell (a1, a2) [] in
zipAux l1 l2 cell;
cell
| null | https://raw.githubusercontent.com/ml-in-barcelona/server-reason-react/a5d22907eb2633bcb8e77808f6c677802062953a/lib/belt/belt_List.ml | ocaml | type 'a t = 'a list
module A = Belt_Array
external mutableCell : 'a -> 'a t -> 'a t = "belt_makemutablelist"
let unsafeMutateTail a b = Obj.set_field (Obj.repr a) 1 (Obj.repr b)
let unsafeTail a = Obj.obj (Obj.field (Obj.repr a) 1)
let head x = match x with [] -> None | x :: _ -> Some x
let headExn x =
match x with
| [] -> Js.Exn.raiseError "File \"\", line 94, characters 12-18"
| x :: _ -> x
let tail x = match x with [] -> None | _ :: xs -> Some xs
let tailExn x =
match x with
| [] -> Js.Exn.raiseError "File \"\", line 104, characters 12-18"
| _ :: t -> t
let add xs x = x :: xs
let rec nthAux x n =
match x with
| h :: t -> if n = 0 then Some h else nthAux t (n - 1)
| _ -> None
let rec nthAuxAssert x n =
match x with
| h :: t -> if n = 0 then h else nthAuxAssert t (n - 1)
| _ -> Js.Exn.raiseError "File \"\", line 118, characters 11-17"
let get x n = if n < 0 then None else nthAux x n
let getExn x n =
if n < 0 then Js.Exn.raiseError "File \"\", line 125, characters 18-24"
else nthAuxAssert x n
let rec partitionAux p cell precX precY =
match cell with
| [] -> ()
| h :: t ->
let next = mutableCell h [] in
if p h then (
unsafeMutateTail precX next;
partitionAux p t next precY)
else (
unsafeMutateTail precY next;
partitionAux p t precX next)
let rec splitAux cell precX precY =
match cell with
| [] -> ()
| (a, b) :: t ->
let nextA = mutableCell a [] in
let nextB = mutableCell b [] in
unsafeMutateTail precX nextA;
unsafeMutateTail precY nextB;
splitAux t nextA nextB
let rec copyAuxCont cellX prec =
match cellX with
| [] -> prec
| h :: t ->
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxCont t next
let rec copyAuxWitFilter f cellX prec =
match cellX with
| [] -> ()
| h :: t ->
if f h then (
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxWitFilter f t next)
else copyAuxWitFilter f t prec
let rec copyAuxWitFilterMap f cellX prec =
match cellX with
| [] -> ()
| h :: t -> (
match f h with
| Some h ->
let next = mutableCell h [] in
unsafeMutateTail prec next;
copyAuxWitFilterMap f t next
| None -> copyAuxWitFilterMap f t prec)
let rec removeAssocAuxWithMap cellX x prec f =
match cellX with
| [] -> false
| ((a, _) as h) :: t ->
if f a x then (
unsafeMutateTail prec t;
true)
else
let next = mutableCell h [] in
unsafeMutateTail prec next;
removeAssocAuxWithMap t x next f
let rec setAssocAuxWithMap cellX x k prec eq =
match cellX with
| [] -> false
| ((a, _) as h) :: t ->
if eq a x then (
unsafeMutateTail prec ((x, k) :: t);
true)
else
let next = mutableCell h [] in
unsafeMutateTail prec next;
setAssocAuxWithMap t x k next eq
let rec copyAuxWithMap cellX prec f =
match cellX with
| [] -> ()
| h :: t ->
let next = mutableCell (f h) [] in
unsafeMutateTail prec next;
copyAuxWithMap t next f
let rec zipAux cellX cellY prec =
match (cellX, cellY) with
| h1 :: t1, h2 :: t2 ->
let next = mutableCell (h1, h2) [] in
unsafeMutateTail prec next;
zipAux t1 t2 next
| [], _ | _, [] -> ()
let rec copyAuxWithMap2 f cellX cellY prec =
match (cellX, cellY) with
| h1 :: t1, h2 :: t2 ->
let next = mutableCell (f h1 h2) [] in
unsafeMutateTail prec next;
copyAuxWithMap2 f t1 t2 next
| [], _ | _, [] -> ()
let rec copyAuxWithMapI f i cellX prec =
match cellX with
| h :: t ->
let next = mutableCell (f i h) [] in
unsafeMutateTail prec next;
copyAuxWithMapI f (i + 1) t next
| [] -> ()
let rec takeAux n cell prec =
if n = 0 then true
else
match cell with
| [] -> false
| x :: xs ->
let cell = mutableCell x [] in
unsafeMutateTail prec cell;
takeAux (n - 1) xs cell
let rec splitAtAux n cell prec =
if n = 0 then Some cell
else
match cell with
| [] -> None
| x :: xs ->
let cell = mutableCell x [] in
unsafeMutateTail prec cell;
splitAtAux (n - 1) xs cell
let take lst n =
if n < 0 then None
else if n = 0 then Some []
else
match lst with
| [] -> None
| x :: xs ->
let cell = mutableCell x [] in
let has = takeAux (n - 1) xs cell in
if has then Some cell else None
let rec dropAux l n =
if n = 0 then Some l
else match l with _ :: tl -> dropAux tl (n - 1) | [] -> None
let drop lst n = if n < 0 then None else dropAux lst n
let splitAt lst n =
if n < 0 then None
else if n = 0 then Some ([], lst)
else
match lst with
| [] -> None
| x :: xs -> (
let cell = mutableCell x [] in
let rest = splitAtAux (n - 1) xs cell in
match rest with Some rest -> Some (cell, rest) | None -> None)
let concat xs ys =
match xs with
| [] -> ys
| h :: t ->
let cell = mutableCell h [] in
unsafeMutateTail (copyAuxCont t cell) ys;
cell
let mapU xs f =
match xs with
| [] -> []
| h :: t ->
let cell = mutableCell (f h) [] in
copyAuxWithMap t cell f;
cell
let map xs f = mapU xs (fun x -> f x)
let zipByU l1 l2 f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 ->
let cell = mutableCell (f a1 a2) [] in
copyAuxWithMap2 f l1 l2 cell;
cell
| [], _ | _, [] -> []
let zipBy l1 l2 f = zipByU l1 l2 (fun x y -> f x y)
let mapWithIndexU xs f =
match xs with
| [] -> []
| h :: t ->
let cell = mutableCell (f 0 h) [] in
copyAuxWithMapI f 1 t cell;
cell
let mapWithIndex xs f = mapWithIndexU xs (fun i x -> f i x)
let makeByU n f =
if n <= 0 then []
else
let headX = mutableCell (f 0) [] in
let cur = ref headX in
let i = ref 1 in
while !i < n do
let v = mutableCell (f !i) [] in
unsafeMutateTail !cur v;
cur := v;
incr i
done;
headX
let makeBy n f = makeByU n (fun x -> f x)
let make n v =
if n <= 0 then []
else
let headX = mutableCell v [] in
let cur = ref headX in
let i = ref 1 in
while !i < n do
let v = mutableCell v [] in
unsafeMutateTail !cur v;
cur := v;
incr i
done;
headX
let rec lengthAux x acc =
match x with [] -> acc | _ :: t -> lengthAux t (acc + 1)
let length xs = lengthAux xs 0
let size = length
let rec fillAux arr i x =
match x with
| [] -> ()
| h :: t ->
A.setUnsafe arr i h;
fillAux arr (i + 1) t
let rec fromArrayAux a i res =
if i < 0 then res else fromArrayAux a (i - 1) (A.getUnsafe a i :: res)
let fromArray a = fromArrayAux a (A.length a - 1) []
let toArray (x : _ t) =
let len = length x in
let arr =
match x with x :: _ -> A.makeUninitializedUnsafe len x | _ -> [||]
in
fillAux arr 0 x;
arr
let shuffle xs =
let v = toArray xs in
A.shuffleInPlace v;
fromArray v
let rec fillAuxMap arr i x f =
match x with
| [] -> ()
| h :: t ->
A.setUnsafe arr i (f h);
fillAuxMap arr (i + 1) t f
let rec reverseConcat l1 l2 =
match l1 with [] -> l2 | a :: l -> reverseConcat l (a :: l2)
let reverse l = reverseConcat l []
let rec flattenAux prec xs =
match xs with
| [] -> unsafeMutateTail prec []
| h :: r -> flattenAux (copyAuxCont h prec) r
let rec flatten xs =
match xs with
| [] -> []
| [] :: xs -> flatten xs
| (h :: t) :: r ->
let cell = mutableCell h [] in
flattenAux (copyAuxCont t cell) r;
cell
let concatMany xs =
match xs with
| [||] -> []
| [| x |] -> x
| _ ->
let len = A.length xs in
let v = ref (A.getUnsafe xs (len - 1)) in
for i = len - 2 downto 0 do
v := concat (A.getUnsafe xs i) !v
done;
!v
let rec mapRevAux f accu xs =
match xs with [] -> accu | a :: l -> mapRevAux f (f a :: accu) l
let mapReverseU l f = mapRevAux f [] l
let mapReverse l f = mapReverseU l (fun x -> f x)
let rec forEachU xs f =
match xs with
| [] -> ()
| a :: l ->
f a;
forEachU l f
let forEach xs f = forEachU xs (fun x -> f x)
let rec iteri xs i f =
match xs with
| [] -> ()
| a :: l ->
f i a;
iteri l (i + 1) f
let forEachWithIndexU l f = iteri l 0 f
let forEachWithIndex l f = forEachWithIndexU l (fun i x -> f i x)
let rec reduceU l accu f =
match l with [] -> accu | a :: l -> reduceU l (f accu a) f
let reduce l accu f = reduceU l accu (fun acc x -> f acc x)
let rec reduceReverseUnsafeU l accu f =
match l with [] -> accu | a :: l -> f (reduceReverseUnsafeU l accu f) a
let reduceReverseU (type a b) (l : a list) (acc : b) f =
let len = length l in
if len < 1000 then reduceReverseUnsafeU l acc f
else A.reduceReverseU (toArray l) acc f
let reduceReverse l accu f = reduceReverseU l accu (fun a b -> f a b)
let rec mapRevAux2 l1 l2 accu f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 -> mapRevAux2 l1 l2 (f a1 a2 :: accu) f
| _, [] | [], _ -> accu
let mapReverse2U l1 l2 f = mapRevAux2 l1 l2 [] f
let mapReverse2 l1 l2 f = mapReverse2U l1 l2 (fun a b -> f a b)
let rec forEach2U l1 l2 f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 ->
f a1 a2;
forEach2U l1 l2 f
| [], _ | _, [] -> ()
let forEach2 l1 l2 f = forEach2U l1 l2 (fun a b -> f a b)
let rec reduce2U l1 l2 accu f =
match (l1, l2) with
| a1 :: l1, a2 :: l2 -> reduce2U l1 l2 (f accu a1 a2) f
| [], _ | _, [] -> accu
let reduce2 l1 l2 acc f = reduce2U l1 l2 acc (fun a b c -> f a b c)
let rec reduceReverse2UnsafeU l1 l2 accu f =
match (l1, l2) with
| [], [] -> accu
| a1 :: l1, a2 :: l2 -> f (reduceReverse2UnsafeU l1 l2 accu f) a1 a2
| _, [] | [], _ -> accu
let reduceReverse2U (type a b c) (l1 : a list) (l2 : b list) (acc : c) f =
let len = length l1 in
if len < 1000 then reduceReverse2UnsafeU l1 l2 acc f
else A.reduceReverse2U (toArray l1) (toArray l2) acc f
let reduceReverse2 l1 l2 acc f =
reduceReverse2U l1 l2 acc (fun a b c -> f a b c)
let rec everyU xs p = match xs with [] -> true | a :: l -> p a && everyU l p
let every xs p = everyU xs (fun x -> p x)
let rec someU xs p = match xs with [] -> false | a :: l -> p a || someU l p
let some xs p = someU xs (fun x -> p x)
let rec every2U l1 l2 p =
match (l1, l2) with
| _, [] | [], _ -> true
| a1 :: l1, a2 :: l2 -> p a1 a2 && every2U l1 l2 p
let every2 l1 l2 p = every2U l1 l2 (fun a b -> p a b)
let rec cmpByLength l1 l2 =
match (l1, l2) with
| [], [] -> 0
| _, [] -> 1
| [], _ -> -1
| _ :: l1s, _ :: l2s -> cmpByLength l1s l2s
let rec cmpU l1 l2 p =
match (l1, l2) with
| [], [] -> 0
| _, [] -> 1
| [], _ -> -1
| a1 :: l1, a2 :: l2 ->
let c = p a1 a2 in
if c = 0 then cmpU l1 l2 p else c
let cmp l1 l2 f = cmpU l1 l2 (fun x y -> f x y)
let rec eqU l1 l2 p =
match (l1, l2) with
| [], [] -> true
| _, [] | [], _ -> false
| a1 :: l1, a2 :: l2 -> if p a1 a2 then eqU l1 l2 p else false
let eq l1 l2 f = eqU l1 l2 (fun x y -> f x y)
let rec some2U l1 l2 p =
match (l1, l2) with
| [], _ | _, [] -> false
| a1 :: l1, a2 :: l2 -> p a1 a2 || some2U l1 l2 p
let some2 l1 l2 p = some2U l1 l2 (fun a b -> p a b)
let rec hasU xs x eq =
match xs with [] -> false | a :: l -> eq a x || hasU l x eq
let has xs x eq = hasU xs x (fun a b -> eq a b)
let rec getAssocU xs x eq =
match xs with
| [] -> None
| (a, b) :: l -> if eq a x then Some b else getAssocU l x eq
let getAssoc xs x eq = getAssocU xs x (fun a b -> eq a b)
let rec hasAssocU xs x eq =
match xs with [] -> false | (a, b) :: l -> eq a x || hasAssocU l x eq
let hasAssoc xs x eq = hasAssocU xs x (fun a b -> eq a b)
let removeAssocU xs x eq =
match xs with
| [] -> []
| ((a, _) as pair) :: l ->
if eq a x then l
else
let cell = mutableCell pair [] in
let removed = removeAssocAuxWithMap l x cell eq in
if removed then cell else xs
let removeAssoc xs x eq = removeAssocU xs x (fun a b -> eq a b)
let setAssocU xs x k eq =
match xs with
| [] -> [ (x, k) ]
| ((a, _) as pair) :: l ->
if eq a x then (x, k) :: l
else
let cell = mutableCell pair [] in
let replaced = setAssocAuxWithMap l x k cell eq in
if replaced then cell else (x, k) :: xs
let setAssoc xs x k eq = setAssocU xs x k (fun a b -> eq a b)
let sortU xs cmp =
let arr = toArray xs in
Belt_SortArray.stableSortInPlaceByU arr cmp;
fromArray arr
let sort xs cmp = sortU xs (fun x y -> cmp x y)
let rec getByU xs p =
match xs with [] -> None | x :: l -> if p x then Some x else getByU l p
let getBy xs p = getByU xs (fun a -> p a)
let rec keepU xs p =
match xs with
| [] -> []
| h :: t ->
if p h then (
let cell = mutableCell h [] in
copyAuxWitFilter p t cell;
cell)
else keepU t p
let keep xs p = keepU xs (fun x -> p x)
let rec keepMapU xs p =
match xs with
| [] -> []
| h :: t -> (
match p h with
| Some h ->
let cell = mutableCell h [] in
copyAuxWitFilterMap p t cell;
cell
| None -> keepMapU t p)
let keepMap xs p = keepMapU xs (fun x -> p x)
let partitionU l p =
match l with
| [] -> ([], [])
| h :: t ->
let nextX = mutableCell h [] in
let nextY = mutableCell h [] in
let b = p h in
partitionAux p t nextX nextY;
if b then (nextX, unsafeTail nextY) else (unsafeTail nextX, nextY)
let partition l p = partitionU l (fun x -> p x)
let rec unzip xs =
match xs with
| [] -> ([], [])
| (x, y) :: l ->
let cellX = mutableCell x [] in
let cellY = mutableCell y [] in
splitAux l cellX cellY;
(cellX, cellY)
let rec zip l1 l2 =
match (l1, l2) with
| _, [] | [], _ -> []
| a1 :: l1, a2 :: l2 ->
let cell = mutableCell (a1, a2) [] in
zipAux l1 l2 cell;
cell
| |
11fcf4274649779a0a983fbabff11502d05f85f64658729d2d6e176be6e20229 | jeopard/haskell-checking-account | ResponseSpec.hs | # LANGUAGE DeriveGeneric #
module Serializers.ResponseSpec (spec) where
import Data.Aeson (ToJSON, encode, toEncoding, genericToEncoding, defaultOptions)
import Data.ByteString.Lazy.Char8
import GHC.Generics
import Test.Hspec
import Serializers.Response
spec :: Spec
spec = do
successSpec
errorSpec
successSpec :: Spec
successSpec = do
describe "Success" $ do
successToJSONSpec
successToJSONSpec :: Spec
successToJSONSpec = do
describe "toJSON" $ do
it "renders the data correctly" $ do
let payload = SampleData { key1 = 35, key2 = "some value" }
serializer = Success payload
let result = unpack $ encode serializer
expectation = "{\"status\":\"success\",\"data\":{\"key2\":\"some value\",\"key1\":35}}"
result `shouldBe` expectation
-- sample data structure to use in our spec
data SampleData = SampleData { key1 :: Int, key2 :: String } deriving (Generic)
instance ToJSON SampleData where
toEncoding = genericToEncoding defaultOptions
errorSpec :: Spec
errorSpec = do
describe "Success" $ do
errorToJSONSpec
errorToJSONSpec :: Spec
errorToJSONSpec = do
describe "toJSON" $ do
it "renders the data correctly" $ do
let serializer = Error "some error message"
let result = unpack $ encode serializer
expectation = "{\"status\":\"error\",\"message\":\"some error message\"}"
result `shouldBe` expectation
| null | https://raw.githubusercontent.com/jeopard/haskell-checking-account/27a889e507ad830ccb476a9663a5ab62aba8baa7/test/Serializers/ResponseSpec.hs | haskell | sample data structure to use in our spec | # LANGUAGE DeriveGeneric #
module Serializers.ResponseSpec (spec) where
import Data.Aeson (ToJSON, encode, toEncoding, genericToEncoding, defaultOptions)
import Data.ByteString.Lazy.Char8
import GHC.Generics
import Test.Hspec
import Serializers.Response
spec :: Spec
spec = do
successSpec
errorSpec
successSpec :: Spec
successSpec = do
describe "Success" $ do
successToJSONSpec
successToJSONSpec :: Spec
successToJSONSpec = do
describe "toJSON" $ do
it "renders the data correctly" $ do
let payload = SampleData { key1 = 35, key2 = "some value" }
serializer = Success payload
let result = unpack $ encode serializer
expectation = "{\"status\":\"success\",\"data\":{\"key2\":\"some value\",\"key1\":35}}"
result `shouldBe` expectation
data SampleData = SampleData { key1 :: Int, key2 :: String } deriving (Generic)
instance ToJSON SampleData where
toEncoding = genericToEncoding defaultOptions
errorSpec :: Spec
errorSpec = do
describe "Success" $ do
errorToJSONSpec
errorToJSONSpec :: Spec
errorToJSONSpec = do
describe "toJSON" $ do
it "renders the data correctly" $ do
let serializer = Error "some error message"
let result = unpack $ encode serializer
expectation = "{\"status\":\"error\",\"message\":\"some error message\"}"
result `shouldBe` expectation
|
0ea0ccba056a9682d95549ae6c7fe581ba0d36c261528f7e9bb2779d355a890e | zxymike93/SICP | 210.rkt | #lang sicp
(define make-interval cons)
(define lower-bound car)
(define upper-bound cdr)
(define (print-interval x)
(display "[")
(display (lower-bound x))
(display ",")
(display (upper-bound x))
(display "]"))
(define (mul-interval x y)
(let ([p1 (* (lower-bound x) (lower-bound y))]
[p2 (* (lower-bound x) (upper-bound y))]
[p3 (* (upper-bound x) (lower-bound y))]
[p4 (* (upper-bound x) (upper-bound y))])
(make-interval (min p1 p2 p3 p4)
(max p1 p2 p3 p4))))
修改区间除法,当出现除数跨0时,发出error
(define (div-interval x y)
(if (and (< (lower-bound y) 0)
(> (upper-bound y) 0))
(error "Can not divide by an interval that spans zero.")
(mul-interval x
(make-interval
(/ 1. (upper-bound y))
(/ 1. (lower-bound y))))))
;; tests
(define a (make-interval 4 8))
(define b (make-interval -2 3))
(print-interval (div-interval a b)) | null | https://raw.githubusercontent.com/zxymike93/SICP/9d8e84d6a185bf4d7f28c414fc3359741384beb5/chapter2/210.rkt | racket | tests | #lang sicp
(define make-interval cons)
(define lower-bound car)
(define upper-bound cdr)
(define (print-interval x)
(display "[")
(display (lower-bound x))
(display ",")
(display (upper-bound x))
(display "]"))
(define (mul-interval x y)
(let ([p1 (* (lower-bound x) (lower-bound y))]
[p2 (* (lower-bound x) (upper-bound y))]
[p3 (* (upper-bound x) (lower-bound y))]
[p4 (* (upper-bound x) (upper-bound y))])
(make-interval (min p1 p2 p3 p4)
(max p1 p2 p3 p4))))
修改区间除法,当出现除数跨0时,发出error
(define (div-interval x y)
(if (and (< (lower-bound y) 0)
(> (upper-bound y) 0))
(error "Can not divide by an interval that spans zero.")
(mul-interval x
(make-interval
(/ 1. (upper-bound y))
(/ 1. (lower-bound y))))))
(define a (make-interval 4 8))
(define b (make-interval -2 3))
(print-interval (div-interval a b)) |
a489674dc5ac68f952a48b73c260d01c44ac6093d36d3ca9b0f7e441096c2a51 | MarcWeber/hasktags | testcase11.hs | \section[GHC.Base]{Module @GHC.Base@}
simple lhs test
to be found Monad
Other Prelude modules are much easier with fewer complex dependencies.
\begin{code}
| The ' Functor ' class is used for types that can be mapped over .
Instances of ' Functor ' should satisfy the following laws :
> fmap i d = = i d
> fmap ( f . ) = = fmap f .
The instances of ' Functor ' for lists , ' Data . Maybe . Maybe ' and ' System . IO.IO '
satisfy these laws .
Instances of 'Functor' should satisfy the following laws:
> fmap id == id
> fmap (f . g) == fmap f . fmap g
The instances of 'Functor' for lists, 'Data.Maybe.Maybe' and 'System.IO.IO'
satisfy these laws.
-}
class Functor f where
fmap :: (a -> b) -> f a -> f b
-- | Replace all locations in the input with the same value.
The default definition is @'fmap ' . ' const'@ , but this may be
-- overridden with a more efficient version.
(<$) :: a -> f b -> f a
(<$) = fmap . const
| The ' Monad ' class defines the basic operations over a /monad/ ,
a concept from a branch of mathematics known as /category theory/.
From the perspective of a programmer , however , it is best to
think of a monad as an /abstract datatype/ of actions .
's @do@ expressions provide a convenient syntax for writing
monadic expressions .
Minimal complete definition : ' > > = ' and ' return ' .
Instances of ' Monad ' should satisfy the following laws :
> return a > > = k = = k a
> m > > = return = = m
> m > > = ( \x - > k x > > = h ) = = ( m > > = k ) > > = h
Instances of both ' Monad ' and ' Functor ' should additionally satisfy the law :
> fmap f xs = = xs > > = return . f
The instances of ' Monad ' for lists , ' Data . Maybe . Maybe ' and ' System . IO.IO '
defined in the " Prelude " satisfy these laws .
a concept from a branch of mathematics known as /category theory/.
From the perspective of a Haskell programmer, however, it is best to
think of a monad as an /abstract datatype/ of actions.
Haskell's @do@ expressions provide a convenient syntax for writing
monadic expressions.
Minimal complete definition: '>>=' and 'return'.
Instances of 'Monad' should satisfy the following laws:
> return a >>= k == k a
> m >>= return == m
> m >>= (\x -> k x >>= h) == (m >>= k) >>= h
Instances of both 'Monad' and 'Functor' should additionally satisfy the law:
> fmap f xs == xs >>= return . f
The instances of 'Monad' for lists, 'Data.Maybe.Maybe' and 'System.IO.IO'
defined in the "Prelude" satisfy these laws.
-}
class Monad m where
| Sequentially compose two actions , passing any value produced
by the first as an argument to the second .
(>>=) :: forall a b. m a -> (a -> m b) -> m b
| Sequentially compose two actions , discarding any value produced
by the first , like sequencing operators ( such as the semicolon )
-- in imperative languages.
(>>) :: forall a b. m a -> m b -> m b
-- Explicit for-alls so that we know what order to
-- give type arguments when desugaring
-- | Inject a value into the monadic type.
return :: a -> m a
-- | Fail with a message. This operation is not part of the
-- mathematical definition of a monad, but is invoked on pattern-match
-- failure in a @do@ expression.
fail :: String -> m a
{-# INLINE (>>) #-}
m >> k = m >>= \_ -> k
fail s = error s
instance Functor ((->) r) where
fmap = (.)
instance Monad ((->) r) where
return = const
f >>= k = \ r -> k (f r) r
instance Functor ((,) a) where
fmap f (x,y) = (x, f y)
\end{code}
| null | https://raw.githubusercontent.com/MarcWeber/hasktags/65bcbecb695f0d2f31c2436958480535b8193b6c/testcases/testcase11.hs | haskell | | Replace all locations in the input with the same value.
overridden with a more efficient version.
in imperative languages.
Explicit for-alls so that we know what order to
give type arguments when desugaring
| Inject a value into the monadic type.
| Fail with a message. This operation is not part of the
mathematical definition of a monad, but is invoked on pattern-match
failure in a @do@ expression.
# INLINE (>>) # | \section[GHC.Base]{Module @GHC.Base@}
simple lhs test
to be found Monad
Other Prelude modules are much easier with fewer complex dependencies.
\begin{code}
| The ' Functor ' class is used for types that can be mapped over .
Instances of ' Functor ' should satisfy the following laws :
> fmap i d = = i d
> fmap ( f . ) = = fmap f .
The instances of ' Functor ' for lists , ' Data . Maybe . Maybe ' and ' System . IO.IO '
satisfy these laws .
Instances of 'Functor' should satisfy the following laws:
> fmap id == id
> fmap (f . g) == fmap f . fmap g
The instances of 'Functor' for lists, 'Data.Maybe.Maybe' and 'System.IO.IO'
satisfy these laws.
-}
class Functor f where
fmap :: (a -> b) -> f a -> f b
The default definition is @'fmap ' . ' const'@ , but this may be
(<$) :: a -> f b -> f a
(<$) = fmap . const
| The ' Monad ' class defines the basic operations over a /monad/ ,
a concept from a branch of mathematics known as /category theory/.
From the perspective of a programmer , however , it is best to
think of a monad as an /abstract datatype/ of actions .
's @do@ expressions provide a convenient syntax for writing
monadic expressions .
Minimal complete definition : ' > > = ' and ' return ' .
Instances of ' Monad ' should satisfy the following laws :
> return a > > = k = = k a
> m > > = return = = m
> m > > = ( \x - > k x > > = h ) = = ( m > > = k ) > > = h
Instances of both ' Monad ' and ' Functor ' should additionally satisfy the law :
> fmap f xs = = xs > > = return . f
The instances of ' Monad ' for lists , ' Data . Maybe . Maybe ' and ' System . IO.IO '
defined in the " Prelude " satisfy these laws .
a concept from a branch of mathematics known as /category theory/.
From the perspective of a Haskell programmer, however, it is best to
think of a monad as an /abstract datatype/ of actions.
Haskell's @do@ expressions provide a convenient syntax for writing
monadic expressions.
Minimal complete definition: '>>=' and 'return'.
Instances of 'Monad' should satisfy the following laws:
> return a >>= k == k a
> m >>= return == m
> m >>= (\x -> k x >>= h) == (m >>= k) >>= h
Instances of both 'Monad' and 'Functor' should additionally satisfy the law:
> fmap f xs == xs >>= return . f
The instances of 'Monad' for lists, 'Data.Maybe.Maybe' and 'System.IO.IO'
defined in the "Prelude" satisfy these laws.
-}
class Monad m where
| Sequentially compose two actions , passing any value produced
by the first as an argument to the second .
(>>=) :: forall a b. m a -> (a -> m b) -> m b
| Sequentially compose two actions , discarding any value produced
by the first , like sequencing operators ( such as the semicolon )
(>>) :: forall a b. m a -> m b -> m b
return :: a -> m a
fail :: String -> m a
m >> k = m >>= \_ -> k
fail s = error s
instance Functor ((->) r) where
fmap = (.)
instance Monad ((->) r) where
return = const
f >>= k = \ r -> k (f r) r
instance Functor ((,) a) where
fmap f (x,y) = (x, f y)
\end{code}
|
6f0cf36ca61a48c5a8aea2dae1790dc25ad4bbcaa4a4b50f247f53c544f6b0eb | thegeez/clj-board | sessions.clj | (ns net.thegeez.clj-board.sessions
(:require [io.pedestal.log :as log]
[io.pedestal.interceptor :as interceptor]
[clojure.java.jdbc :as jdbc]
[net.thegeez.clj-board.jwt :as jwt]))
(defn find-or-create-user [db username email]
(log/info :foc username :e email)
(if-let [user (first (jdbc/query db ["Select * from users where email = ?" email]))]
{:id (:id user)
:username (:username user)
:email (:email user)}
(let [now (.getTime (java.util.Date.))
user-id (first (map (fn [r]
(or (:1 r)
(:id r)))
(jdbc/insert! db :users
{:username username
:email email
:created_at now
:updated_at now})))
demo-board-id (:id (first (jdbc/query db ["select id from boards where slug = ?" "demo-board"])))]
;; assign to demo board
(jdbc/insert! db :user_board
{:user_id user-id
:board_id demo-board-id})
(find-or-create-user db username email))))
(def create
(interceptor/interceptor
{:enter (fn [context]
(let [{:keys [username email]} (get-in context [:request :transit-params])]
(let [user (find-or-create-user (:database context)
username
email)]
(assoc context :response
{:status 201
:body {:user user
:jwt (jwt/jwt-encode user)}})
;; find-or-create-user will always succeed
#_(assoc context :response
{:status 422
:body {:error "Invalid username, email or password."}}))))}))
(def get-user
(interceptor/interceptor
{:enter (fn [context]
(assoc context :response
{:status 200
:body {:user (:user context)}}))}))
| null | https://raw.githubusercontent.com/thegeez/clj-board/e162d6097ce7b66438f7529418bf48af66e34fa8/src/clj/net/thegeez/clj_board/sessions.clj | clojure | assign to demo board
find-or-create-user will always succeed | (ns net.thegeez.clj-board.sessions
(:require [io.pedestal.log :as log]
[io.pedestal.interceptor :as interceptor]
[clojure.java.jdbc :as jdbc]
[net.thegeez.clj-board.jwt :as jwt]))
(defn find-or-create-user [db username email]
(log/info :foc username :e email)
(if-let [user (first (jdbc/query db ["Select * from users where email = ?" email]))]
{:id (:id user)
:username (:username user)
:email (:email user)}
(let [now (.getTime (java.util.Date.))
user-id (first (map (fn [r]
(or (:1 r)
(:id r)))
(jdbc/insert! db :users
{:username username
:email email
:created_at now
:updated_at now})))
demo-board-id (:id (first (jdbc/query db ["select id from boards where slug = ?" "demo-board"])))]
(jdbc/insert! db :user_board
{:user_id user-id
:board_id demo-board-id})
(find-or-create-user db username email))))
(def create
(interceptor/interceptor
{:enter (fn [context]
(let [{:keys [username email]} (get-in context [:request :transit-params])]
(let [user (find-or-create-user (:database context)
username
email)]
(assoc context :response
{:status 201
:body {:user user
:jwt (jwt/jwt-encode user)}})
#_(assoc context :response
{:status 422
:body {:error "Invalid username, email or password."}}))))}))
(def get-user
(interceptor/interceptor
{:enter (fn [context]
(assoc context :response
{:status 200
:body {:user (:user context)}}))}))
|
392a4b370a376d23246855563efab2bdfaed152d8322bf84f1f11f6857d9330a | purescript/purescript | Make.hs | module Language.PureScript.Make
(
-- * Make API
rebuildModule
, rebuildModule'
, make
, inferForeignModules
, module Monad
, module Actions
) where
import Prelude
import Control.Concurrent.Lifted as C
import Control.Exception.Base (onException)
import Control.Monad hiding (sequence)
import Control.Monad.Error.Class (MonadError(..))
import Control.Monad.IO.Class
import Control.Monad.Supply
import Control.Monad.Trans.Control (MonadBaseControl(..), control)
import Control.Monad.Trans.State (runStateT)
import Control.Monad.Writer.Class (MonadWriter(..), censor)
import Control.Monad.Writer.Strict (runWriterT)
import Data.Function (on)
import Data.Foldable (fold, for_)
import Data.List (foldl', sortOn)
import qualified Data.List.NonEmpty as NEL
import Data.Maybe (fromMaybe)
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
import Language.PureScript.AST
import Language.PureScript.Crash
import qualified Language.PureScript.CST as CST
import qualified Language.PureScript.Docs.Convert as Docs
import Language.PureScript.Environment
import Language.PureScript.Errors
import Language.PureScript.Externs
import Language.PureScript.Linter
import Language.PureScript.ModuleDependencies
import Language.PureScript.Names
import Language.PureScript.Renamer
import Language.PureScript.Sugar
import Language.PureScript.TypeChecker
import Language.PureScript.Make.BuildPlan
import qualified Language.PureScript.Make.BuildPlan as BuildPlan
import qualified Language.PureScript.Make.Cache as Cache
import Language.PureScript.Make.Actions as Actions
import Language.PureScript.Make.Monad as Monad
import qualified Language.PureScript.CoreFn as CF
import System.Directory (doesFileExist)
import System.FilePath (replaceExtension)
-- | Rebuild a single module.
--
-- This function is used for fast-rebuild workflows (PSCi and psc-ide are examples).
rebuildModule
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> [ExternsFile]
-> Module
-> m ExternsFile
rebuildModule actions externs m = do
env <- fmap fst . runWriterT $ foldM externsEnv primEnv externs
rebuildModule' actions env externs m
rebuildModule'
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> Env
-> [ExternsFile]
-> Module
-> m ExternsFile
rebuildModule' act env ext mdl = rebuildModuleWithIndex act env ext mdl Nothing
rebuildModuleWithIndex
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> Env
-> [ExternsFile]
-> Module
-> Maybe (Int, Int)
-> m ExternsFile
rebuildModuleWithIndex MakeActions{..} exEnv externs m@(Module _ _ moduleName _ _) moduleIndex = do
progress $ CompilingModule moduleName moduleIndex
let env = foldl' (flip applyExternsFileToEnvironment) initEnvironment externs
withPrim = importPrim m
lint withPrim
((Module ss coms _ elaborated exps, env'), nextVar) <- runSupplyT 0 $ do
(desugared, (exEnv', usedImports)) <- runStateT (desugar externs withPrim) (exEnv, mempty)
let modulesExports = (\(_, _, exports) -> exports) <$> exEnv'
(checked, CheckState{..}) <- runStateT (typeCheckModule modulesExports desugared) $ emptyCheckState env
let usedImports' = foldl' (flip $ \(fromModuleName, newtypeCtorName) ->
M.alter (Just . (fmap DctorName newtypeCtorName :) . fold) fromModuleName) usedImports checkConstructorImportsForCoercible
-- Imports cannot be linted before type checking because we need to
-- known which newtype constructors are used to solve Coercible
-- constraints in order to not report them as unused.
censor (addHint (ErrorInModule moduleName)) $ lintImports checked exEnv' usedImports'
return (checked, checkEnv)
-- desugar case declarations *after* type- and exhaustiveness checking
-- since pattern guards introduces cases which the exhaustiveness checker
-- reports as not-exhaustive.
(deguarded, nextVar') <- runSupplyT nextVar $ do
desugarCaseGuards elaborated
regrouped <- createBindingGroups moduleName . collapseBindingGroups $ deguarded
let mod' = Module ss coms moduleName regrouped exps
corefn = CF.moduleToCoreFn env' mod'
(optimized, nextVar'') = runSupply nextVar' $ CF.optimizeCoreFn corefn
(renamedIdents, renamed) = renameInModule optimized
exts = moduleToExternsFile mod' env' renamedIdents
ffiCodegen renamed
-- It may seem more obvious to write `docs <- Docs.convertModule m env' here,
but I have not done so for two reasons :
1 . This should never fail ; any genuine errors in the code should have been
-- caught earlier in this function. Therefore if we do fail here it indicates
-- a bug in the compiler, which should be reported as such.
2 . We do not want to perform any extra work generating docs unless the
-- user has asked for docs to be generated.
let docs = case Docs.convertModule externs exEnv env' m of
Left errs -> internalError $
"Failed to produce docs for " ++ T.unpack (runModuleName moduleName)
++ "; details:\n" ++ prettyPrintMultipleErrors defaultPPEOptions errs
Right d -> d
evalSupplyT nextVar'' $ codegen renamed docs exts
return exts
-- | Compiles in "make" mode, compiling each module separately to a @.js@ file and an @externs.cbor@ file.
--
If timestamps or hashes have not changed , existing externs files can be used to provide upstream modules ' types without
-- having to typecheck those modules again.
make :: forall m. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> [CST.PartialResult Module]
-> m [ExternsFile]
make ma@MakeActions{..} ms = do
checkModuleNames
cacheDb <- readCacheDb
(sorted, graph) <- sortModules Transitive (moduleSignature . CST.resPartial) ms
(buildPlan, newCacheDb) <- BuildPlan.construct ma cacheDb (sorted, graph)
let toBeRebuilt = filter (BuildPlan.needsRebuild buildPlan . getModuleName . CST.resPartial) sorted
let totalModuleCount = length toBeRebuilt
for_ toBeRebuilt $ \m -> fork $ do
let moduleName = getModuleName . CST.resPartial $ m
let deps = fromMaybe (internalError "make: module not found in dependency graph.") (lookup moduleName graph)
buildModule buildPlan moduleName totalModuleCount
(spanName . getModuleSourceSpan . CST.resPartial $ m)
(fst $ CST.resFull m)
(fmap importPrim . snd $ CST.resFull m)
(deps `inOrderOf` map (getModuleName . CST.resPartial) sorted)
-- Prevent hanging on other modules when there is an internal error
-- (the exception is thrown, but other threads waiting on MVars are released)
`onExceptionLifted` BuildPlan.markComplete buildPlan moduleName (BuildJobFailed mempty)
-- Wait for all threads to complete, and collect results (and errors).
(failures, successes) <-
let
splitResults = \case
BuildJobSucceeded _ exts ->
Right exts
BuildJobFailed errs ->
Left errs
BuildJobSkipped ->
Left mempty
in
M.mapEither splitResults <$> BuildPlan.collectResults buildPlan
-- Write the updated build cache database to disk
writeCacheDb $ Cache.removeModules (M.keysSet failures) newCacheDb
writePackageJson
If generating docs , also generate them for the Prim modules
outputPrimDocs
-- All threads have completed, rethrow any caught errors.
let errors = M.elems failures
unless (null errors) $ throwError (mconcat errors)
Here we return all the ExternsFile in the ordering of the topological sort ,
-- so they can be folded into an Environment. This result is used in the tests
and in PSCI .
let lookupResult mn =
fromMaybe (internalError "make: module not found in results")
$ M.lookup mn successes
return (map (lookupResult . getModuleName . CST.resPartial) sorted)
where
checkModuleNames :: m ()
checkModuleNames = checkNoPrim *> checkModuleNamesAreUnique
checkNoPrim :: m ()
checkNoPrim =
for_ ms $ \m ->
let mn = getModuleName $ CST.resPartial m
in when (isBuiltinModuleName mn) $
throwError
. errorMessage' (getModuleSourceSpan $ CST.resPartial m)
$ CannotDefinePrimModules mn
checkModuleNamesAreUnique :: m ()
checkModuleNamesAreUnique =
for_ (findDuplicates (getModuleName . CST.resPartial) ms) $ \mss ->
throwError . flip foldMap mss $ \ms' ->
let mn = getModuleName . CST.resPartial . NEL.head $ ms'
in errorMessage'' (fmap (getModuleSourceSpan . CST.resPartial) ms') $ DuplicateModule mn
-- Find all groups of duplicate values in a list based on a projection.
findDuplicates :: Ord b => (a -> b) -> [a] -> Maybe [NEL.NonEmpty a]
findDuplicates f xs =
case filter ((> 1) . length) . NEL.groupBy ((==) `on` f) . sortOn f $ xs of
[] -> Nothing
xss -> Just xss
-- Sort a list so its elements appear in the same order as in another list.
inOrderOf :: (Ord a) => [a] -> [a] -> [a]
inOrderOf xs ys = let s = S.fromList xs in filter (`S.member` s) ys
buildModule :: BuildPlan -> ModuleName -> Int -> FilePath -> [CST.ParserWarning] -> Either (NEL.NonEmpty CST.ParserError) Module -> [ModuleName] -> m ()
buildModule buildPlan moduleName cnt fp pwarnings mres deps = do
result <- flip catchError (return . BuildJobFailed) $ do
let pwarnings' = CST.toMultipleWarnings fp pwarnings
tell pwarnings'
m <- CST.unwrapParserError fp mres
-- We need to wait for dependencies to be built, before checking if the current
module should be rebuilt , so the first thing to do is to wait on the
-- MVars for the module's dependencies.
mexterns <- fmap unzip . sequence <$> traverse (getResult buildPlan) deps
case mexterns of
Just (_, externs) -> do
-- We need to ensure that all dependencies have been included in Env
C.modifyMVar_ (bpEnv buildPlan) $ \env -> do
let
go :: Env -> ModuleName -> m Env
go e dep = case lookup dep (zip deps externs) of
Just exts
| not (M.member dep e) -> externsEnv e exts
_ -> return e
foldM go env deps
env <- C.readMVar (bpEnv buildPlan)
idx <- C.takeMVar (bpIndex buildPlan)
C.putMVar (bpIndex buildPlan) (idx + 1)
(exts, warnings) <- listen $ rebuildModuleWithIndex ma env externs m (Just (idx, cnt))
return $ BuildJobSucceeded (pwarnings' <> warnings) exts
Nothing -> return BuildJobSkipped
BuildPlan.markComplete buildPlan moduleName result
onExceptionLifted :: m a -> m b -> m a
onExceptionLifted l r = control $ \runInIO -> runInIO l `onException` runInIO r
-- | Infer the module name for a module by looking for the same filename with
-- a .js extension.
inferForeignModules
:: forall m
. MonadIO m
=> M.Map ModuleName (Either RebuildPolicy FilePath)
-> m (M.Map ModuleName FilePath)
inferForeignModules =
fmap (M.mapMaybe id) . traverse inferForeignModule
where
inferForeignModule :: Either RebuildPolicy FilePath -> m (Maybe FilePath)
inferForeignModule (Left _) = return Nothing
inferForeignModule (Right path) = do
let jsFile = replaceExtension path "js"
exists <- liftIO $ doesFileExist jsFile
if exists
then return (Just jsFile)
else return Nothing
| null | https://raw.githubusercontent.com/purescript/purescript/b71cb532c7d8d97505376cb528080ca3046615fe/src/Language/PureScript/Make.hs | haskell | * Make API
| Rebuild a single module.
This function is used for fast-rebuild workflows (PSCi and psc-ide are examples).
Imports cannot be linted before type checking because we need to
known which newtype constructors are used to solve Coercible
constraints in order to not report them as unused.
desugar case declarations *after* type- and exhaustiveness checking
since pattern guards introduces cases which the exhaustiveness checker
reports as not-exhaustive.
It may seem more obvious to write `docs <- Docs.convertModule m env' here,
caught earlier in this function. Therefore if we do fail here it indicates
a bug in the compiler, which should be reported as such.
user has asked for docs to be generated.
| Compiles in "make" mode, compiling each module separately to a @.js@ file and an @externs.cbor@ file.
having to typecheck those modules again.
Prevent hanging on other modules when there is an internal error
(the exception is thrown, but other threads waiting on MVars are released)
Wait for all threads to complete, and collect results (and errors).
Write the updated build cache database to disk
All threads have completed, rethrow any caught errors.
so they can be folded into an Environment. This result is used in the tests
Find all groups of duplicate values in a list based on a projection.
Sort a list so its elements appear in the same order as in another list.
We need to wait for dependencies to be built, before checking if the current
MVars for the module's dependencies.
We need to ensure that all dependencies have been included in Env
| Infer the module name for a module by looking for the same filename with
a .js extension. | module Language.PureScript.Make
(
rebuildModule
, rebuildModule'
, make
, inferForeignModules
, module Monad
, module Actions
) where
import Prelude
import Control.Concurrent.Lifted as C
import Control.Exception.Base (onException)
import Control.Monad hiding (sequence)
import Control.Monad.Error.Class (MonadError(..))
import Control.Monad.IO.Class
import Control.Monad.Supply
import Control.Monad.Trans.Control (MonadBaseControl(..), control)
import Control.Monad.Trans.State (runStateT)
import Control.Monad.Writer.Class (MonadWriter(..), censor)
import Control.Monad.Writer.Strict (runWriterT)
import Data.Function (on)
import Data.Foldable (fold, for_)
import Data.List (foldl', sortOn)
import qualified Data.List.NonEmpty as NEL
import Data.Maybe (fromMaybe)
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
import Language.PureScript.AST
import Language.PureScript.Crash
import qualified Language.PureScript.CST as CST
import qualified Language.PureScript.Docs.Convert as Docs
import Language.PureScript.Environment
import Language.PureScript.Errors
import Language.PureScript.Externs
import Language.PureScript.Linter
import Language.PureScript.ModuleDependencies
import Language.PureScript.Names
import Language.PureScript.Renamer
import Language.PureScript.Sugar
import Language.PureScript.TypeChecker
import Language.PureScript.Make.BuildPlan
import qualified Language.PureScript.Make.BuildPlan as BuildPlan
import qualified Language.PureScript.Make.Cache as Cache
import Language.PureScript.Make.Actions as Actions
import Language.PureScript.Make.Monad as Monad
import qualified Language.PureScript.CoreFn as CF
import System.Directory (doesFileExist)
import System.FilePath (replaceExtension)
rebuildModule
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> [ExternsFile]
-> Module
-> m ExternsFile
rebuildModule actions externs m = do
env <- fmap fst . runWriterT $ foldM externsEnv primEnv externs
rebuildModule' actions env externs m
rebuildModule'
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> Env
-> [ExternsFile]
-> Module
-> m ExternsFile
rebuildModule' act env ext mdl = rebuildModuleWithIndex act env ext mdl Nothing
rebuildModuleWithIndex
:: forall m
. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> Env
-> [ExternsFile]
-> Module
-> Maybe (Int, Int)
-> m ExternsFile
rebuildModuleWithIndex MakeActions{..} exEnv externs m@(Module _ _ moduleName _ _) moduleIndex = do
progress $ CompilingModule moduleName moduleIndex
let env = foldl' (flip applyExternsFileToEnvironment) initEnvironment externs
withPrim = importPrim m
lint withPrim
((Module ss coms _ elaborated exps, env'), nextVar) <- runSupplyT 0 $ do
(desugared, (exEnv', usedImports)) <- runStateT (desugar externs withPrim) (exEnv, mempty)
let modulesExports = (\(_, _, exports) -> exports) <$> exEnv'
(checked, CheckState{..}) <- runStateT (typeCheckModule modulesExports desugared) $ emptyCheckState env
let usedImports' = foldl' (flip $ \(fromModuleName, newtypeCtorName) ->
M.alter (Just . (fmap DctorName newtypeCtorName :) . fold) fromModuleName) usedImports checkConstructorImportsForCoercible
censor (addHint (ErrorInModule moduleName)) $ lintImports checked exEnv' usedImports'
return (checked, checkEnv)
(deguarded, nextVar') <- runSupplyT nextVar $ do
desugarCaseGuards elaborated
regrouped <- createBindingGroups moduleName . collapseBindingGroups $ deguarded
let mod' = Module ss coms moduleName regrouped exps
corefn = CF.moduleToCoreFn env' mod'
(optimized, nextVar'') = runSupply nextVar' $ CF.optimizeCoreFn corefn
(renamedIdents, renamed) = renameInModule optimized
exts = moduleToExternsFile mod' env' renamedIdents
ffiCodegen renamed
but I have not done so for two reasons :
1 . This should never fail ; any genuine errors in the code should have been
2 . We do not want to perform any extra work generating docs unless the
let docs = case Docs.convertModule externs exEnv env' m of
Left errs -> internalError $
"Failed to produce docs for " ++ T.unpack (runModuleName moduleName)
++ "; details:\n" ++ prettyPrintMultipleErrors defaultPPEOptions errs
Right d -> d
evalSupplyT nextVar'' $ codegen renamed docs exts
return exts
If timestamps or hashes have not changed , existing externs files can be used to provide upstream modules ' types without
make :: forall m. (MonadBaseControl IO m, MonadError MultipleErrors m, MonadWriter MultipleErrors m)
=> MakeActions m
-> [CST.PartialResult Module]
-> m [ExternsFile]
make ma@MakeActions{..} ms = do
checkModuleNames
cacheDb <- readCacheDb
(sorted, graph) <- sortModules Transitive (moduleSignature . CST.resPartial) ms
(buildPlan, newCacheDb) <- BuildPlan.construct ma cacheDb (sorted, graph)
let toBeRebuilt = filter (BuildPlan.needsRebuild buildPlan . getModuleName . CST.resPartial) sorted
let totalModuleCount = length toBeRebuilt
for_ toBeRebuilt $ \m -> fork $ do
let moduleName = getModuleName . CST.resPartial $ m
let deps = fromMaybe (internalError "make: module not found in dependency graph.") (lookup moduleName graph)
buildModule buildPlan moduleName totalModuleCount
(spanName . getModuleSourceSpan . CST.resPartial $ m)
(fst $ CST.resFull m)
(fmap importPrim . snd $ CST.resFull m)
(deps `inOrderOf` map (getModuleName . CST.resPartial) sorted)
`onExceptionLifted` BuildPlan.markComplete buildPlan moduleName (BuildJobFailed mempty)
(failures, successes) <-
let
splitResults = \case
BuildJobSucceeded _ exts ->
Right exts
BuildJobFailed errs ->
Left errs
BuildJobSkipped ->
Left mempty
in
M.mapEither splitResults <$> BuildPlan.collectResults buildPlan
writeCacheDb $ Cache.removeModules (M.keysSet failures) newCacheDb
writePackageJson
If generating docs , also generate them for the Prim modules
outputPrimDocs
let errors = M.elems failures
unless (null errors) $ throwError (mconcat errors)
Here we return all the ExternsFile in the ordering of the topological sort ,
and in PSCI .
let lookupResult mn =
fromMaybe (internalError "make: module not found in results")
$ M.lookup mn successes
return (map (lookupResult . getModuleName . CST.resPartial) sorted)
where
checkModuleNames :: m ()
checkModuleNames = checkNoPrim *> checkModuleNamesAreUnique
checkNoPrim :: m ()
checkNoPrim =
for_ ms $ \m ->
let mn = getModuleName $ CST.resPartial m
in when (isBuiltinModuleName mn) $
throwError
. errorMessage' (getModuleSourceSpan $ CST.resPartial m)
$ CannotDefinePrimModules mn
checkModuleNamesAreUnique :: m ()
checkModuleNamesAreUnique =
for_ (findDuplicates (getModuleName . CST.resPartial) ms) $ \mss ->
throwError . flip foldMap mss $ \ms' ->
let mn = getModuleName . CST.resPartial . NEL.head $ ms'
in errorMessage'' (fmap (getModuleSourceSpan . CST.resPartial) ms') $ DuplicateModule mn
findDuplicates :: Ord b => (a -> b) -> [a] -> Maybe [NEL.NonEmpty a]
findDuplicates f xs =
case filter ((> 1) . length) . NEL.groupBy ((==) `on` f) . sortOn f $ xs of
[] -> Nothing
xss -> Just xss
inOrderOf :: (Ord a) => [a] -> [a] -> [a]
inOrderOf xs ys = let s = S.fromList xs in filter (`S.member` s) ys
buildModule :: BuildPlan -> ModuleName -> Int -> FilePath -> [CST.ParserWarning] -> Either (NEL.NonEmpty CST.ParserError) Module -> [ModuleName] -> m ()
buildModule buildPlan moduleName cnt fp pwarnings mres deps = do
result <- flip catchError (return . BuildJobFailed) $ do
let pwarnings' = CST.toMultipleWarnings fp pwarnings
tell pwarnings'
m <- CST.unwrapParserError fp mres
module should be rebuilt , so the first thing to do is to wait on the
mexterns <- fmap unzip . sequence <$> traverse (getResult buildPlan) deps
case mexterns of
Just (_, externs) -> do
C.modifyMVar_ (bpEnv buildPlan) $ \env -> do
let
go :: Env -> ModuleName -> m Env
go e dep = case lookup dep (zip deps externs) of
Just exts
| not (M.member dep e) -> externsEnv e exts
_ -> return e
foldM go env deps
env <- C.readMVar (bpEnv buildPlan)
idx <- C.takeMVar (bpIndex buildPlan)
C.putMVar (bpIndex buildPlan) (idx + 1)
(exts, warnings) <- listen $ rebuildModuleWithIndex ma env externs m (Just (idx, cnt))
return $ BuildJobSucceeded (pwarnings' <> warnings) exts
Nothing -> return BuildJobSkipped
BuildPlan.markComplete buildPlan moduleName result
onExceptionLifted :: m a -> m b -> m a
onExceptionLifted l r = control $ \runInIO -> runInIO l `onException` runInIO r
inferForeignModules
:: forall m
. MonadIO m
=> M.Map ModuleName (Either RebuildPolicy FilePath)
-> m (M.Map ModuleName FilePath)
inferForeignModules =
fmap (M.mapMaybe id) . traverse inferForeignModule
where
inferForeignModule :: Either RebuildPolicy FilePath -> m (Maybe FilePath)
inferForeignModule (Left _) = return Nothing
inferForeignModule (Right path) = do
let jsFile = replaceExtension path "js"
exists <- liftIO $ doesFileExist jsFile
if exists
then return (Just jsFile)
else return Nothing
|
e0c0602a0fa71baa29280f55d53f9bb3f284d73ded7a0d70644c66cb795624d9 | geophf/1HaskellADay | Exercise.hs | module Y2018.M10.D15.Exercise where
-
You have a set of assets on your iPod , a piece of hardware older than your
daughters , let 's say , hypothetically , and you have a sets of assets on your
thumbdrives . You want to move the assets off your iPod before the needle wrecks
your hard drive . I 'm not kidding .
With the assets defined on your iPod and then the assets defined on each of
your thumbdrive , what assests do you need to move off your iPod without
moving assets that you already have on your thumbdrives ?
-
You have a set of assets on your iPod, a piece of hardware older than your
daughters, let's say, hypothetically, and you have a sets of assets on your
thumbdrives. You want to move the assets off your iPod before the needle wrecks
your hard drive. I'm not kidding.
With the assets defined on your iPod and then the assets defined on each of
your thumbdrive, what assests do you need to move off your iPod without
moving assets that you already have on your thumbdrives?
--}
type Asset = String
iPod, thumb1, thumb2, exDir :: FilePath
exDir = "Y2018/M10/D15/"
iPod = "iPod.sha"
thumb1 = "thumb1.sha"
thumb2 = "thumb2.sha"
Given the files with the assets , what assets need to be moved off the iPod ?
assetsToMove :: FilePath -> FilePath -> FilePath -> IO [Asset]
assetsToMove ipod th1 th2 = undefined
| null | https://raw.githubusercontent.com/geophf/1HaskellADay/514792071226cd1e2ba7640af942667b85601006/exercises/HAD/Y2018/M10/D15/Exercise.hs | haskell | } | module Y2018.M10.D15.Exercise where
-
You have a set of assets on your iPod , a piece of hardware older than your
daughters , let 's say , hypothetically , and you have a sets of assets on your
thumbdrives . You want to move the assets off your iPod before the needle wrecks
your hard drive . I 'm not kidding .
With the assets defined on your iPod and then the assets defined on each of
your thumbdrive , what assests do you need to move off your iPod without
moving assets that you already have on your thumbdrives ?
-
You have a set of assets on your iPod, a piece of hardware older than your
daughters, let's say, hypothetically, and you have a sets of assets on your
thumbdrives. You want to move the assets off your iPod before the needle wrecks
your hard drive. I'm not kidding.
With the assets defined on your iPod and then the assets defined on each of
your thumbdrive, what assests do you need to move off your iPod without
moving assets that you already have on your thumbdrives?
type Asset = String
iPod, thumb1, thumb2, exDir :: FilePath
exDir = "Y2018/M10/D15/"
iPod = "iPod.sha"
thumb1 = "thumb1.sha"
thumb2 = "thumb2.sha"
Given the files with the assets , what assets need to be moved off the iPod ?
assetsToMove :: FilePath -> FilePath -> FilePath -> IO [Asset]
assetsToMove ipod th1 th2 = undefined
|
b73189ea0d92e5d410fe2f35e7ca44748b6225bbf79572ca01d722bbab4aa451 | mkoppmann/eselsohr | Uri.hs | module Lib.Domain.Uri
( Uri (..)
, UriValidationError (..)
, mkUri
, unfilteredUri
, getHostname
, baseUri
) where
import qualified Data.Text as T
import qualified Net.IPv4 as IPv4
import qualified Net.IPv6 as IPv6
import qualified Net.IPv6.Helper as IPv6
import qualified Text.Show
import qualified Text.URI as U
import qualified Text.URI.Lens as UL
import qualified Validation
import Lens.Micro
( (^.)
, (^?)
, _Right
)
import Text.URI (URI)
import Validation
( Validation
, failure
, validateAll
, validationToEither
)
import Lib.Domain.Error
( AppErrorType
, invalid
)
newtype Uri = Uri {unUri :: U.URI}
deriving (Eq, Show) via U.URI
instance ToText Uri where
toText = toText . U.render . coerce
-- | Type representing different validation errors.
data UriValidationError
| Only port 80 and 443 are allowed .
ForbiddenPort
| -- | Only HTTP and HTTPS are allowed.
ForbiddenProtocol
| -- | Hostnames like @localhost@ are forbidden.
ForbiddenHostname
| -- | Only public IPv4 ranges are allowed.
ForbiddenIPv4Range
| -- | Only public IPv6 ranges are allowed.
ForbiddenIPv6Range
instance Show UriValidationError where
show ForbiddenPort = "Only port 80 and 443 are allowed."
show ForbiddenProtocol = "Only HTTP and HTTPS are allowed."
show ForbiddenHostname = "Hostnames like `localhost` are forbidden."
show ForbiddenIPv4Range = "Only public IPv4 ranges are allowed."
show ForbiddenIPv6Range = "Only public IPv6 ranges are allowed."
mkUri :: Text -> Either AppErrorType Uri
mkUri url = case U.mkURI url of
Left err -> Left . invalid . toText $ displayException err
Right uri -> validationToEither . bimap (invalid . show) Uri $ validateUri uri
where
validateUri :: URI -> Validation (NonEmpty UriValidationError) URI
validateUri = validateAll [validatePort, validateProtocol, validateHostname, validateIPv4, validateIPv6]
validatePort :: URI -> Validation (NonEmpty UriValidationError) URI
validatePort uri = case join $ uri ^? UL.uriAuthority . _Right . UL.authPort of
Just 80 -> Validation.Success uri
Just 443 -> Validation.Success uri
Nothing -> Validation.Success uri
_nonAllowedPort -> failure ForbiddenPort
validateProtocol :: URI -> Validation (NonEmpty UriValidationError) URI
validateProtocol uri = case U.unRText <$> uri ^. UL.uriScheme of
Just "http" -> Validation.Success uri
Just "https" -> Validation.Success uri
Nothing -> Validation.Success uri
_nonAllowedProtocol -> failure ForbiddenProtocol
validateHostname :: URI -> Validation (NonEmpty UriValidationError) URI
validateHostname uri = case getHostname' uri of
Just "localhost" -> failure ForbiddenHostname
_otherHostnames -> Validation.Success uri
validateIPv4 :: URI -> Validation (NonEmpty UriValidationError) URI
validateIPv4 uri = case IPv4.public <$> (IPv4.decode =<< getHostname' uri) of
Nothing -> Validation.Success uri
Just isPub -> if isPub then Validation.Success uri else failure ForbiddenIPv4Range
validateIPv6 :: URI -> Validation (NonEmpty UriValidationError) URI
validateIPv6 uri = case IPv6.public <$> (IPv6.decode =<< getHostnameFromIpv6 uri) of
Nothing -> Validation.Success uri
Just isPub -> if isPub then Validation.Success uri else failure ForbiddenIPv6Range
getHostnameFromIpv6 :: URI -> Maybe Text
getHostnameFromIpv6 = fmap dropIPv6Brackets . getHostname'
-- \| Literal IPv6 addresses are put into brackets in URLs:
--
--
dropIPv6Brackets :: Text -> Text
dropIPv6Brackets = T.dropEnd 1 . T.drop 1
{- | Returns an 'Uri' like 'mkUri' does but with no applied validation. Use
with caution.
-}
unfilteredUri :: Text -> Either AppErrorType Uri
unfilteredUri = either failureCase uri . U.mkURI
where
failureCase :: SomeException -> Either AppErrorType a
failureCase = Left . invalid . toText . displayException
uri :: URI -> Either AppErrorType Uri
uri = pure . Uri
getHostname :: Uri -> Maybe Text
getHostname (Uri uri) = getHostname' uri
baseUri :: Text -> Uri
baseUri url = case U.mkURI url of
Left err -> error . (<>) "Invalid base url: " . toText $ displayException err
Right uri -> Uri uri
getHostname' :: URI -> Maybe Text
getHostname' uri = U.unRText <$> uri ^? UL.uriAuthority . _Right . UL.authHost
| null | https://raw.githubusercontent.com/mkoppmann/eselsohr/3bb8609199c1dfda94935e6dde0c46fc429de84e/src/Lib/Domain/Uri.hs | haskell | | Type representing different validation errors.
| Only HTTP and HTTPS are allowed.
| Hostnames like @localhost@ are forbidden.
| Only public IPv4 ranges are allowed.
| Only public IPv6 ranges are allowed.
\| Literal IPv6 addresses are put into brackets in URLs:
| Returns an 'Uri' like 'mkUri' does but with no applied validation. Use
with caution.
| module Lib.Domain.Uri
( Uri (..)
, UriValidationError (..)
, mkUri
, unfilteredUri
, getHostname
, baseUri
) where
import qualified Data.Text as T
import qualified Net.IPv4 as IPv4
import qualified Net.IPv6 as IPv6
import qualified Net.IPv6.Helper as IPv6
import qualified Text.Show
import qualified Text.URI as U
import qualified Text.URI.Lens as UL
import qualified Validation
import Lens.Micro
( (^.)
, (^?)
, _Right
)
import Text.URI (URI)
import Validation
( Validation
, failure
, validateAll
, validationToEither
)
import Lib.Domain.Error
( AppErrorType
, invalid
)
newtype Uri = Uri {unUri :: U.URI}
deriving (Eq, Show) via U.URI
instance ToText Uri where
toText = toText . U.render . coerce
data UriValidationError
| Only port 80 and 443 are allowed .
ForbiddenPort
ForbiddenProtocol
ForbiddenHostname
ForbiddenIPv4Range
ForbiddenIPv6Range
instance Show UriValidationError where
show ForbiddenPort = "Only port 80 and 443 are allowed."
show ForbiddenProtocol = "Only HTTP and HTTPS are allowed."
show ForbiddenHostname = "Hostnames like `localhost` are forbidden."
show ForbiddenIPv4Range = "Only public IPv4 ranges are allowed."
show ForbiddenIPv6Range = "Only public IPv6 ranges are allowed."
mkUri :: Text -> Either AppErrorType Uri
mkUri url = case U.mkURI url of
Left err -> Left . invalid . toText $ displayException err
Right uri -> validationToEither . bimap (invalid . show) Uri $ validateUri uri
where
validateUri :: URI -> Validation (NonEmpty UriValidationError) URI
validateUri = validateAll [validatePort, validateProtocol, validateHostname, validateIPv4, validateIPv6]
validatePort :: URI -> Validation (NonEmpty UriValidationError) URI
validatePort uri = case join $ uri ^? UL.uriAuthority . _Right . UL.authPort of
Just 80 -> Validation.Success uri
Just 443 -> Validation.Success uri
Nothing -> Validation.Success uri
_nonAllowedPort -> failure ForbiddenPort
validateProtocol :: URI -> Validation (NonEmpty UriValidationError) URI
validateProtocol uri = case U.unRText <$> uri ^. UL.uriScheme of
Just "http" -> Validation.Success uri
Just "https" -> Validation.Success uri
Nothing -> Validation.Success uri
_nonAllowedProtocol -> failure ForbiddenProtocol
validateHostname :: URI -> Validation (NonEmpty UriValidationError) URI
validateHostname uri = case getHostname' uri of
Just "localhost" -> failure ForbiddenHostname
_otherHostnames -> Validation.Success uri
validateIPv4 :: URI -> Validation (NonEmpty UriValidationError) URI
validateIPv4 uri = case IPv4.public <$> (IPv4.decode =<< getHostname' uri) of
Nothing -> Validation.Success uri
Just isPub -> if isPub then Validation.Success uri else failure ForbiddenIPv4Range
validateIPv6 :: URI -> Validation (NonEmpty UriValidationError) URI
validateIPv6 uri = case IPv6.public <$> (IPv6.decode =<< getHostnameFromIpv6 uri) of
Nothing -> Validation.Success uri
Just isPub -> if isPub then Validation.Success uri else failure ForbiddenIPv6Range
getHostnameFromIpv6 :: URI -> Maybe Text
getHostnameFromIpv6 = fmap dropIPv6Brackets . getHostname'
dropIPv6Brackets :: Text -> Text
dropIPv6Brackets = T.dropEnd 1 . T.drop 1
unfilteredUri :: Text -> Either AppErrorType Uri
unfilteredUri = either failureCase uri . U.mkURI
where
failureCase :: SomeException -> Either AppErrorType a
failureCase = Left . invalid . toText . displayException
uri :: URI -> Either AppErrorType Uri
uri = pure . Uri
getHostname :: Uri -> Maybe Text
getHostname (Uri uri) = getHostname' uri
baseUri :: Text -> Uri
baseUri url = case U.mkURI url of
Left err -> error . (<>) "Invalid base url: " . toText $ displayException err
Right uri -> Uri uri
getHostname' :: URI -> Maybe Text
getHostname' uri = U.unRText <$> uri ^? UL.uriAuthority . _Right . UL.authHost
|
1372065e028288d51013d412e5153253796501ecd6a5b5e8526fd563bf0922bb | dalaing/little-languages | TermL.hs | {-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE DeriveFoldable #-}
{-# LANGUAGE DeriveTraversable #-}
module TermL where
import Control.Monad (ap)
import Bound
import Bound.Name
import Bound.Scope
import Prelude.Extras
data Term l n a =
TODO split Var into a Var with a location and a Var without ?
TODO might need to track location in a Name - like structure along with the name
Var (Maybe l) a
| Lam l (Scope (Name n ()) (Term l n) a)
| App l (Term l n a) (Term l n a)
| TmInt l Int
| TmBool l Bool
| Add l (Term l n a) (Term l n a)
| Equ l (Term l n a) (Term l n a)
| And l (Term l n a) (Term l n a)
deriving (Eq, Ord, Show, Functor, Foldable, Traversable)
instance (Eq l, Eq n) => Eq1 (Term l n) where
(==#) = (==)
instance (Ord l, Ord n) => Ord1 (Term l n) where
compare1 = compare
instance (Show l, Show n) => Show1 (Term l n) where
showsPrec1 = showsPrec
instance Applicative (Term l n) where
pure = return
(<*>) = ap
instance Monad (Term l n) where
return = Var Nothing
Var _ x >>= g = g x
Lam l e >>= g = Lam l (e >>>= g)
App l f x >>= g = App l (f >>= g) (x >>= g)
TmInt l i >>= _ = TmInt l i
TmBool l b >>= _ = TmBool l b
Add l x y >>= g = Add l (x >>= g) (y >>= g)
Equ l x y >>= g = Equ l (x >>= g) (y >>= g)
And l x y >>= g = And l (x >>= g) (y >>= g)
| null | https://raw.githubusercontent.com/dalaing/little-languages/9f089f646a5344b8f7178700455a36a755d29b1f/code/old/prototypes/frag/src/TermL.hs | haskell | # LANGUAGE DeriveFunctor #
# LANGUAGE DeriveFoldable #
# LANGUAGE DeriveTraversable # | module TermL where
import Control.Monad (ap)
import Bound
import Bound.Name
import Bound.Scope
import Prelude.Extras
data Term l n a =
TODO split Var into a Var with a location and a Var without ?
TODO might need to track location in a Name - like structure along with the name
Var (Maybe l) a
| Lam l (Scope (Name n ()) (Term l n) a)
| App l (Term l n a) (Term l n a)
| TmInt l Int
| TmBool l Bool
| Add l (Term l n a) (Term l n a)
| Equ l (Term l n a) (Term l n a)
| And l (Term l n a) (Term l n a)
deriving (Eq, Ord, Show, Functor, Foldable, Traversable)
instance (Eq l, Eq n) => Eq1 (Term l n) where
(==#) = (==)
instance (Ord l, Ord n) => Ord1 (Term l n) where
compare1 = compare
instance (Show l, Show n) => Show1 (Term l n) where
showsPrec1 = showsPrec
instance Applicative (Term l n) where
pure = return
(<*>) = ap
instance Monad (Term l n) where
return = Var Nothing
Var _ x >>= g = g x
Lam l e >>= g = Lam l (e >>>= g)
App l f x >>= g = App l (f >>= g) (x >>= g)
TmInt l i >>= _ = TmInt l i
TmBool l b >>= _ = TmBool l b
Add l x y >>= g = Add l (x >>= g) (y >>= g)
Equ l x y >>= g = Equ l (x >>= g) (y >>= g)
And l x y >>= g = And l (x >>= g) (y >>= g)
|
0d97792047606777737dcb9630d7e448109963bfef030d0986e138561c1a9b79 | metosin/scjsv | core.clj | (ns scjsv.core
"Use [[validator]], [[json-validator]], or [[json-reader-validator]] to
construct a validator function.
### Validator functions
The first argument for the validator function is the data. The optional second
argument is an options map with the following keys:
| key | default | description |
|---------------|----------|-------------|
| `:deep-check` | `false` | Check nested elements even if the parent elements are invalid.
"
(:require [jsonista.core :as jsonista])
(:import [com.fasterxml.jackson.databind JsonNode ObjectMapper]
[com.github.fge.jackson JsonNodeReader]
[com.github.fge.jsonschema.main JsonSchemaFactory]
[com.github.fge.jsonschema.core.load Dereferencing]
[com.github.fge.jsonschema.core.load.configuration LoadingConfiguration]
[com.github.fge.jsonschema.core.report ListProcessingReport ProcessingMessage]
[com.github.fge.jsonschema.main JsonSchema]
[java.io Reader]))
(def ^:private +object-mapper+
(jsonista/object-mapper {:decode-key-fn true}))
(defn- build-reader [] (JsonNodeReader. +object-mapper+))
(def ^{:tag JsonNodeReader, :private true} reader (build-reader))
(defn- ^JsonNode reader->json-node
"Creates a JsonNode from a Reader"
[^Reader data-reader]
(.fromReader reader data-reader))
(defn- ^JsonNode string->json-node
"Creates a JsonNode from a String"
[^String data]
(reader->json-node (java.io.StringReader. data)))
(defn- build-factory
"Creates a JsonSchemaFactory based on the options map."
[{:keys [dereferencing] :or {dereferencing :canonical}}]
(let [dereferencing-mode (case dereferencing
:inline (Dereferencing/INLINE)
:canonical (Dereferencing/CANONICAL))
loading-config (-> (LoadingConfiguration/newBuilder)
(.dereferencing dereferencing-mode)
(.freeze))]
(-> (JsonSchemaFactory/newBuilder)
(.setLoadingConfiguration loading-config)
(.freeze))))
(defn- ->json-schema
"Creates a JSONSchema instance either from a JSON string or a Clojure map."
[schema ^JsonSchemaFactory factory]
(let [schema-string (if (string? schema)
schema
(jsonista/write-value-as-string schema))
schema-object (string->json-node schema-string)]
(.getJsonSchema factory schema-object)))
(defn- validate
"Validates (f json-data) against a given JSON Schema."
([^JsonSchema json-schema
^JsonNode json-data
{:keys [deep-check] :or {deep-check false}}]
(let [report (.validate json-schema json-data deep-check)
lp (doto (ListProcessingReport.) (.mergeWith report))
errors (iterator-seq (.iterator lp))
->clj #(-> (.asJson ^ProcessingMessage %) str (jsonista/read-value +object-mapper+))]
(if (seq errors)
(map ->clj errors)))))
(defn- ->factory
"Converts value to a JsonSchemaFactory if it isn't one."
[value]
(cond
(instance? JsonSchemaFactory value) value
(map? value) (build-factory value)
:else (throw (Exception. (str "Don't know how to convert " (pr-str value)
" into a JsonSchemaFactory.")))))
;;
;; Public API
;;
(defn- build-validator
"Returns a validator function. Schema can be given either as a JSON String or
a Clojure map.
`->json-node` is the function which will be applied to datum to transform them into
a JsonNode"
[schema json-schema-factory ->json-node]
(let [validator-opts (when (map? json-schema-factory)
(select-keys json-schema-factory [:deep-check]))
factory (->factory (or json-schema-factory {}))]
(fn validator
([data] (validator data nil))
([data opts]
(validate (->json-schema schema factory) (->json-node data) (merge validator-opts opts))))))
(defn json-reader-validator
"Returns a `java.io.Reader` validator function. Schema can be given either as
a JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or a
options map as the second parameter. See [[scjsv.core/validator]] docstring for
the options."
([schema]
(json-reader-validator schema (build-factory {})))
([schema json-schema-factory]
(build-validator schema json-schema-factory reader->json-node)))
(defn json-validator
"Returns a JSON string validator function. Schema can be given either as a
JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or a
options map as the second parameter. See [[scjsv.core/validator]] docstring for
the options."
([schema]
(json-validator schema (build-factory {})))
([schema json-schema-factory]
(build-validator schema json-schema-factory string->json-node)))
(defn validator
"Returns a Clojure data structure validator function. Schema can be given
either as a JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or an
options map as the second parameter. The options map can have the following
keys:
| key | default | description |
|------------------|--------------|--------------|
| `:dereferencing` | `:canonical` | Which dereferencing mode to use. Either `:canonical` or `:inline`.
| `:deep-check` | `false` | Check nested elements even if the parent elements are invalid.
Note that you can't pass a `JsonSchemaFactory` instance and enable
`:deep-check` at once. If you need this, pass `{:deep-check true}` as the
second argument to the validator function."
([schema]
(validator schema nil))
([schema json-schema-factory]
(build-validator schema
json-schema-factory
(comp string->json-node jsonista/write-value-as-string)))) | null | https://raw.githubusercontent.com/metosin/scjsv/8dcc9d81341645f30bba91a25b5c90242d43b44b/src/scjsv/core.clj | clojure |
Public API
| (ns scjsv.core
"Use [[validator]], [[json-validator]], or [[json-reader-validator]] to
construct a validator function.
### Validator functions
The first argument for the validator function is the data. The optional second
argument is an options map with the following keys:
| key | default | description |
|---------------|----------|-------------|
| `:deep-check` | `false` | Check nested elements even if the parent elements are invalid.
"
(:require [jsonista.core :as jsonista])
(:import [com.fasterxml.jackson.databind JsonNode ObjectMapper]
[com.github.fge.jackson JsonNodeReader]
[com.github.fge.jsonschema.main JsonSchemaFactory]
[com.github.fge.jsonschema.core.load Dereferencing]
[com.github.fge.jsonschema.core.load.configuration LoadingConfiguration]
[com.github.fge.jsonschema.core.report ListProcessingReport ProcessingMessage]
[com.github.fge.jsonschema.main JsonSchema]
[java.io Reader]))
(def ^:private +object-mapper+
(jsonista/object-mapper {:decode-key-fn true}))
(defn- build-reader [] (JsonNodeReader. +object-mapper+))
(def ^{:tag JsonNodeReader, :private true} reader (build-reader))
(defn- ^JsonNode reader->json-node
"Creates a JsonNode from a Reader"
[^Reader data-reader]
(.fromReader reader data-reader))
(defn- ^JsonNode string->json-node
"Creates a JsonNode from a String"
[^String data]
(reader->json-node (java.io.StringReader. data)))
(defn- build-factory
"Creates a JsonSchemaFactory based on the options map."
[{:keys [dereferencing] :or {dereferencing :canonical}}]
(let [dereferencing-mode (case dereferencing
:inline (Dereferencing/INLINE)
:canonical (Dereferencing/CANONICAL))
loading-config (-> (LoadingConfiguration/newBuilder)
(.dereferencing dereferencing-mode)
(.freeze))]
(-> (JsonSchemaFactory/newBuilder)
(.setLoadingConfiguration loading-config)
(.freeze))))
(defn- ->json-schema
"Creates a JSONSchema instance either from a JSON string or a Clojure map."
[schema ^JsonSchemaFactory factory]
(let [schema-string (if (string? schema)
schema
(jsonista/write-value-as-string schema))
schema-object (string->json-node schema-string)]
(.getJsonSchema factory schema-object)))
(defn- validate
"Validates (f json-data) against a given JSON Schema."
([^JsonSchema json-schema
^JsonNode json-data
{:keys [deep-check] :or {deep-check false}}]
(let [report (.validate json-schema json-data deep-check)
lp (doto (ListProcessingReport.) (.mergeWith report))
errors (iterator-seq (.iterator lp))
->clj #(-> (.asJson ^ProcessingMessage %) str (jsonista/read-value +object-mapper+))]
(if (seq errors)
(map ->clj errors)))))
(defn- ->factory
"Converts value to a JsonSchemaFactory if it isn't one."
[value]
(cond
(instance? JsonSchemaFactory value) value
(map? value) (build-factory value)
:else (throw (Exception. (str "Don't know how to convert " (pr-str value)
" into a JsonSchemaFactory.")))))
(defn- build-validator
"Returns a validator function. Schema can be given either as a JSON String or
a Clojure map.
`->json-node` is the function which will be applied to datum to transform them into
a JsonNode"
[schema json-schema-factory ->json-node]
(let [validator-opts (when (map? json-schema-factory)
(select-keys json-schema-factory [:deep-check]))
factory (->factory (or json-schema-factory {}))]
(fn validator
([data] (validator data nil))
([data opts]
(validate (->json-schema schema factory) (->json-node data) (merge validator-opts opts))))))
(defn json-reader-validator
"Returns a `java.io.Reader` validator function. Schema can be given either as
a JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or a
options map as the second parameter. See [[scjsv.core/validator]] docstring for
the options."
([schema]
(json-reader-validator schema (build-factory {})))
([schema json-schema-factory]
(build-validator schema json-schema-factory reader->json-node)))
(defn json-validator
"Returns a JSON string validator function. Schema can be given either as a
JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or a
options map as the second parameter. See [[scjsv.core/validator]] docstring for
the options."
([schema]
(json-validator schema (build-factory {})))
([schema json-schema-factory]
(build-validator schema json-schema-factory string->json-node)))
(defn validator
"Returns a Clojure data structure validator function. Schema can be given
either as a JSON String or a Clojure map.
To configure the validator, you can pass a `JsonSchemaFactory` instance or an
options map as the second parameter. The options map can have the following
keys:
| key | default | description |
|------------------|--------------|--------------|
| `:dereferencing` | `:canonical` | Which dereferencing mode to use. Either `:canonical` or `:inline`.
| `:deep-check` | `false` | Check nested elements even if the parent elements are invalid.
Note that you can't pass a `JsonSchemaFactory` instance and enable
`:deep-check` at once. If you need this, pass `{:deep-check true}` as the
second argument to the validator function."
([schema]
(validator schema nil))
([schema json-schema-factory]
(build-validator schema
json-schema-factory
(comp string->json-node jsonista/write-value-as-string)))) |
4b890377dd3f198024f69d0e69b37b87d4831111296397db7f55e8c5362bf9b0 | mbutterick/aoc-racket | main.rkt | #lang reader "../aoc-lang.rkt"
(provide (rename-out [#%mb #%module-begin]) ★ ★★)
(define-macro (#%mb (STARS) TOKS ...)
#`(#%module-begin
(time (STARS (vector (λ () TOKS) ...)))))
(define regs (make-hasheq))
(define last-sound-played (make-parameter #f))
(struct offset-signal (val))
(struct end-signal (val))
(provide snd set add mul mod rcv jgz)
(define-macro (value VAL) #'(let ([val 'VAL]) (if (number? val) val (hash-ref regs val))))
(define-macro (snd REG) #'(last-sound-played (hash-ref regs 'REG)))
(define-macro (set REG VAL) #'(hash-set! regs 'REG (value VAL)))
(define-macro (add REG VAL) #'(hash-update! regs 'REG (λ (val) (+ val (value VAL))) 0))
(define-macro (mul REG VAL) #'(hash-update! regs 'REG (λ (val) (* val (value VAL))) 0))
(define-macro (mod REG VAL) #'(hash-update! regs 'REG (λ (val) (modulo val (value VAL))) 0))
(define-macro (rcv REG) #'(unless (zero? (hash-ref regs 'REG)) (raise (last-sound-played))))
(define-macro (jgz REG VAL) #'(when (positive? (hash-ref regs 'REG)) (raise (offset-signal (value VAL)))))
(define (★ insts)
(with-handlers ([number? values])
(for/fold ([offset 0])
([i (in-naturals)])
(with-handlers ([offset-signal? (λ (os) (+ (offset-signal-val os) offset))])
(define proc (vector-ref insts offset))
(proc)
(add1 offset)))))
| null | https://raw.githubusercontent.com/mbutterick/aoc-racket/2c6cb2f3ad876a91a82f33ce12844f7758b969d6/2017/d18/main.rkt | racket | #lang reader "../aoc-lang.rkt"
(provide (rename-out [#%mb #%module-begin]) ★ ★★)
(define-macro (#%mb (STARS) TOKS ...)
#`(#%module-begin
(time (STARS (vector (λ () TOKS) ...)))))
(define regs (make-hasheq))
(define last-sound-played (make-parameter #f))
(struct offset-signal (val))
(struct end-signal (val))
(provide snd set add mul mod rcv jgz)
(define-macro (value VAL) #'(let ([val 'VAL]) (if (number? val) val (hash-ref regs val))))
(define-macro (snd REG) #'(last-sound-played (hash-ref regs 'REG)))
(define-macro (set REG VAL) #'(hash-set! regs 'REG (value VAL)))
(define-macro (add REG VAL) #'(hash-update! regs 'REG (λ (val) (+ val (value VAL))) 0))
(define-macro (mul REG VAL) #'(hash-update! regs 'REG (λ (val) (* val (value VAL))) 0))
(define-macro (mod REG VAL) #'(hash-update! regs 'REG (λ (val) (modulo val (value VAL))) 0))
(define-macro (rcv REG) #'(unless (zero? (hash-ref regs 'REG)) (raise (last-sound-played))))
(define-macro (jgz REG VAL) #'(when (positive? (hash-ref regs 'REG)) (raise (offset-signal (value VAL)))))
(define (★ insts)
(with-handlers ([number? values])
(for/fold ([offset 0])
([i (in-naturals)])
(with-handlers ([offset-signal? (λ (os) (+ (offset-signal-val os) offset))])
(define proc (vector-ref insts offset))
(proc)
(add1 offset)))))
| |
d2a65919fdedee9968aff8feeb85e326dcb812bdab230e63cf6a58750234a9ce | gonzojive/elephant | package-new.lisp | -*- Mode : Lisp ; Syntax : ANSI - Common - Lisp ; Base : 10 -*-
;;;
;;; package.lisp -- package definition
;;;
Initial version 8/26/2004 by
;;; <>
;;;
;;; part of
;;;
Elephant : an object - oriented database for Common Lisp
;;;
Copyright ( c ) 2004 by and
;;; <> <>
;;;
;;; Elephant users are granted the rights to distribute and use this software
as governed by the terms of the Lisp Lesser GNU Public License
;;; (), also known as the LLGPL.
;;;
(in-package :cl-user)
(defpackage elephant-btrees
(:use :closer-common-lisp)
(:export
#:cursor #:secondary-cursor #:make-cursor
#:with-btree-cursor #:cursor-close #:cursor-init
#:cursor-duplicate #:cursor-current #:cursor-first
#:cursor-last #:cursor-next #:cursor-next-dup
#:cursor-next-nodup #:cursor-prev #:cursor-prev-nodup
#:cursor-set #:cursor-set-range #:cursor-get-both
#:cursor-get-both-range #:cursor-delete #:cursor-put
#:cursor-pcurrent #:cursor-pfirst #:cursor-plast
#:cursor-pnext #:cursor-pnext-dup #:cursor-pnext-nodup
#:cursor-pprev #:cursor-pprev-nodup #:cursor-pset
#:cursor-pset-range #:cursor-pget-both
#:cursor-pget-both-range))
(defpackage elephant
(:use :closer-common-lisp :elephant-memutil :elephant-btrees)
(:nicknames ele :ele)
(:documentation
"Elephant: an object-oriented database for Common Lisp with
multiple backends for Berkeley DB, SQL and others.")
(:export #:*store-controller* #:*current-transaction* #:*auto-commit*
#:*elephant-lib-path*
#:store-controller
#:open-store #:close-store #:with-open-store
#:add-to-root #:get-from-root #:remove-from-root #:root-existsp
#:flush-instance-cache #:optimize-storage
#:with-transaction
#:start-ele-transaction #:commit-transaction #:abort-transaction
#:persistent #:persistent-object #:persistent-metaclass
#:persistent-collection #:defpclass
#:btree #:make-btree #:get-value #:remove-kv #:existp #:map-btree
#:indexed-btree #:make-indexed-btree
#:add-index #:get-index #:remove-index #:map-indices
#:btree-index #:get-primary-key
#:primary #:key-form #:key-fn
#:btree-differ
#:migrate #:*inhibit-slot-copy*
#:run-elephant-thread
;; Class indexing management API
#:*default-indexed-class-synch-policy*
#:find-class-index #:find-inverted-index
#:enable-class-indexing #:disable-class-indexing
#:add-class-slot-index #:remove-class-slot-index
#:add-class-derived-index #:remove-class-derived-index
#:describe-db-class-index
#:report-indexed-classes
#:class-indexedp-by-name
;; Low level cursor API
#:make-inverted-cursor #:make-class-cursor
#:with-inverted-cursor #:with-class-cursor
;; Instance query API
#:get-instances-by-class
#:get-instance-by-value
#:get-instances-by-value
#:get-instances-by-range
#:drop-instances
)
)
(in-package "ELE")
#+cmu
(eval-when (:compile-toplevel)
(proclaim '(optimize (ext:inhibit-warnings 3))))
| null | https://raw.githubusercontent.com/gonzojive/elephant/b29a012ab75ccea2fc7fc4f1e9d5e821f0bd60bf/src/contrib/eslick/package-new.lisp | lisp | Syntax : ANSI - Common - Lisp ; Base : 10 -*-
package.lisp -- package definition
<>
part of
<> <>
Elephant users are granted the rights to distribute and use this software
(), also known as the LLGPL.
Class indexing management API
Low level cursor API
Instance query API | Initial version 8/26/2004 by
Elephant : an object - oriented database for Common Lisp
Copyright ( c ) 2004 by and
as governed by the terms of the Lisp Lesser GNU Public License
(in-package :cl-user)
(defpackage elephant-btrees
(:use :closer-common-lisp)
(:export
#:cursor #:secondary-cursor #:make-cursor
#:with-btree-cursor #:cursor-close #:cursor-init
#:cursor-duplicate #:cursor-current #:cursor-first
#:cursor-last #:cursor-next #:cursor-next-dup
#:cursor-next-nodup #:cursor-prev #:cursor-prev-nodup
#:cursor-set #:cursor-set-range #:cursor-get-both
#:cursor-get-both-range #:cursor-delete #:cursor-put
#:cursor-pcurrent #:cursor-pfirst #:cursor-plast
#:cursor-pnext #:cursor-pnext-dup #:cursor-pnext-nodup
#:cursor-pprev #:cursor-pprev-nodup #:cursor-pset
#:cursor-pset-range #:cursor-pget-both
#:cursor-pget-both-range))
(defpackage elephant
(:use :closer-common-lisp :elephant-memutil :elephant-btrees)
(:nicknames ele :ele)
(:documentation
"Elephant: an object-oriented database for Common Lisp with
multiple backends for Berkeley DB, SQL and others.")
(:export #:*store-controller* #:*current-transaction* #:*auto-commit*
#:*elephant-lib-path*
#:store-controller
#:open-store #:close-store #:with-open-store
#:add-to-root #:get-from-root #:remove-from-root #:root-existsp
#:flush-instance-cache #:optimize-storage
#:with-transaction
#:start-ele-transaction #:commit-transaction #:abort-transaction
#:persistent #:persistent-object #:persistent-metaclass
#:persistent-collection #:defpclass
#:btree #:make-btree #:get-value #:remove-kv #:existp #:map-btree
#:indexed-btree #:make-indexed-btree
#:add-index #:get-index #:remove-index #:map-indices
#:btree-index #:get-primary-key
#:primary #:key-form #:key-fn
#:btree-differ
#:migrate #:*inhibit-slot-copy*
#:run-elephant-thread
#:*default-indexed-class-synch-policy*
#:find-class-index #:find-inverted-index
#:enable-class-indexing #:disable-class-indexing
#:add-class-slot-index #:remove-class-slot-index
#:add-class-derived-index #:remove-class-derived-index
#:describe-db-class-index
#:report-indexed-classes
#:class-indexedp-by-name
#:make-inverted-cursor #:make-class-cursor
#:with-inverted-cursor #:with-class-cursor
#:get-instances-by-class
#:get-instance-by-value
#:get-instances-by-value
#:get-instances-by-range
#:drop-instances
)
)
(in-package "ELE")
#+cmu
(eval-when (:compile-toplevel)
(proclaim '(optimize (ext:inhibit-warnings 3))))
|
a8793a92811d78e7df571689b5c6bfaf78ade69e557d744898348e06bf1dd5ee | penpot/penpot | snap_distances.cljs | This Source Code Form is subject to the terms of the Mozilla Public
License , v. 2.0 . If a copy of the MPL was not distributed with this
file , You can obtain one at /.
;;
;; Copyright (c) KALEIDOS INC
(ns app.main.ui.workspace.viewport.snap-distances
(:require
[app.common.data :as d]
[app.common.geom.shapes :as gsh]
[app.common.math :as mth]
[app.common.types.shape.layout :as ctl]
[app.main.refs :as refs]
[app.main.snap :as ams]
[app.main.ui.formats :as fmt]
[beicon.core :as rx]
[clojure.set :as set]
[cuerdas.core :as str]
[rumext.v2 :as mf]))
(def ^:private line-color "var(--color-snap)")
(def ^:private segment-gap 2)
(def ^:private segment-gap-side 5)
(defn selected->cross-selrec [frame selrect coord]
(let [areas (gsh/selrect->areas (:selrect frame) selrect)]
(if (= :x coord)
[(gsh/pad-selrec (:left areas))
(gsh/pad-selrec (:right areas))]
[(gsh/pad-selrec (:top areas))
(gsh/pad-selrec (:bottom areas))])))
(defn half-point
"Calculates the middle point of the overlap between two selrects in the opposite axis"
[coord sr1 sr2]
(let [c1 (max (get sr1 (if (= :x coord) :y1 :x1))
(get sr2 (if (= :x coord) :y1 :x1)))
c2 (min (get sr1 (if (= :x coord) :y2 :x2))
(get sr2 (if (= :x coord) :y2 :x2)))
half-point (+ c1 (/ (- c2 c1) 2))]
half-point))
(def pill-text-width-letter 6)
(def pill-text-width-margin 6)
(def pill-text-font-size 12)
(def pill-text-height 20)
(def pill-text-border-radius 4)
(def pill-text-padding 4)
(mf/defc shape-distance-segment
"Displays a segment between two selrects with the distance between them"
[{:keys [sr1 sr2 coord zoom]}]
(let [from-c (min (get sr1 (if (= :x coord) :x2 :y2))
(get sr2 (if (= :x coord) :x2 :y2)))
to-c (max (get sr1 (if (= :x coord) :x1 :y1))
(get sr2 (if (= :x coord) :x1 :y1)))
distance (- to-c from-c)
distance-str (fmt/format-number distance)
half-point (half-point coord sr1 sr2)
width (-> distance-str
count
(* (/ pill-text-width-letter zoom))
(+ (/ pill-text-width-margin zoom))
(+ (* (/ pill-text-width-margin zoom) 2)))]
[:g.distance-segment
(let [point [(+ from-c (/ distance 2))
(if (= coord :x)
(- half-point (/ 10 zoom))
(+ half-point (/ 5 zoom)))]
[x y] (if (= :x coord) point (reverse point))]
[:*
[:rect {:x (if (= coord :x) (- x (/ width 2)) x)
:y (- (- y (/ (/ pill-text-height zoom) 2)) (if (= coord :x) (/ 2 zoom) 0))
:width width
:height (/ pill-text-height zoom)
:rx (/ pill-text-border-radius zoom)
:fill line-color}]
[:text {:x (if (= coord :x) x (+ x (/ width 2)))
:y (- (+ y (/ (/ pill-text-height zoom) 2) (- (/ 6 zoom))) (if (= coord :x) (/ 2 zoom) 0))
:font-size (/ pill-text-font-size zoom)
:fill "var(--color-white)"
:text-anchor "middle"}
(fmt/format-number distance)]])
(let [p1 [(+ from-c (/ segment-gap zoom)) (+ half-point (/ segment-gap-side zoom))]
p2 [(+ from-c (/ segment-gap zoom)) (- half-point (/ segment-gap-side zoom))]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])
(let [p1 [(- to-c (/ segment-gap zoom)) (+ half-point (/ segment-gap-side zoom))]
p2 [(- to-c (/ segment-gap zoom)) (- half-point (/ segment-gap-side zoom))]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])
(let [p1 [(+ from-c (/ segment-gap zoom)) half-point]
p2 [(- to-c (/ segment-gap zoom)) half-point]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])]))
(defn add-distance [coord sh1 sh2]
(let [sr1 (:selrect sh1)
sr2 (:selrect sh2)
c1 (if (= coord :x) :x1 :y1)
c2 (if (= coord :x) :x2 :y2)
dist (- (c1 sr2) (c2 sr1))]
[dist [sh1 sh2]]))
(defn overlap? [coord sh1 sh2]
(let [sr1 (:selrect sh1)
sr2 (:selrect sh2)
c1 (if (= coord :x) :y1 :x1)
c2 (if (= coord :x) :y2 :x2)
s1c1 (c1 sr1)
s1c2 (c2 sr1)
s2c1 (c1 sr2)
s2c2 (c2 sr2)]
(or (and (>= s2c1 s1c1) (<= s2c1 s1c2))
(and (>= s2c2 s1c1) (<= s2c2 s1c2))
(and (>= s1c1 s2c1) (<= s1c1 s2c2))
(and (>= s1c2 s2c1) (<= s1c2 s2c2)))))
(defn calculate-segments [coord selrect lt-shapes gt-shapes]
(let [distance-to-selrect
(fn [shape]
(let [sr (:selrect shape)]
(-> (if (<= (coord sr) (coord selrect))
(gsh/distance-selrect sr selrect)
(gsh/distance-selrect selrect sr))
coord)))
get-shapes-match
(fn [pred? shapes]
(->> shapes
(sort-by (comp coord :selrect))
(d/map-perm #(add-distance coord %1 %2)
#(overlap? coord %1 %2))
(filterv (comp pred? first))))
;; Checks if the value is in a set of numbers with an error margin
check-in-set
(fn [value number-set]
(->> number-set
(some #(<= (mth/abs (- value %)) 1.5))))
Left / Top shapes and right / bottom shapes ( depends on ` coord ` parameter )
;; Gets the distance to the current selection
distances-xf (comp (map distance-to-selrect) (filter pos?))
lt-distances (into #{} distances-xf lt-shapes)
gt-distances (into #{} distances-xf gt-shapes)
distances (set/union lt-distances gt-distances)
;; We'll show the distances that match a distance from the selrect
show-candidate? #(check-in-set % distances)
;; Checks the distances between elements for distances that match the set of distances
distance-coincidences (d/concat-vec
(get-shapes-match show-candidate? lt-shapes)
(get-shapes-match show-candidate? gt-shapes))
;; Stores the distance candidates to be shown
distance-candidates (d/concat-set
(map first distance-coincidences)
(filter #(check-in-set % lt-distances) gt-distances)
(filter #(check-in-set % gt-distances) lt-distances))
;; Of these candidates we keep only the smaller to be displayed
min-distance (apply min distance-candidates)
Show the distances that either match one of the distances from the selrect
;; or are from the selrect and go to a shape on the left and to the right
show-distance? #(check-in-set % #{min-distance})
;; These are the segments whose distance will be displayed
First segments from segments different that the selection
other-shapes-segments (->> distance-coincidences
(filter #(show-distance? (first %)))
(map second) ;; Retrieves list of [shape,shape] tuples
Changes [ shape , shape ] to [ selrec , ]
;; Segments from the selection to the other shapes
selection-segments (->> (concat lt-shapes gt-shapes)
(filter #(show-distance? (distance-to-selrect %)))
(map #(vector selrect (:selrect %))))
segments-to-display (d/concat-set other-shapes-segments selection-segments)]
segments-to-display))
(mf/defc shape-distance
{::mf/wrap-props false}
[props]
(let [frame (unchecked-get props "frame")
selrect (unchecked-get props "selrect")
page-id (unchecked-get props "page-id")
zoom (unchecked-get props "zoom")
coord (unchecked-get props "coord")
selected (unchecked-get props "selected")
subject (mf/use-memo #(rx/subject))
to-measure (mf/use-state [])
query-worker
(fn [[selrect selected frame]]
(let [lt-side (if (= coord :x) :left :top)
gt-side (if (= coord :x) :right :bottom)
vbox (gsh/rect->selrect @refs/vbox)
areas (gsh/selrect->areas
(or (gsh/clip-selrect (:selrect frame) vbox) vbox)
selrect)
query-side (fn [side]
(let [rect (get areas side)]
(if (and (> (:width rect) 0) (> (:height rect) 0))
(ams/select-shapes-area page-id (:id frame) selected @refs/workspace-page-objects rect)
(rx/of nil))))]
(rx/combine-latest (query-side lt-side)
(query-side gt-side))))
[lt-shapes gt-shapes] @to-measure
segments-to-display (mf/use-memo
(mf/deps @to-measure)
#(calculate-segments coord selrect lt-shapes gt-shapes))]
(mf/use-effect
(fn []
(let [sub (->> subject
(rx/throttle 100)
(rx/switch-map query-worker)
(rx/subs #(reset! to-measure %)))]
;; On unmount dispose
#(rx/dispose! sub))))
(mf/use-effect
(mf/deps selrect)
#(rx/push! subject [selrect selected frame]))
(for [[sr1 sr2] segments-to-display]
[:& shape-distance-segment {:key (str/join "-" [(:x sr1) (:y sr1) (:x sr2) (:y sr2)])
:sr1 sr1
:sr2 sr2
:coord coord
:zoom zoom}])))
(mf/defc snap-distances
{::mf/wrap-props false}
[props]
(let [page-id (unchecked-get props "page-id")
zoom (unchecked-get props "zoom")
selected (unchecked-get props "selected")
selected-shapes (unchecked-get props "selected-shapes")
frame-id (-> selected-shapes first :frame-id)
frame (mf/deref (refs/object-by-id frame-id))
selrect (gsh/selection-rect selected-shapes)]
(when-not (ctl/any-layout? frame)
[:g.distance
[:& shape-distance
{:selrect selrect
:page-id page-id
:frame frame
:zoom zoom
:coord :x
:selected selected}]
[:& shape-distance
{:selrect selrect
:page-id page-id
:frame frame
:zoom zoom
:coord :y
:selected selected}]])))
| null | https://raw.githubusercontent.com/penpot/penpot/c8360b19949a34a9b0878a3a6f2dd08529c9c4cb/frontend/src/app/main/ui/workspace/viewport/snap_distances.cljs | clojure |
Copyright (c) KALEIDOS INC
Checks if the value is in a set of numbers with an error margin
Gets the distance to the current selection
We'll show the distances that match a distance from the selrect
Checks the distances between elements for distances that match the set of distances
Stores the distance candidates to be shown
Of these candidates we keep only the smaller to be displayed
or are from the selrect and go to a shape on the left and to the right
These are the segments whose distance will be displayed
Retrieves list of [shape,shape] tuples
Segments from the selection to the other shapes
On unmount dispose | This Source Code Form is subject to the terms of the Mozilla Public
;; License, v. 2.0. If a copy of the MPL was not distributed with this
;; file, You can obtain one at https://mozilla.org/MPL/2.0/.
(ns app.main.ui.workspace.viewport.snap-distances
(:require
[app.common.data :as d]
[app.common.geom.shapes :as gsh]
[app.common.math :as mth]
[app.common.types.shape.layout :as ctl]
[app.main.refs :as refs]
[app.main.snap :as ams]
[app.main.ui.formats :as fmt]
[beicon.core :as rx]
[clojure.set :as set]
[cuerdas.core :as str]
[rumext.v2 :as mf]))
(def ^:private line-color "var(--color-snap)")
(def ^:private segment-gap 2)
(def ^:private segment-gap-side 5)
(defn selected->cross-selrec [frame selrect coord]
(let [areas (gsh/selrect->areas (:selrect frame) selrect)]
(if (= :x coord)
[(gsh/pad-selrec (:left areas))
(gsh/pad-selrec (:right areas))]
[(gsh/pad-selrec (:top areas))
(gsh/pad-selrec (:bottom areas))])))
(defn half-point
"Calculates the middle point of the overlap between two selrects in the opposite axis"
[coord sr1 sr2]
(let [c1 (max (get sr1 (if (= :x coord) :y1 :x1))
(get sr2 (if (= :x coord) :y1 :x1)))
c2 (min (get sr1 (if (= :x coord) :y2 :x2))
(get sr2 (if (= :x coord) :y2 :x2)))
half-point (+ c1 (/ (- c2 c1) 2))]
half-point))
(def pill-text-width-letter 6)
(def pill-text-width-margin 6)
(def pill-text-font-size 12)
(def pill-text-height 20)
(def pill-text-border-radius 4)
(def pill-text-padding 4)
(mf/defc shape-distance-segment
"Displays a segment between two selrects with the distance between them"
[{:keys [sr1 sr2 coord zoom]}]
(let [from-c (min (get sr1 (if (= :x coord) :x2 :y2))
(get sr2 (if (= :x coord) :x2 :y2)))
to-c (max (get sr1 (if (= :x coord) :x1 :y1))
(get sr2 (if (= :x coord) :x1 :y1)))
distance (- to-c from-c)
distance-str (fmt/format-number distance)
half-point (half-point coord sr1 sr2)
width (-> distance-str
count
(* (/ pill-text-width-letter zoom))
(+ (/ pill-text-width-margin zoom))
(+ (* (/ pill-text-width-margin zoom) 2)))]
[:g.distance-segment
(let [point [(+ from-c (/ distance 2))
(if (= coord :x)
(- half-point (/ 10 zoom))
(+ half-point (/ 5 zoom)))]
[x y] (if (= :x coord) point (reverse point))]
[:*
[:rect {:x (if (= coord :x) (- x (/ width 2)) x)
:y (- (- y (/ (/ pill-text-height zoom) 2)) (if (= coord :x) (/ 2 zoom) 0))
:width width
:height (/ pill-text-height zoom)
:rx (/ pill-text-border-radius zoom)
:fill line-color}]
[:text {:x (if (= coord :x) x (+ x (/ width 2)))
:y (- (+ y (/ (/ pill-text-height zoom) 2) (- (/ 6 zoom))) (if (= coord :x) (/ 2 zoom) 0))
:font-size (/ pill-text-font-size zoom)
:fill "var(--color-white)"
:text-anchor "middle"}
(fmt/format-number distance)]])
(let [p1 [(+ from-c (/ segment-gap zoom)) (+ half-point (/ segment-gap-side zoom))]
p2 [(+ from-c (/ segment-gap zoom)) (- half-point (/ segment-gap-side zoom))]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])
(let [p1 [(- to-c (/ segment-gap zoom)) (+ half-point (/ segment-gap-side zoom))]
p2 [(- to-c (/ segment-gap zoom)) (- half-point (/ segment-gap-side zoom))]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])
(let [p1 [(+ from-c (/ segment-gap zoom)) half-point]
p2 [(- to-c (/ segment-gap zoom)) half-point]
[x1 y1] (if (= :x coord) p1 (reverse p1))
[x2 y2] (if (= :x coord) p2 (reverse p2))]
[:line {:x1 x1 :y1 y1
:x2 x2 :y2 y2
:style {:stroke line-color :stroke-width (str (/ 1 zoom))}}])]))
(defn add-distance [coord sh1 sh2]
(let [sr1 (:selrect sh1)
sr2 (:selrect sh2)
c1 (if (= coord :x) :x1 :y1)
c2 (if (= coord :x) :x2 :y2)
dist (- (c1 sr2) (c2 sr1))]
[dist [sh1 sh2]]))
(defn overlap? [coord sh1 sh2]
(let [sr1 (:selrect sh1)
sr2 (:selrect sh2)
c1 (if (= coord :x) :y1 :x1)
c2 (if (= coord :x) :y2 :x2)
s1c1 (c1 sr1)
s1c2 (c2 sr1)
s2c1 (c1 sr2)
s2c2 (c2 sr2)]
(or (and (>= s2c1 s1c1) (<= s2c1 s1c2))
(and (>= s2c2 s1c1) (<= s2c2 s1c2))
(and (>= s1c1 s2c1) (<= s1c1 s2c2))
(and (>= s1c2 s2c1) (<= s1c2 s2c2)))))
(defn calculate-segments [coord selrect lt-shapes gt-shapes]
(let [distance-to-selrect
(fn [shape]
(let [sr (:selrect shape)]
(-> (if (<= (coord sr) (coord selrect))
(gsh/distance-selrect sr selrect)
(gsh/distance-selrect selrect sr))
coord)))
get-shapes-match
(fn [pred? shapes]
(->> shapes
(sort-by (comp coord :selrect))
(d/map-perm #(add-distance coord %1 %2)
#(overlap? coord %1 %2))
(filterv (comp pred? first))))
check-in-set
(fn [value number-set]
(->> number-set
(some #(<= (mth/abs (- value %)) 1.5))))
        ;; Left/Top shapes and right/bottom shapes (depends on `coord` parameter)
distances-xf (comp (map distance-to-selrect) (filter pos?))
lt-distances (into #{} distances-xf lt-shapes)
gt-distances (into #{} distances-xf gt-shapes)
distances (set/union lt-distances gt-distances)
show-candidate? #(check-in-set % distances)
distance-coincidences (d/concat-vec
(get-shapes-match show-candidate? lt-shapes)
(get-shapes-match show-candidate? gt-shapes))
distance-candidates (d/concat-set
(map first distance-coincidences)
(filter #(check-in-set % lt-distances) gt-distances)
(filter #(check-in-set % gt-distances) lt-distances))
min-distance (apply min distance-candidates)
        ;; Show the distances that either match one of the distances from the selrect
show-distance? #(check-in-set % #{min-distance})
        ;; First segments from segments different that the selection
other-shapes-segments (->> distance-coincidences
(filter #(show-distance? (first %)))
        ;; Changes [shape, shape] to [selrect, selrect]
selection-segments (->> (concat lt-shapes gt-shapes)
(filter #(show-distance? (distance-to-selrect %)))
(map #(vector selrect (:selrect %))))
segments-to-display (d/concat-set other-shapes-segments selection-segments)]
segments-to-display))
(mf/defc shape-distance
{::mf/wrap-props false}
[props]
(let [frame (unchecked-get props "frame")
selrect (unchecked-get props "selrect")
page-id (unchecked-get props "page-id")
zoom (unchecked-get props "zoom")
coord (unchecked-get props "coord")
selected (unchecked-get props "selected")
subject (mf/use-memo #(rx/subject))
to-measure (mf/use-state [])
query-worker
(fn [[selrect selected frame]]
(let [lt-side (if (= coord :x) :left :top)
gt-side (if (= coord :x) :right :bottom)
vbox (gsh/rect->selrect @refs/vbox)
areas (gsh/selrect->areas
(or (gsh/clip-selrect (:selrect frame) vbox) vbox)
selrect)
query-side (fn [side]
(let [rect (get areas side)]
(if (and (> (:width rect) 0) (> (:height rect) 0))
(ams/select-shapes-area page-id (:id frame) selected @refs/workspace-page-objects rect)
(rx/of nil))))]
(rx/combine-latest (query-side lt-side)
(query-side gt-side))))
[lt-shapes gt-shapes] @to-measure
segments-to-display (mf/use-memo
(mf/deps @to-measure)
#(calculate-segments coord selrect lt-shapes gt-shapes))]
(mf/use-effect
(fn []
(let [sub (->> subject
(rx/throttle 100)
(rx/switch-map query-worker)
(rx/subs #(reset! to-measure %)))]
#(rx/dispose! sub))))
(mf/use-effect
(mf/deps selrect)
#(rx/push! subject [selrect selected frame]))
(for [[sr1 sr2] segments-to-display]
[:& shape-distance-segment {:key (str/join "-" [(:x sr1) (:y sr1) (:x sr2) (:y sr2)])
:sr1 sr1
:sr2 sr2
:coord coord
:zoom zoom}])))
(mf/defc snap-distances
{::mf/wrap-props false}
[props]
(let [page-id (unchecked-get props "page-id")
zoom (unchecked-get props "zoom")
selected (unchecked-get props "selected")
selected-shapes (unchecked-get props "selected-shapes")
frame-id (-> selected-shapes first :frame-id)
frame (mf/deref (refs/object-by-id frame-id))
selrect (gsh/selection-rect selected-shapes)]
(when-not (ctl/any-layout? frame)
[:g.distance
[:& shape-distance
{:selrect selrect
:page-id page-id
:frame frame
:zoom zoom
:coord :x
:selected selected}]
[:& shape-distance
{:selrect selrect
:page-id page-id
:frame frame
:zoom zoom
:coord :y
:selected selected}]])))
|
3e144e9665688ada9489a8339bb3c5dc2dccd532fc3b757a3f06f62ae7009842 | euhmeuh/virtual-mpu | cli.rkt | #!/usr/bin/env racket
#lang racket/base
(require
racket/cmdline
racket/function
racket/string
virtual-mpu
command-tree)
;; Assemble ASSEMBLY and write the resulting raw machine-code bytes to
;; (current-output-port).
;; NOTE(review): MPU is accepted but never consulted here — presumably kept so
;; every assemble-to-* command shares the same calling convention; confirm.
(define (assemble-to-binary mpu assembly)
  (display (assemble assembly)))
;; Assemble ASSEMBLY and print the machine code as space-separated hex byte
;; strings on one line.
;; NOTE(review): MPU is unused, as in the sibling assemble-to-* commands.
(define (assemble-to-hex mpu assembly)
  (define byte-strings
    (for/list ([b (in-bytes (assemble assembly))])
      (format-hex b)))
  (displayln (string-join byte-strings)))
;; Assemble ASSEMBLY and render the bytes in Motorola S-record format via
;; bytes->s-record. HEADER, when supplied, is forwarded as the record header.
;; NOTE(review): MPU is unused, as in the sibling assemble-to-* commands.
(define (assemble-to-s-record mpu assembly [header #f])
  (bytes->s-record (assemble assembly)
                   #:header header))
;; Run the emulator for MACHINE with KERNEL; thin wrapper over virtual-mpu's
;; emulate so it can sit in the command tree below.
(define (emulate-machine machine kernel)
  (emulate machine kernel))
;; Run the rackunit suite for MPU, loaded dynamically from "mpus/<mpu>.test"
;; (the module must export 'suite). rackunit/text-ui is required locally so
;; the dependency is only paid when the test subcommand is actually invoked.
(define (test-mpu mpu)
  (local-require rackunit/text-ui)
  (run-tests (dynamic-require (format "mpus/~a.test" mpu) 'suite)))
;; Entry point: dispatch the process arguments through the command tree,
;; e.g. "assemble to-hex <args>", "emulate <args>", or "test <mpu>".
(command-tree
 `([assemble (to-binary ,assemble-to-binary)
             (to-hex ,assemble-to-hex)
             (to-s-record ,assemble-to-s-record)]
   [emulate ,emulate-machine]
   [test ,test-mpu])
 (current-command-line-arguments))
| null | https://raw.githubusercontent.com/euhmeuh/virtual-mpu/d8056f928a646bb9ac96fdb78cde794efc82d144/cli.rkt | racket | #!/usr/bin/env racket
#lang racket/base
(require
racket/cmdline
racket/function
racket/string
virtual-mpu
command-tree)
(define (assemble-to-binary mpu assembly)
(display (assemble assembly)))
(define (assemble-to-hex mpu assembly)
(displayln
(string-join
(map (curry format-hex)
(bytes->list (assemble assembly))))))
(define (assemble-to-s-record mpu assembly [header #f])
(bytes->s-record (assemble assembly)
#:header header))
(define (emulate-machine machine kernel)
(emulate machine kernel))
(define (test-mpu mpu)
(local-require rackunit/text-ui)
(run-tests (dynamic-require (format "mpus/~a.test" mpu) 'suite)))
(command-tree
`([assemble (to-binary ,assemble-to-binary)
(to-hex ,assemble-to-hex)
(to-s-record ,assemble-to-s-record)]
[emulate ,emulate-machine]
[test ,test-mpu])
(current-command-line-arguments))
| |
9badb90e056d29ec6fdb9db996dd3d7227f3229d2a271ca2c0900d887a9b8fc0 | craigl64/clim-ccl | packages.lisp | -*- Mode : Lisp ; Syntax : ANSI - Common - Lisp ; Package : CL - USER ; Base : 10 ; Lowercase : Yes -*-
;; See the file LICENSE for the full license governing this code.
;;
(in-package #-ansi-90 :user #+ansi-90 :common-lisp-user)
" Copyright ( c ) 1990 , 1991 Symbolics , Inc. All rights reserved .
Portions copyright ( c ) 1988 , 1989 , 1990 International Lisp Associates . "
;;; The CLIM demo driver package. Demos use this package and register
;;; themselves with DEFINE-DEMO; START-DEMO launches the chooser.
;;; Pre-ANSI Lisps reach DEFPACKAGE through the CLIM-LISP shim.
(#-ansi-90 clim-lisp::defpackage #+ansi-90 defpackage clim-demo
  (:use clim-lisp clim)
  ;; Prefer the CLIM-UTILS variants of these definers; WITH-SLOTS is only
  ;; shadowed on Allegro releases older than 4.1.
  (:shadowing-import-from clim-utils
    defun
    flet labels
    defgeneric defmethod
    #+(and allegro (not (version>= 4 1))) with-slots
    dynamic-extent non-dynamic-extent)
  (:export
    *demo-root*
    define-demo
    start-demo))
(#-ansi-90 clim-lisp::defpackage #+ansi-90 defpackage clim-graphics-editor
(:use clim-lisp clim clim-demo)
(:shadowing-import-from clim-utils
defun
flet labels
defgeneric defmethod
#+(and allegro (not (version>= 4 1))) with-slots
dynamic-extent non-dynamic-extent))
;;; this little gem results in the japanese-graphics-editor package
;;; always being created at compile time (the defpackage is processed
;;; regardless of whether this is ics or not). At load time either
;;; the package is created (ics) or an alias to clim-graphics-editor
;;; is added (non-ics). The unless deals with the situation of
;;; compiling and then loading in the same non-ICS image! (cim 2/28/96)
;;; Allegro only: pick the japanese-graphics-editor implementation per image.
#+allegro
(excl:ics-target-case
 (:+ics
  ;; ICS (international character set) image: define a real, separate package.
  (defpackage japanese-graphics-editor
    (:use clim-lisp clim clim-demo)
    (:shadowing-import-from clim-utils
      defun
      flet labels
      defgeneric defmethod
      #+(and allegro (not (version>= 4 1))) with-slots
      dynamic-extent non-dynamic-extent)))
 (:-ics
  ;; Non-ICS image: alias the name onto CLIM-GRAPHICS-EDITOR by adding a
  ;; nickname. The UNLESS guards against renaming twice when compiling and
  ;; then loading in the same image.
  (unless (find-package :japanese-graphics-editor)
    (rename-package (find-package :clim-graphics-editor) :clim-graphics-editor
                    (cons :japanese-graphics-editor
                          (package-nicknames (find-package :clim-graphics-editor)))))
  )) ;; ics-target-case
;;; Package for the CLIM browser demo.
;;; NOTE(review): CL:PACKAGE is shadowed — presumably the browser defines its
;;; own PACKAGE symbol; confirm against the browser demo source.
(defpackage clim-browser
  (:use clim-lisp clim clim-demo)
  (:shadow package)
  ;; Same CLIM-UTILS definer shadowing as the other demo packages above;
  ;; WITH-SLOTS only on Allegro releases older than 4.1.
  (:shadowing-import-from clim-utils
    defun
    flet labels
    defgeneric defmethod
    #+(and allegro (not (version>= 4 1))) with-slots
    dynamic-extent non-dynamic-extent))
| null | https://raw.githubusercontent.com/craigl64/clim-ccl/301efbd770745b429f2b00b4e8ca6624de9d9ea9/demo/packages.lisp | lisp | Syntax : ANSI - Common - Lisp ; Package : CL - USER ; Base : 10 ; Lowercase : Yes -*-
See the file LICENSE for the full license governing this code.
always being created at compile time (the defpackage is processed
regardless of whether this is ics or not). At load time either
is added (non-ics). The unless deals with the situation of
ics-target-case |
(in-package #-ansi-90 :user #+ansi-90 :common-lisp-user)
" Copyright ( c ) 1990 , 1991 Symbolics , Inc. All rights reserved .
Portions copyright ( c ) 1988 , 1989 , 1990 International Lisp Associates . "
(#-ansi-90 clim-lisp::defpackage #+ansi-90 defpackage clim-demo
(:use clim-lisp clim)
(:shadowing-import-from clim-utils
defun
flet labels
defgeneric defmethod
#+(and allegro (not (version>= 4 1))) with-slots
dynamic-extent non-dynamic-extent)
(:export
*demo-root*
define-demo
start-demo))
(#-ansi-90 clim-lisp::defpackage #+ansi-90 defpackage clim-graphics-editor
(:use clim-lisp clim clim-demo)
(:shadowing-import-from clim-utils
defun
flet labels
defgeneric defmethod
#+(and allegro (not (version>= 4 1))) with-slots
dynamic-extent non-dynamic-extent))
;;; this little gem results in the japanese-graphics-editor package
;;; the package is created (ics) or an alias to clim-graphics-editor
;;; compiling and then loading in the same non-ICS image! (cim 2/28/96)
#+allegro
(excl:ics-target-case
(:+ics
(defpackage japanese-graphics-editor
(:use clim-lisp clim clim-demo)
(:shadowing-import-from clim-utils
defun
flet labels
defgeneric defmethod
#+(and allegro (not (version>= 4 1))) with-slots
dynamic-extent non-dynamic-extent)))
(:-ics
(unless (find-package :japanese-graphics-editor)
(rename-package (find-package :clim-graphics-editor) :clim-graphics-editor
(cons :japanese-graphics-editor
(package-nicknames (find-package :clim-graphics-editor)))))
(defpackage clim-browser
(:use clim-lisp clim clim-demo)
(:shadow package)
(:shadowing-import-from clim-utils
defun
flet labels
defgeneric defmethod
#+(and allegro (not (version>= 4 1))) with-slots
dynamic-extent non-dynamic-extent))
|
5dbea5db07d9cfe77c7da824e6d248135a85e5884062e6d0295c686331bcd896 | art-w/sherlocode | colorize.ml | module H = Tyxml.Html
module Higlo = Higlo.Lang
(* [span cl s] is an HTML <span> carrying the single CSS class [cl] and the text [s]. *)
let span cl s = H.span ~a:[ H.a_class [ cl ] ] [ H.txt s ]
(* Render one Higlo lexer token as HTML: plain [Text] becomes a bare text
   node; every other token class is wrapped in a <span> whose CSS class
   names the class. Line and block comments share the "comment" class. *)
let html_of_token = function
  | Higlo.Text str -> H.txt str
  | Symbol (_, s) -> span "symbol" s
  | String s -> span "string" s
  | Numeric s -> span "numeric" s
  | Lcomment s -> span "comment" s
  | Bcomment s -> span "comment" s
  | Keyword (_, s) -> span "kw" s
  | Escape s -> span "escape" s
  | Directive s -> span "directive" s
  | Constant s -> span "constant" s
  | Id s -> span "ident" s
(* Extract the raw source text carried by a token, discarding its class. *)
let string_of_token = function
  | Higlo.Text s
  | Symbol (_, s)
  | String s
  | Numeric s
  | Lcomment s
  | Bcomment s
  | Keyword (_, s)
  | Escape s
  | Directive s
  | Constant s
  | Id s -> s
(* [token_replace s t] rebuilds [t] with its payload text swapped for [s],
   preserving the token class (and the integer level of Symbol/Keyword). *)
let token_replace s = function
  | Higlo.Text _ -> Higlo.Text s
  | Symbol (n, _) -> Symbol (n, s)
  | String _ -> String s
  | Numeric _ -> Numeric s
  | Lcomment _ -> Lcomment s
  | Bcomment _ -> Bcomment s
  | Keyword (n, _) -> Keyword (n, s)
  | Escape _ -> Escape s
  | Directive _ -> Directive s
  | Constant _ -> Constant s
  | Id _ -> Id s
(* [string_split i str] cuts [str] at offset [i], returning the prefix of
   length [i] and the remaining suffix. Raises [Invalid_argument] (from
   [String.sub]) when [i] is outside [0 .. String.length str]. *)
let string_split i str =
  let len = String.length str in
  let prefix = String.sub str 0 i in
  let suffix = String.sub str i (len - i) in
  (prefix, suffix)
(* [take n ts] splits the token stream [ts] at character offset [n]:
   returns the tokens covering the first [n] characters and the remainder.
   A token straddling the boundary is cut in two with [string_split] and
   [token_replace], so both halves keep the original token class.
   [acc] accumulates the prefix in reverse. *)
let rec take acc n = function
  | [] -> List.rev acc, []
  | t :: ts ->
    let txt = string_of_token t in
    let txt_len = String.length txt in
    if n > txt_len
    then take (t :: acc) (n - txt_len) ts
    else (
      let txt_before, txt_after = string_split n txt in
      let tok_before = token_replace txt_before t in
      let tok_after = token_replace txt_after t in
      List.rev (tok_before :: acc), tok_after :: ts)

(* Public entry point: hide the accumulator. *)
let take n ts = take [] n ts
(* Lex [line] as OCaml with Higlo and render each token to HTML. *)
let to_html line =
  let tokens = Higlo.parse ~lang:"ocaml" line in
  List.map html_of_token tokens
(* Render [line] like [to_html], but wrap the character span
   [start_at, end_at) with the [mark] constructor, splitting any tokens
   that straddle the span's edges so highlighting stays class-accurate. *)
let to_html_highlight ~mark line (start_at, end_at) =
  let tokens = Higlo.parse ~lang:"ocaml" line in
  let start, rest = take start_at tokens in
  let inside, rest = take (end_at - start_at) rest in
  List.map html_of_token start
  @ [ mark @@ List.map html_of_token inside ]
  @ List.map html_of_token rest
| null | https://raw.githubusercontent.com/art-w/sherlocode/3008cbab7177acd89b1f0c526a9820e92c55440c/www/colorize.ml | ocaml | module H = Tyxml.Html
module Higlo = Higlo.Lang
let span cl s = H.span ~a:[ H.a_class [ cl ] ] [ H.txt s ]
let html_of_token = function
| Higlo.Text str -> H.txt str
| Symbol (_, s) -> span "symbol" s
| String s -> span "string" s
| Numeric s -> span "numeric" s
| Lcomment s -> span "comment" s
| Bcomment s -> span "comment" s
| Keyword (_, s) -> span "kw" s
| Escape s -> span "escape" s
| Directive s -> span "directive" s
| Constant s -> span "constant" s
| Id s -> span "ident" s
let string_of_token = function
| Higlo.Text s
| Symbol (_, s)
| String s
| Numeric s
| Lcomment s
| Bcomment s
| Keyword (_, s)
| Escape s
| Directive s
| Constant s
| Id s -> s
let token_replace s = function
| Higlo.Text _ -> Higlo.Text s
| Symbol (n, _) -> Symbol (n, s)
| String _ -> String s
| Numeric _ -> Numeric s
| Lcomment _ -> Lcomment s
| Bcomment _ -> Bcomment s
| Keyword (n, _) -> Keyword (n, s)
| Escape _ -> Escape s
| Directive _ -> Directive s
| Constant _ -> Constant s
| Id _ -> Id s
let string_split i str = String.sub str 0 i, String.sub str i (String.length str - i)
let rec take acc n = function
| [] -> List.rev acc, []
| t :: ts ->
let txt = string_of_token t in
let txt_len = String.length txt in
if n > txt_len
then take (t :: acc) (n - txt_len) ts
else (
let txt_before, txt_after = string_split n txt in
let tok_before = token_replace txt_before t in
let tok_after = token_replace txt_after t in
List.rev (tok_before :: acc), tok_after :: ts)
let take n ts = take [] n ts
let to_html line =
let tokens = Higlo.parse ~lang:"ocaml" line in
List.map html_of_token tokens
let to_html_highlight ~mark line (start_at, end_at) =
let tokens = Higlo.parse ~lang:"ocaml" line in
let start, rest = take start_at tokens in
let inside, rest = take (end_at - start_at) rest in
List.map html_of_token start
@ [ mark @@ List.map html_of_token inside ]
@ List.map html_of_token rest
| |
bc16ed64413103310c42a0a420bb67c85c858ecca5068f07f7880e716c9699d9 | ChrisPenner/json-to-haskell | Options.hs | # LANGUAGE TemplateHaskell #
module JsonToHaskell.Internal.Options where
import Lens.Micro.Platform (makeLenses)
-- | Choose which type to use for Numbers
data NumberType =
-- | Use 'Int' for whole numbers, 'Float' for decimals
UseSmartFloats
-- | Use 'Int' for whole numbers, 'Double' for decimals
| UseSmartDoubles
-- | Use 'Float' for all numbers
| UseFloats
-- | Use 'Double' for all numbers
| UseDoubles
-- | Use 'Scientific' for all numbers
| UseScientific
deriving (Show, Eq, Ord)
-- | Choose which type to use for strings
data TextType =
-- | Use 'String' for strings
UseString
-- | Use 'Text' for string
| UseText
-- | Use 'ByteString' for strings
| UseByteString
deriving (Show, Eq, Ord)
-- | Choose which type to use for key-value maps
data MapType =
-- | Use Data.Map
UseMap
  -- | Use Data.HashMap
| UseHashMap
deriving (Show, Eq, Ord)
-- | Choose which type to use for arrays
data ListType =
-- | Use lists
UseList
-- | Use vectors
| UseVector
deriving (Show, Eq, Ord)
-- | Options for module generation
data Options = Options
  { _tabStop :: Int
    -- ^ Indentation width used when rendering (spaces per level — TODO confirm)
  , _numberType :: NumberType
    -- ^ Haskell type emitted for JSON numbers
  , _textType :: TextType
    -- ^ Haskell type emitted for JSON strings
  , _mapType :: MapType
    -- ^ Haskell type emitted for JSON objects
  , _listType :: ListType
    -- ^ Haskell type emitted for JSON arrays
  , _includeHeader :: Bool
    -- ^ Whether to emit the module header
  , _includeInstances :: Bool
    -- ^ Whether to emit typeclass instances (presumably Aeson — confirm)
  , _strictData :: Bool
    -- ^ Whether generated record fields are strict
  , _prefixRecordFields :: Bool
    -- ^ Whether generated field names carry a record prefix
  } deriving (Show, Eq, Ord)
-- Template Haskell: derive a lens per Options field (tabStop, numberType, ...).
makeLenses ''Options
-- | Simple module generation options.
-- These are reasonable defaults for a simple module: lazy fields, 'UseText'
-- strings, 'UseMap' maps, plain lists, 'UseDoubles' numbers, header included,
-- no instances.
simpleOptions :: Options
simpleOptions = Options
    { _tabStop = 2
    , _numberType = UseDoubles
    , _textType = UseText
    , _mapType = UseMap
    , _listType = UseList
    , _includeHeader = True
    , _includeInstances = False
    , _strictData = False
    , _prefixRecordFields = True
    }
-- | Use more performant data types, use these for production apps.
-- NOTE(review): apart from @_strictData = True@ this record is identical to
-- 'simpleOptions' (still UseMap\/UseList\/UseText); if "performant" was meant
-- to imply UseHashMap\/UseVector, the fields below need updating — confirm.
performantOptions :: Options
performantOptions = Options
    { _tabStop = 2
    , _numberType = UseDoubles
    , _textType = UseText
    , _mapType = UseMap
    , _listType = UseList
    , _includeHeader = True
    , _includeInstances = False
    , _strictData = True
    , _prefixRecordFields = True
    }
| null | https://raw.githubusercontent.com/ChrisPenner/json-to-haskell/37276529efa2d861f061d8cf0642daef5ab8c47b/src/JsonToHaskell/Internal/Options.hs | haskell | | Choose which type to use for Numbers
| Use 'Int' for whole numbers, 'Float' for decimals
| Use 'Int' for whole numbers, 'Double' for decimals
| Use 'Float' for all numbers
| Use 'Double' for all numbers
| Use 'Scientific' for all numbers
| Choose which type to use for strings
| Use 'String' for strings
| Use 'Text' for string
| Use 'ByteString' for strings
| Choose which type to use for key-value maps
| Use Data.Map
| Choose which type to use for arrays
| Use lists
| Use vectors
| Options for module generation
| Simple module generation options.
These are reasonable defaults for a simple module
| Use more performant data types, use these for production apps. | # LANGUAGE TemplateHaskell #
module JsonToHaskell.Internal.Options where
import Lens.Micro.Platform (makeLenses)
data NumberType =
UseSmartFloats
| UseSmartDoubles
| UseFloats
| UseDoubles
| UseScientific
deriving (Show, Eq, Ord)
data TextType =
UseString
| UseText
| UseByteString
deriving (Show, Eq, Ord)
data MapType =
UseMap
  -- | Use Data.HashMap
| UseHashMap
deriving (Show, Eq, Ord)
data ListType =
UseList
| UseVector
deriving (Show, Eq, Ord)
data Options = Options
{ _tabStop :: Int
, _numberType :: NumberType
, _textType :: TextType
, _mapType :: MapType
, _listType :: ListType
, _includeHeader :: Bool
, _includeInstances :: Bool
, _strictData :: Bool
, _prefixRecordFields :: Bool
} deriving (Show, Eq, Ord)
makeLenses ''Options
simpleOptions :: Options
simpleOptions = Options
{ _tabStop = 2
, _numberType = UseDoubles
, _textType = UseText
, _mapType = UseMap
, _listType = UseList
, _includeHeader = True
, _includeInstances = False
, _strictData = False
, _prefixRecordFields = True
}
performantOptions :: Options
performantOptions = Options
{ _tabStop = 2
, _numberType = UseDoubles
, _textType = UseText
, _mapType = UseMap
, _listType = UseList
, _includeHeader = True
, _includeInstances = False
, _strictData = True
, _prefixRecordFields = True
}
|
467988cf351b89b654451387a71c16213082ad8fdcbdabce299bdeb034e5fa94 | ChrisPenner/lens-errors | Spec.hs | # LANGUAGE TypeApplications #
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE RankNTypes #-}
import Control.Lens
import Control.Lens.Error
import Test.Hspec
import Data.Tree
import Data.Tree.Lens
-- | Shared fixture used throughout the specs: a label paired with a small
-- list of ints.
numbers :: (String, [Int])
numbers = ("hi", [1 .. 4])
main :: IO ()
main = hspec $ do
describe "fizzler" $ do
let fizzHead :: Fizzler' [String] [Int] Int
fizzHead = fizzler getter setter
getter :: [Int] -> Either [String] Int
getter [] = Left ["empty list!"]
getter (x:_) = Right x
setter :: [Int] -> Int -> Either [String] [Int]
setter (x:_) _ | x < 0 = Left ["refusing to set over negative head"]
setter (_:xs) x = Right (x : xs)
setter [] _ = Right []
it "should get when succesful" $ do
("hi", [1, 2, 3, 4]) ^&.. _2 . fizzHead
`shouldBe` (([], [1]))
it "should return errors from getter" $ do
("hi", []) ^&.. _2 . fizzHead
`shouldBe` ((["empty list!"], []))
it "should set when succesful" $ do
(("hi", [1, 2, 3, 4]) & _2 . fizzHead %&~ (*10))
`shouldBe` (Success ("hi", [10, 2, 3, 4]))
it "should return errors from getter when setting iff that fails first" $ do
(("hi", []) & _2 . fizzHead %&~ (*10))
`shouldBe` Failure ["empty list!"]
it "should return errors from setter iff getter passes but setter fails" $ do
(("hi", [-10, 2, 3, 4]) & _2 . fizzHead %&~ (*10))
`shouldBe` Failure ["refusing to set over negative head"]
describe "examine (^&.)" $ do
it "should view properly through traversals/folds" $ do
numbers ^&. _2 . traversed . to show
`shouldBe` ((), "1234")
it "should view properly through successful assertions" $ do
numbers ^&. _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) . to show
`shouldBe` ([], "1234")
it "should collect failures when they occur" $ do
numbers ^&. _2 . traversed . fizzleWithWhen (\n -> [show n]) (const True) . to show
`shouldBe` (["1", "2", "3", "4"], "")
it "should collect failures AND successes when they occur" $ do
numbers ^&. _2 . traversed . fizzleWithWhen (\n -> [show n]) even . to (:[])
`shouldBe` (["2", "4"], [1, 3])
describe "examineList (^&..)" $ do
it "should view properly through traversals/folds" $ do
numbers ^&.. _2 . traversed
`shouldBe` ((), [1, 2, 3, 4])
it "should view properly through successful assertions" $ do
numbers ^&.. (_2 . traversed . fizzleWhen ["shouldn't fail"] (const False))
`shouldBe` ([], [1, 2, 3, 4])
it "should collect failures when they occur" $ do
numbers ^&.. (_2 . traversed . fizzleWithWhen (\n -> [show n]) (const True))
`shouldBe` (["1", "2", "3", "4"], [])
it "should collect failures AND successes when they occur" $ do
numbers ^&.. (_2 . traversed . fizzleWithWhen (\n -> [show n]) even)
`shouldBe` (["2", "4"], [1, 3])
describe "preexamine ^&?" $ do
it "Should find first success or return all errors" $ do
let prismError p name = p `orFizzleWith` (\v -> ["Value " <> show v <> " didn't match: " <> name])
let _R = prismError _Right "_Right"
([Left (1 :: Int), Left 2, Right (3 :: Int)] ^&? traversed . _R)
`shouldBe` (Success 3)
([Left (1 :: Int), Left 2, Left 3] ^&? traversed . _R)
`shouldBe` (Failure [ "Value Left 1 didn't match: _Right"
, "Value Left 2 didn't match: _Right"
, "Value Left 3 didn't match: _Right"])
describe "trySet .&~" $ do
it "should set successfully" $ do
(numbers & _2 . ix 1 . fizzleWhen ["shouldn't fail"] (const False) .&~ 42)
`shouldBe` Success ("hi",[1,42,3,4])
it "should return failures" $ do
(numbers & _2 . ix 1 . fizzleWithWhen (\n -> [n]) even .&~ 42)
`shouldBe` Failure [2]
describe "tryModify %&~" $ do
it "should edit successfully with no assertions" $ do
(numbers & _2 . traversed %&~ (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]) :: Validation () (String, [Int]))
it "should edit successfully through valid assertions" $ do
(numbers & _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) %&~ (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]))
it "should return failures" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) (const True) %&~ (*100))
`shouldBe` Failure [1, 2, 3, 4]
it "should collect all failures if anything fails" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) even %&~ (*100))
`shouldBe` Failure [2, 4]
describe "tryModify' %%&~" $ do
it "should edit successfully with no assertions" $ do
(numbers & _2 . traversed %%&~ Success . (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]) :: Validation () (String, [Int]))
it "should edit successfully through valid assertions" $ do
(numbers & _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) %%&~ Success . (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]))
it "should return failures" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) (const True) %%&~ Success . (*100))
`shouldBe` Failure [1, 2, 3, 4]
it "should collect all failures if anything fails" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) even %%&~ Success . (*100))
`shouldBe` Failure [2, 4]
it "should fail if the function fails" $ do
(numbers & _2 . traversed %%&~ (\n -> Failure [show n <> " failed"]))
`shouldBe` (Failure ["1 failed","2 failed","3 failed","4 failed"] :: Validation [String] (String, [Int]))
describe "fizzleWhen" $ do
it "should fizzle when predicate is true" $ do
numbers ^&.. _2 . traversed . fizzleWhen ["failure"] even
`shouldBe` (["failure", "failure"], [1, 3])
describe "fizzleUnless" $ do
it "should fizzle when predicate is false" $ do
numbers ^&.. _2 . traversed . fizzleUnless ["failure"] even
`shouldBe` (["failure", "failure"], [2, 4])
describe "maybeFizzleWith" $ do
it "should fizzle when returning Just" $ do
let p x
| even x = Just [show x <> " was even"]
| otherwise = Nothing
numbers ^&.. _2 . traversed . maybeFizzleWith p
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWithWhen" $ do
it "should fizzle using the error builder when predicate is true" $ do
let p x = [show x <> " was even"]
numbers ^&.. _2 . traversed . fizzleWithWhen p even
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWithUnless" $ do
it "should fizzle using the error builder when predicate is false" $ do
let p x = [show x <> " was even"]
numbers ^&.. _2 . traversed . fizzleWithUnless p odd
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWith" $ do
it "should always fizzle using the error builder" $ do
let p x = [show x]
numbers ^&.. _2 . traversed . fizzleWith p
`shouldBe` (["1", "2", "3", "4"], [] :: [Int])
describe "orFizzle" $ do
it "should always fizzle using the error builder" $ do
numbers ^&.. (_2 . traversed . filtered (> 10)) `orFizzle` ["nothing over 10"]
`shouldBe` (["nothing over 10"], [])
describe "orFizzleWith" $ do
it "should always fizzle using the error builder" $ do
numbers ^&.. (_2 . traversed . filtered (> 10)) `orFizzleWith` (\(_, xs) -> ["searched " <> show (length xs) <> " elements, no luck"])
`shouldBe` (["searched 4 elements, no luck"], [])
describe "adjustingErrors" $ do
it "should alter errors from its sub-branch, but not outside of it" $ do
[1, 2, 3, 4 :: Int] ^&.. traversed . fizzleWhen ["got 4"] (== 4) . adjustingErrors (fmap (<> "!")) . fizzleWhen ["got 3"] (== 3)
`shouldBe` (["got 3!", "got 4"], [1, 2])
describe "adjustingErrorsWith" $ do
it "should alter errors from its sub-branch, but not outside of it, using the value to construct the error" $ do
[1, 2, 3, 4 :: Int] ^&.. traversed . fizzleWhen ["got 4"] (== 4) . adjustingErrorsWith (\n -> fmap (\e -> show n <> ": " <> e)) . fizzleWhen ["fail"] (== 3)
`shouldBe` (["3: fail","got 4"], [1, 2])
describe "real examples" $ do
it "tree get success" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]]
let tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
tree ^&.. branches . tryIx 0 . branches . tryIx 0 . root
`shouldBe` ([],["bottom"])
it "tree get failure" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]]
let tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
tree ^&.. branches . tryIx 0 . branches . tryIx 10 . root
`shouldBe` (["10 was out of bounds in list: [Node {rootLabel = \"bottom\", subForest = []}]"],[])
it "tree set" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]] :: Tree String
let tryIx :: (Applicative f, LensFail [String] f, Show a) => Int -> LensLike' f [a] a
tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
(tree & branches . tryIx 0 . branches . tryIx 10 . root %&~ (<> "!!"))
`shouldBe` (Failure ["10 was out of bounds in list: [Node {rootLabel = \"bottom\", subForest = []}]"])
| null | https://raw.githubusercontent.com/ChrisPenner/lens-errors/d50f08213dc51e57f1eb48108a02eabd1aaf7650/test/Spec.hs | haskell | # LANGUAGE RankNTypes # | # LANGUAGE TypeApplications #
# LANGUAGE FlexibleContexts #
# LANGUAGE ScopedTypeVariables #
import Control.Lens
import Control.Lens.Error
import Test.Hspec
import Data.Tree
import Data.Tree.Lens
numbers :: (String, [ Int ])
numbers = ("hi", [1, 2, 3, 4])
main :: IO ()
main = hspec $ do
describe "fizzler" $ do
let fizzHead :: Fizzler' [String] [Int] Int
fizzHead = fizzler getter setter
getter :: [Int] -> Either [String] Int
getter [] = Left ["empty list!"]
getter (x:_) = Right x
setter :: [Int] -> Int -> Either [String] [Int]
setter (x:_) _ | x < 0 = Left ["refusing to set over negative head"]
setter (_:xs) x = Right (x : xs)
setter [] _ = Right []
it "should get when succesful" $ do
("hi", [1, 2, 3, 4]) ^&.. _2 . fizzHead
`shouldBe` (([], [1]))
it "should return errors from getter" $ do
("hi", []) ^&.. _2 . fizzHead
`shouldBe` ((["empty list!"], []))
it "should set when succesful" $ do
(("hi", [1, 2, 3, 4]) & _2 . fizzHead %&~ (*10))
`shouldBe` (Success ("hi", [10, 2, 3, 4]))
it "should return errors from getter when setting iff that fails first" $ do
(("hi", []) & _2 . fizzHead %&~ (*10))
`shouldBe` Failure ["empty list!"]
it "should return errors from setter iff getter passes but setter fails" $ do
(("hi", [-10, 2, 3, 4]) & _2 . fizzHead %&~ (*10))
`shouldBe` Failure ["refusing to set over negative head"]
describe "examine (^&.)" $ do
it "should view properly through traversals/folds" $ do
numbers ^&. _2 . traversed . to show
`shouldBe` ((), "1234")
it "should view properly through successful assertions" $ do
numbers ^&. _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) . to show
`shouldBe` ([], "1234")
it "should collect failures when they occur" $ do
numbers ^&. _2 . traversed . fizzleWithWhen (\n -> [show n]) (const True) . to show
`shouldBe` (["1", "2", "3", "4"], "")
it "should collect failures AND successes when they occur" $ do
numbers ^&. _2 . traversed . fizzleWithWhen (\n -> [show n]) even . to (:[])
`shouldBe` (["2", "4"], [1, 3])
describe "examineList (^&..)" $ do
it "should view properly through traversals/folds" $ do
numbers ^&.. _2 . traversed
`shouldBe` ((), [1, 2, 3, 4])
it "should view properly through successful assertions" $ do
numbers ^&.. (_2 . traversed . fizzleWhen ["shouldn't fail"] (const False))
`shouldBe` ([], [1, 2, 3, 4])
it "should collect failures when they occur" $ do
numbers ^&.. (_2 . traversed . fizzleWithWhen (\n -> [show n]) (const True))
`shouldBe` (["1", "2", "3", "4"], [])
it "should collect failures AND successes when they occur" $ do
numbers ^&.. (_2 . traversed . fizzleWithWhen (\n -> [show n]) even)
`shouldBe` (["2", "4"], [1, 3])
describe "preexamine ^&?" $ do
it "Should find first success or return all errors" $ do
let prismError p name = p `orFizzleWith` (\v -> ["Value " <> show v <> " didn't match: " <> name])
let _R = prismError _Right "_Right"
([Left (1 :: Int), Left 2, Right (3 :: Int)] ^&? traversed . _R)
`shouldBe` (Success 3)
([Left (1 :: Int), Left 2, Left 3] ^&? traversed . _R)
`shouldBe` (Failure [ "Value Left 1 didn't match: _Right"
, "Value Left 2 didn't match: _Right"
, "Value Left 3 didn't match: _Right"])
describe "trySet .&~" $ do
it "should set successfully" $ do
(numbers & _2 . ix 1 . fizzleWhen ["shouldn't fail"] (const False) .&~ 42)
`shouldBe` Success ("hi",[1,42,3,4])
it "should return failures" $ do
(numbers & _2 . ix 1 . fizzleWithWhen (\n -> [n]) even .&~ 42)
`shouldBe` Failure [2]
describe "tryModify %&~" $ do
it "should edit successfully with no assertions" $ do
(numbers & _2 . traversed %&~ (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]) :: Validation () (String, [Int]))
it "should edit successfully through valid assertions" $ do
(numbers & _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) %&~ (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]))
it "should return failures" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) (const True) %&~ (*100))
`shouldBe` Failure [1, 2, 3, 4]
it "should collect all failures if anything fails" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) even %&~ (*100))
`shouldBe` Failure [2, 4]
describe "tryModify' %%&~" $ do
it "should edit successfully with no assertions" $ do
(numbers & _2 . traversed %%&~ Success . (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]) :: Validation () (String, [Int]))
it "should edit successfully through valid assertions" $ do
(numbers & _2 . traversed . fizzleWhen ["shouldn't fail"] (const False) %%&~ Success . (*100))
`shouldBe` (Success ("hi", [100, 200, 300, 400]))
it "should return failures" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) (const True) %%&~ Success . (*100))
`shouldBe` Failure [1, 2, 3, 4]
it "should collect all failures if anything fails" $ do
(numbers & _2 . traversed . fizzleWithWhen (\n -> [n]) even %%&~ Success . (*100))
`shouldBe` Failure [2, 4]
it "should fail if the function fails" $ do
(numbers & _2 . traversed %%&~ (\n -> Failure [show n <> " failed"]))
`shouldBe` (Failure ["1 failed","2 failed","3 failed","4 failed"] :: Validation [String] (String, [Int]))
describe "fizzleWhen" $ do
it "should fizzle when predicate is true" $ do
numbers ^&.. _2 . traversed . fizzleWhen ["failure"] even
`shouldBe` (["failure", "failure"], [1, 3])
describe "fizzleUnless" $ do
it "should fizzle when predicate is false" $ do
numbers ^&.. _2 . traversed . fizzleUnless ["failure"] even
`shouldBe` (["failure", "failure"], [2, 4])
describe "maybeFizzleWith" $ do
it "should fizzle when returning Just" $ do
let p x
| even x = Just [show x <> " was even"]
| otherwise = Nothing
numbers ^&.. _2 . traversed . maybeFizzleWith p
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWithWhen" $ do
it "should fizzle using the error builder when predicate is true" $ do
let p x = [show x <> " was even"]
numbers ^&.. _2 . traversed . fizzleWithWhen p even
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWithUnless" $ do
it "should fizzle using the error builder when predicate is false" $ do
let p x = [show x <> " was even"]
numbers ^&.. _2 . traversed . fizzleWithUnless p odd
`shouldBe` (["2 was even", "4 was even"], [1, 3])
describe "fizzleWith" $ do
it "should always fizzle using the error builder" $ do
let p x = [show x]
numbers ^&.. _2 . traversed . fizzleWith p
`shouldBe` (["1", "2", "3", "4"], [] :: [Int])
describe "orFizzle" $ do
it "should always fizzle using the error builder" $ do
numbers ^&.. (_2 . traversed . filtered (> 10)) `orFizzle` ["nothing over 10"]
`shouldBe` (["nothing over 10"], [])
describe "orFizzleWith" $ do
it "should always fizzle using the error builder" $ do
numbers ^&.. (_2 . traversed . filtered (> 10)) `orFizzleWith` (\(_, xs) -> ["searched " <> show (length xs) <> " elements, no luck"])
`shouldBe` (["searched 4 elements, no luck"], [])
describe "adjustingErrors" $ do
it "should alter errors from its sub-branch, but not outside of it" $ do
[1, 2, 3, 4 :: Int] ^&.. traversed . fizzleWhen ["got 4"] (== 4) . adjustingErrors (fmap (<> "!")) . fizzleWhen ["got 3"] (== 3)
`shouldBe` (["got 3!", "got 4"], [1, 2])
describe "adjustingErrorsWith" $ do
it "should alter errors from its sub-branch, but not outside of it, using the value to construct the error" $ do
[1, 2, 3, 4 :: Int] ^&.. traversed . fizzleWhen ["got 4"] (== 4) . adjustingErrorsWith (\n -> fmap (\e -> show n <> ": " <> e)) . fizzleWhen ["fail"] (== 3)
`shouldBe` (["3: fail","got 4"], [1, 2])
describe "real examples" $ do
it "tree get success" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]]
let tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
tree ^&.. branches . tryIx 0 . branches . tryIx 0 . root
`shouldBe` ([],["bottom"])
it "tree get failure" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]]
let tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
tree ^&.. branches . tryIx 0 . branches . tryIx 10 . root
`shouldBe` (["10 was out of bounds in list: [Node {rootLabel = \"bottom\", subForest = []}]"],[])
it "tree set" $ do
let tree = Node "top" [Node "mid" [Node "bottom" []]] :: Tree String
let tryIx :: (Applicative f, LensFail [String] f, Show a) => Int -> LensLike' f [a] a
tryIx n = ix n `orFizzleWith` (\xs -> [show n <> " was out of bounds in list: " <> show xs])
(tree & branches . tryIx 0 . branches . tryIx 10 . root %&~ (<> "!!"))
`shouldBe` (Failure ["10 was out of bounds in list: [Node {rootLabel = \"bottom\", subForest = []}]"])
|
195bbfff785eaa3ba7e382c83b2f548909d50fea91609916bf6fe4f617082664 | plum-umd/fundamentals | yo-client.rkt | The first three lines of this file were inserted by . They record metadata
;; about the language level of this file in a form that our tools can easily process.
#reader(lib "htdp-intermediate-lambda-reader.ss" "lang")((modname yo) (read-case-sensitive #t) (teachpacks ()) (htdp-settings #(#t constructor repeating-decimal #f #t none #f () #f)))
(require 2htdp/universe)
(require 2htdp/image)
(define (yo _)
(big-bang "yo!"
[register LOCALHOST]
[to-draw (λ (w)
(overlay (text w 40 "black")
(empty-scene 600 200)))]
[on-key (λ (w ke)
(make-package w w))]
[on-receive (λ (w msg) msg)]))
(define (run _)
(launch-many-worlds (yo 5)
(yo 3)
(yo 1))) | null | https://raw.githubusercontent.com/plum-umd/fundamentals/eb01ac528d42855be53649991a17d19c025a97ad/1/www/code/yo-client.rkt | racket | about the language level of this file in a form that our tools can easily process. | The first three lines of this file were inserted by . They record metadata
#reader(lib "htdp-intermediate-lambda-reader.ss" "lang")((modname yo) (read-case-sensitive #t) (teachpacks ()) (htdp-settings #(#t constructor repeating-decimal #f #t none #f () #f)))
(require 2htdp/universe)
(require 2htdp/image)
(define (yo _)
(big-bang "yo!"
[register LOCALHOST]
[to-draw (λ (w)
(overlay (text w 40 "black")
(empty-scene 600 200)))]
[on-key (λ (w ke)
(make-package w w))]
[on-receive (λ (w msg) msg)]))
(define (run _)
(launch-many-worlds (yo 5)
(yo 3)
(yo 1))) |
7c9b8bf73cfe595e325d3b899edd015afb1b3e358b8be65eb61c87fe4e55ad99 | pfdietz/ansi-test | exp.lsp | ;-*- Mode: Lisp -*-
Author :
Created : Mon Sep 1 21:24:44 2003
Contains : Tests of EXP
;;; Error tests
(deftest exp.error.1
(signals-error (exp) program-error)
t)
(deftest exp.error.2
(signals-error (exp 0 nil) program-error)
t)
(deftest exp.error.3
(signals-error (exp 0 0 0) program-error)
t)
;;; Other tests
(deftest exp.1
(let ((result (exp 0)))
(or (eqlt result 1)
(eqlt result 1.0f0)))
t)
(deftest exp.2
(mapcar #'exp '(0.0s0 0.0f0 0.0d0 0.0l0))
(1.0s0 1.0f0 1.0d0 1.0l0))
(deftest exp.3
(mapcar #'exp '(-0.0s0 -0.0f0 -0.0d0 -0.0l0))
(1.0s0 1.0f0 1.0d0 1.0l0))
FIXME
;;; Add more tests here for floating point accuracy
(deftest exp.error.4
(signals-error (exp (+ (log most-positive-short-float) 100))
floating-point-overflow)
t)
(deftest exp.error.5
(signals-error (exp (+ (log most-positive-single-float) 100))
floating-point-overflow)
t)
(deftest exp.error.6
(signals-error (exp (+ (log most-positive-double-float) 100))
floating-point-overflow)
t)
(deftest exp.error.7
(signals-error (exp (+ (log most-positive-long-float) 100))
floating-point-overflow)
t)
(deftest exp.error.8
(signals-error (exp (- (log least-positive-short-float) 100))
floating-point-underflow)
t)
(deftest exp.error.9
(signals-error (exp (- (log least-positive-single-float) 100))
floating-point-underflow)
t)
(deftest exp.error.10
(signals-error (exp (- (log least-positive-double-float) 100))
floating-point-underflow)
t)
(deftest exp.error.11
(signals-error (exp (- (log least-positive-double-float) 100))
floating-point-underflow)
t)
| null | https://raw.githubusercontent.com/pfdietz/ansi-test/3f4b9d31c3408114f0467eaeca4fd13b28e2ce31/numbers/exp.lsp | lisp | -*- Mode: Lisp -*-
Error tests
Other tests
Add more tests here for floating point accuracy | Author :
Created : Mon Sep 1 21:24:44 2003
Contains : Tests of EXP
(deftest exp.error.1
(signals-error (exp) program-error)
t)
(deftest exp.error.2
(signals-error (exp 0 nil) program-error)
t)
(deftest exp.error.3
(signals-error (exp 0 0 0) program-error)
t)
(deftest exp.1
(let ((result (exp 0)))
(or (eqlt result 1)
(eqlt result 1.0f0)))
t)
(deftest exp.2
(mapcar #'exp '(0.0s0 0.0f0 0.0d0 0.0l0))
(1.0s0 1.0f0 1.0d0 1.0l0))
(deftest exp.3
(mapcar #'exp '(-0.0s0 -0.0f0 -0.0d0 -0.0l0))
(1.0s0 1.0f0 1.0d0 1.0l0))
FIXME
(deftest exp.error.4
(signals-error (exp (+ (log most-positive-short-float) 100))
floating-point-overflow)
t)
(deftest exp.error.5
(signals-error (exp (+ (log most-positive-single-float) 100))
floating-point-overflow)
t)
(deftest exp.error.6
(signals-error (exp (+ (log most-positive-double-float) 100))
floating-point-overflow)
t)
(deftest exp.error.7
(signals-error (exp (+ (log most-positive-long-float) 100))
floating-point-overflow)
t)
(deftest exp.error.8
(signals-error (exp (- (log least-positive-short-float) 100))
floating-point-underflow)
t)
(deftest exp.error.9
(signals-error (exp (- (log least-positive-single-float) 100))
floating-point-underflow)
t)
(deftest exp.error.10
(signals-error (exp (- (log least-positive-double-float) 100))
floating-point-underflow)
t)
(deftest exp.error.11
(signals-error (exp (- (log least-positive-double-float) 100))
floating-point-underflow)
t)
|
0317258c0fa1794a024167044fbc994707db4cec614816d110c9ab5f8a8306f9 | nklein/Woolly | app.lisp | (in-package #:woolly)
(sheeple:defproto =app= ()
())
(sheeple:defmessage main-loop (a)
(:documentation "This message begins handling GUI events for the application A")
(:reply (a) (error "No main-loop for ~A" a)))
(sheeple:defmessage exit-main-loop (a)
(:documentation "This message stops handling GUI events for the application A")
(:reply (a) (error "No way to exit main-loop for ~A" a)))
| null | https://raw.githubusercontent.com/nklein/Woolly/f18b5d5dc28b04e6a194757d7ebc1f03bb6ebd4d/woolly/app.lisp | lisp | (in-package #:woolly)
(sheeple:defproto =app= ()
())
(sheeple:defmessage main-loop (a)
(:documentation "This message begins handling GUI events for the application A")
(:reply (a) (error "No main-loop for ~A" a)))
(sheeple:defmessage exit-main-loop (a)
(:documentation "This message stops handling GUI events for the application A")
(:reply (a) (error "No way to exit main-loop for ~A" a)))
| |
defdfe43dd1d948b83c3cda766b04fb859f4b29e2eedb30dd375ea5e611fcbdf | spawnfest/eep49ers | wxPanel.erl | %%
%% %CopyrightBegin%
%%
Copyright Ericsson AB 2008 - 2020 . All Rights Reserved .
%%
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% -2.0
%%
%% Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an " AS IS " BASIS ,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%% This file is generated DO NOT EDIT
-module(wxPanel).
-include("wxe.hrl").
-export([destroy/1,initDialog/1,new/0,new/1,new/2,setFocusIgnoringChildren/1]).
%% inherited exports
-export([cacheBestSize/2,canSetTransparent/1,captureMouse/1,center/1,center/2,
centerOnParent/1,centerOnParent/2,centre/1,centre/2,centreOnParent/1,
centreOnParent/2,clearBackground/1,clientToScreen/2,clientToScreen/3,
close/1,close/2,connect/2,connect/3,convertDialogToPixels/2,convertPixelsToDialog/2,
destroyChildren/1,disable/1,disconnect/1,disconnect/2,disconnect/3,
dragAcceptFiles/2,enable/1,enable/2,findWindow/2,fit/1,fitInside/1,
freeze/1,getAcceleratorTable/1,getBackgroundColour/1,getBackgroundStyle/1,
getBestSize/1,getCaret/1,getCharHeight/1,getCharWidth/1,getChildren/1,
getClientSize/1,getContainingSizer/1,getContentScaleFactor/1,getCursor/1,
getDPI/1,getDPIScaleFactor/1,getDropTarget/1,getExtraStyle/1,getFont/1,
getForegroundColour/1,getGrandParent/1,getHandle/1,getHelpText/1,
getId/1,getLabel/1,getMaxSize/1,getMinSize/1,getName/1,getParent/1,
getPosition/1,getRect/1,getScreenPosition/1,getScreenRect/1,getScrollPos/2,
getScrollRange/2,getScrollThumb/2,getSize/1,getSizer/1,getTextExtent/2,
getTextExtent/3,getThemeEnabled/1,getToolTip/1,getUpdateRegion/1,
getVirtualSize/1,getWindowStyleFlag/1,getWindowVariant/1,hasCapture/1,
hasScrollbar/2,hasTransparentBackground/1,hide/1,inheritAttributes/1,
invalidateBestSize/1,isDoubleBuffered/1,isEnabled/1,isExposed/2,isExposed/3,
isExposed/5,isFrozen/1,isRetained/1,isShown/1,isShownOnScreen/1,isTopLevel/1,
layout/1,lineDown/1,lineUp/1,lower/1,move/2,move/3,move/4,moveAfterInTabOrder/2,
moveBeforeInTabOrder/2,navigate/1,navigate/2,pageDown/1,pageUp/1,parent_class/1,
popupMenu/2,popupMenu/3,popupMenu/4,raise/1,refresh/1,refresh/2,refreshRect/2,
refreshRect/3,releaseMouse/1,removeChild/2,reparent/2,screenToClient/1,
screenToClient/2,scrollLines/2,scrollPages/2,scrollWindow/3,scrollWindow/4,
setAcceleratorTable/2,setAutoLayout/2,setBackgroundColour/2,setBackgroundStyle/2,
setCaret/2,setClientSize/2,setClientSize/3,setContainingSizer/2,setCursor/2,
setDoubleBuffered/2,setDropTarget/2,setExtraStyle/2,setFocus/1,setFocusFromKbd/1,
setFont/2,setForegroundColour/2,setHelpText/2,setId/2,setLabel/2,setMaxSize/2,
setMinSize/2,setName/2,setOwnBackgroundColour/2,setOwnFont/2,setOwnForegroundColour/2,
setPalette/2,setScrollPos/3,setScrollPos/4,setScrollbar/5,setScrollbar/6,
setSize/2,setSize/3,setSize/5,setSize/6,setSizeHints/2,setSizeHints/3,
setSizeHints/4,setSizer/2,setSizer/3,setSizerAndFit/2,setSizerAndFit/3,
setThemeEnabled/2,setToolTip/2,setTransparent/2,setVirtualSize/2,
setVirtualSize/3,setWindowStyle/2,setWindowStyleFlag/2,setWindowVariant/2,
shouldInheritColours/1,show/1,show/2,thaw/1,transferDataFromWindow/1,
transferDataToWindow/1,update/1,updateWindowUI/1,updateWindowUI/2,
validate/1,warpPointer/3]).
-type wxPanel() :: wx:wx_object().
-export_type([wxPanel/0]).
%% @hidden
parent_class(wxWindow) -> true;
parent_class(wxEvtHandler) -> true;
parent_class(_Class) -> erlang:error({badtype, ?MODULE}).
%% @doc See <a href="#wxpanelwxpanel">external documentation</a>.
-spec new() -> wxPanel().
new() ->
wxe_util:queue_cmd(?get_env(), ?wxPanel_new_0),
wxe_util:rec(?wxPanel_new_0).
%% @equiv new(Parent, [])
-spec new(Parent) -> wxPanel() when
Parent::wxWindow:wxWindow().
new(Parent)
when is_record(Parent, wx_ref) ->
new(Parent, []).
%% @doc See <a href="#wxpanelwxpanel">external documentation</a>.
-spec new(Parent, [Option]) -> wxPanel() when
Parent::wxWindow:wxWindow(),
Option :: {'winid', integer()}
| {'pos', {X::integer(), Y::integer()}}
| {'size', {W::integer(), H::integer()}}
| {'style', integer()}.
new(#wx_ref{type=ParentT}=Parent, Options)
when is_list(Options) ->
?CLASS(ParentT,wxWindow),
MOpts = fun({winid, _winid} = Arg) -> Arg;
({pos, {_posX,_posY}} = Arg) -> Arg;
({size, {_sizeW,_sizeH}} = Arg) -> Arg;
({style, _style} = Arg) -> Arg;
(BadOpt) -> erlang:error({badoption, BadOpt}) end,
Opts = lists:map(MOpts, Options),
wxe_util:queue_cmd(Parent, Opts,?get_env(),?wxPanel_new_2),
wxe_util:rec(?wxPanel_new_2).
%% @doc See <a href="#wxpanelinitdialog">external documentation</a>.
-spec initDialog(This) -> 'ok' when
This::wxPanel().
initDialog(#wx_ref{type=ThisT}=This) ->
?CLASS(ThisT,wxPanel),
wxe_util:queue_cmd(This,?get_env(),?wxPanel_InitDialog).
@doc See < a href=" / manuals/2.8.12 / wx_wxpanel.html#wxpanelsetfocusignoringchildren">external documentation</a > .
-spec setFocusIgnoringChildren(This) -> 'ok' when
This::wxPanel().
setFocusIgnoringChildren(#wx_ref{type=ThisT}=This) ->
?CLASS(ThisT,wxPanel),
wxe_util:queue_cmd(This,?get_env(),?wxPanel_SetFocusIgnoringChildren).
%% @doc Destroys this object, do not use object again
-spec destroy(This::wxPanel()) -> 'ok'.
destroy(Obj=#wx_ref{type=Type}) ->
?CLASS(Type,wxPanel),
wxe_util:queue_cmd(Obj, ?get_env(), ?DESTROY_OBJECT),
ok.
%% From wxWindow
%% @hidden
getDPI(This) -> wxWindow:getDPI(This).
%% @hidden
getContentScaleFactor(This) -> wxWindow:getContentScaleFactor(This).
%% @hidden
setDoubleBuffered(This,On) -> wxWindow:setDoubleBuffered(This,On).
%% @hidden
isDoubleBuffered(This) -> wxWindow:isDoubleBuffered(This).
%% @hidden
canSetTransparent(This) -> wxWindow:canSetTransparent(This).
%% @hidden
setTransparent(This,Alpha) -> wxWindow:setTransparent(This,Alpha).
%% @hidden
warpPointer(This,X,Y) -> wxWindow:warpPointer(This,X,Y).
%% @hidden
validate(This) -> wxWindow:validate(This).
%% @hidden
updateWindowUI(This, Options) -> wxWindow:updateWindowUI(This, Options).
%% @hidden
updateWindowUI(This) -> wxWindow:updateWindowUI(This).
%% @hidden
update(This) -> wxWindow:update(This).
%% @hidden
transferDataToWindow(This) -> wxWindow:transferDataToWindow(This).
%% @hidden
transferDataFromWindow(This) -> wxWindow:transferDataFromWindow(This).
%% @hidden
thaw(This) -> wxWindow:thaw(This).
%% @hidden
show(This, Options) -> wxWindow:show(This, Options).
%% @hidden
show(This) -> wxWindow:show(This).
%% @hidden
shouldInheritColours(This) -> wxWindow:shouldInheritColours(This).
%% @hidden
setWindowVariant(This,Variant) -> wxWindow:setWindowVariant(This,Variant).
%% @hidden
setWindowStyleFlag(This,Style) -> wxWindow:setWindowStyleFlag(This,Style).
%% @hidden
setWindowStyle(This,Style) -> wxWindow:setWindowStyle(This,Style).
%% @hidden
setVirtualSize(This,Width,Height) -> wxWindow:setVirtualSize(This,Width,Height).
%% @hidden
setVirtualSize(This,Size) -> wxWindow:setVirtualSize(This,Size).
%% @hidden
setToolTip(This,TipString) -> wxWindow:setToolTip(This,TipString).
%% @hidden
setThemeEnabled(This,Enable) -> wxWindow:setThemeEnabled(This,Enable).
%% @hidden
setSizerAndFit(This,Sizer, Options) -> wxWindow:setSizerAndFit(This,Sizer, Options).
%% @hidden
setSizerAndFit(This,Sizer) -> wxWindow:setSizerAndFit(This,Sizer).
%% @hidden
setSizer(This,Sizer, Options) -> wxWindow:setSizer(This,Sizer, Options).
%% @hidden
setSizer(This,Sizer) -> wxWindow:setSizer(This,Sizer).
%% @hidden
setSizeHints(This,MinW,MinH, Options) -> wxWindow:setSizeHints(This,MinW,MinH, Options).
%% @hidden
setSizeHints(This,MinW,MinH) -> wxWindow:setSizeHints(This,MinW,MinH).
%% @hidden
setSizeHints(This,MinSize) -> wxWindow:setSizeHints(This,MinSize).
%% @hidden
setSize(This,X,Y,Width,Height, Options) -> wxWindow:setSize(This,X,Y,Width,Height, Options).
%% @hidden
setSize(This,X,Y,Width,Height) -> wxWindow:setSize(This,X,Y,Width,Height).
%% @hidden
setSize(This,Width,Height) -> wxWindow:setSize(This,Width,Height).
%% @hidden
setSize(This,Rect) -> wxWindow:setSize(This,Rect).
%% @hidden
setScrollPos(This,Orientation,Pos, Options) -> wxWindow:setScrollPos(This,Orientation,Pos, Options).
%% @hidden
setScrollPos(This,Orientation,Pos) -> wxWindow:setScrollPos(This,Orientation,Pos).
%% @hidden
setScrollbar(This,Orientation,Position,ThumbSize,Range, Options) -> wxWindow:setScrollbar(This,Orientation,Position,ThumbSize,Range, Options).
%% @hidden
setScrollbar(This,Orientation,Position,ThumbSize,Range) -> wxWindow:setScrollbar(This,Orientation,Position,ThumbSize,Range).
%% @hidden
setPalette(This,Pal) -> wxWindow:setPalette(This,Pal).
%% @hidden
setName(This,Name) -> wxWindow:setName(This,Name).
%% @hidden
setLabel(This,Label) -> wxWindow:setLabel(This,Label).
%% @hidden
setId(This,Winid) -> wxWindow:setId(This,Winid).
%% @hidden
setHelpText(This,HelpText) -> wxWindow:setHelpText(This,HelpText).
%% @hidden
setForegroundColour(This,Colour) -> wxWindow:setForegroundColour(This,Colour).
%% @hidden
setFont(This,Font) -> wxWindow:setFont(This,Font).
%% @hidden
setFocusFromKbd(This) -> wxWindow:setFocusFromKbd(This).
%% @hidden
setFocus(This) -> wxWindow:setFocus(This).
%% @hidden
setExtraStyle(This,ExStyle) -> wxWindow:setExtraStyle(This,ExStyle).
%% @hidden
setDropTarget(This,Target) -> wxWindow:setDropTarget(This,Target).
%% @hidden
setOwnForegroundColour(This,Colour) -> wxWindow:setOwnForegroundColour(This,Colour).
%% @hidden
setOwnFont(This,Font) -> wxWindow:setOwnFont(This,Font).
%% @hidden
setOwnBackgroundColour(This,Colour) -> wxWindow:setOwnBackgroundColour(This,Colour).
%% @hidden
setMinSize(This,Size) -> wxWindow:setMinSize(This,Size).
%% @hidden
setMaxSize(This,Size) -> wxWindow:setMaxSize(This,Size).
%% @hidden
setCursor(This,Cursor) -> wxWindow:setCursor(This,Cursor).
%% @hidden
setContainingSizer(This,Sizer) -> wxWindow:setContainingSizer(This,Sizer).
%% @hidden
setClientSize(This,Width,Height) -> wxWindow:setClientSize(This,Width,Height).
%% @hidden
setClientSize(This,Size) -> wxWindow:setClientSize(This,Size).
%% @hidden
setCaret(This,Caret) -> wxWindow:setCaret(This,Caret).
%% @hidden
setBackgroundStyle(This,Style) -> wxWindow:setBackgroundStyle(This,Style).
%% @hidden
setBackgroundColour(This,Colour) -> wxWindow:setBackgroundColour(This,Colour).
%% @hidden
setAutoLayout(This,AutoLayout) -> wxWindow:setAutoLayout(This,AutoLayout).
%% @hidden
setAcceleratorTable(This,Accel) -> wxWindow:setAcceleratorTable(This,Accel).
%% @hidden
scrollWindow(This,Dx,Dy, Options) -> wxWindow:scrollWindow(This,Dx,Dy, Options).
%% @hidden
scrollWindow(This,Dx,Dy) -> wxWindow:scrollWindow(This,Dx,Dy).
%% @hidden
scrollPages(This,Pages) -> wxWindow:scrollPages(This,Pages).
%% @hidden
scrollLines(This,Lines) -> wxWindow:scrollLines(This,Lines).
%% @hidden
screenToClient(This,Pt) -> wxWindow:screenToClient(This,Pt).
%% @hidden
screenToClient(This) -> wxWindow:screenToClient(This).
%% @hidden
reparent(This,NewParent) -> wxWindow:reparent(This,NewParent).
%% @hidden
removeChild(This,Child) -> wxWindow:removeChild(This,Child).
%% @hidden
releaseMouse(This) -> wxWindow:releaseMouse(This).
%% @hidden
refreshRect(This,Rect, Options) -> wxWindow:refreshRect(This,Rect, Options).
%% @hidden
refreshRect(This,Rect) -> wxWindow:refreshRect(This,Rect).
%% @hidden
refresh(This, Options) -> wxWindow:refresh(This, Options).
%% @hidden
refresh(This) -> wxWindow:refresh(This).
%% @hidden
raise(This) -> wxWindow:raise(This).
%% @hidden
popupMenu(This,Menu,X,Y) -> wxWindow:popupMenu(This,Menu,X,Y).
%% @hidden
popupMenu(This,Menu, Options) -> wxWindow:popupMenu(This,Menu, Options).
%% @hidden
popupMenu(This,Menu) -> wxWindow:popupMenu(This,Menu).
%% @hidden
pageUp(This) -> wxWindow:pageUp(This).
%% @hidden
pageDown(This) -> wxWindow:pageDown(This).
%% @hidden
navigate(This, Options) -> wxWindow:navigate(This, Options).
%% @hidden
navigate(This) -> wxWindow:navigate(This).
%% @hidden
moveBeforeInTabOrder(This,Win) -> wxWindow:moveBeforeInTabOrder(This,Win).
%% @hidden
moveAfterInTabOrder(This,Win) -> wxWindow:moveAfterInTabOrder(This,Win).
%% @hidden
move(This,X,Y, Options) -> wxWindow:move(This,X,Y, Options).
%% @hidden
move(This,X,Y) -> wxWindow:move(This,X,Y).
%% @hidden
move(This,Pt) -> wxWindow:move(This,Pt).
%% @hidden
lower(This) -> wxWindow:lower(This).
%% @hidden
lineUp(This) -> wxWindow:lineUp(This).
%% @hidden
lineDown(This) -> wxWindow:lineDown(This).
%% @hidden
layout(This) -> wxWindow:layout(This).
%% @hidden
isShownOnScreen(This) -> wxWindow:isShownOnScreen(This).
%% @hidden
isTopLevel(This) -> wxWindow:isTopLevel(This).
%% @hidden
isShown(This) -> wxWindow:isShown(This).
%% @hidden
isRetained(This) -> wxWindow:isRetained(This).
%% @hidden
isExposed(This,X,Y,W,H) -> wxWindow:isExposed(This,X,Y,W,H).
%% @hidden
isExposed(This,X,Y) -> wxWindow:isExposed(This,X,Y).
%% @hidden
isExposed(This,Pt) -> wxWindow:isExposed(This,Pt).
%% @hidden
isEnabled(This) -> wxWindow:isEnabled(This).
%% @hidden
isFrozen(This) -> wxWindow:isFrozen(This).
%% @hidden
invalidateBestSize(This) -> wxWindow:invalidateBestSize(This).
%% @hidden
inheritAttributes(This) -> wxWindow:inheritAttributes(This).
%% @hidden
hide(This) -> wxWindow:hide(This).
%% @hidden
hasTransparentBackground(This) -> wxWindow:hasTransparentBackground(This).
%% @hidden
hasScrollbar(This,Orient) -> wxWindow:hasScrollbar(This,Orient).
%% @hidden
hasCapture(This) -> wxWindow:hasCapture(This).
%% @hidden
getWindowVariant(This) -> wxWindow:getWindowVariant(This).
%% @hidden
getWindowStyleFlag(This) -> wxWindow:getWindowStyleFlag(This).
%% @hidden
getVirtualSize(This) -> wxWindow:getVirtualSize(This).
%% @hidden
getUpdateRegion(This) -> wxWindow:getUpdateRegion(This).
%% @hidden
getToolTip(This) -> wxWindow:getToolTip(This).
%% @hidden
getThemeEnabled(This) -> wxWindow:getThemeEnabled(This).
%% @hidden
getTextExtent(This,String, Options) -> wxWindow:getTextExtent(This,String, Options).
%% @hidden
getTextExtent(This,String) -> wxWindow:getTextExtent(This,String).
%% @hidden
getSizer(This) -> wxWindow:getSizer(This).
%% @hidden
getSize(This) -> wxWindow:getSize(This).
%% @hidden
getScrollThumb(This,Orientation) -> wxWindow:getScrollThumb(This,Orientation).
%% @hidden
getScrollRange(This,Orientation) -> wxWindow:getScrollRange(This,Orientation).
%% @hidden
getScrollPos(This,Orientation) -> wxWindow:getScrollPos(This,Orientation).
%% @hidden
getScreenRect(This) -> wxWindow:getScreenRect(This).
%% @hidden
getScreenPosition(This) -> wxWindow:getScreenPosition(This).
%% @hidden
getRect(This) -> wxWindow:getRect(This).
%% @hidden
getPosition(This) -> wxWindow:getPosition(This).
%% @hidden
getParent(This) -> wxWindow:getParent(This).
%% @hidden
getName(This) -> wxWindow:getName(This).
%% @hidden
getMinSize(This) -> wxWindow:getMinSize(This).
%% @hidden
getMaxSize(This) -> wxWindow:getMaxSize(This).
%% @hidden
getLabel(This) -> wxWindow:getLabel(This).
%% @hidden
getId(This) -> wxWindow:getId(This).
%% @hidden
getHelpText(This) -> wxWindow:getHelpText(This).
%% @hidden
getHandle(This) -> wxWindow:getHandle(This).
%% @hidden
getGrandParent(This) -> wxWindow:getGrandParent(This).
%% @hidden
getForegroundColour(This) -> wxWindow:getForegroundColour(This).
%% @hidden
getFont(This) -> wxWindow:getFont(This).
%% @hidden
getExtraStyle(This) -> wxWindow:getExtraStyle(This).
%% @hidden
getDPIScaleFactor(This) -> wxWindow:getDPIScaleFactor(This).
%% @hidden
getDropTarget(This) -> wxWindow:getDropTarget(This).
%% @hidden
getCursor(This) -> wxWindow:getCursor(This).
%% @hidden
getContainingSizer(This) -> wxWindow:getContainingSizer(This).
%% @hidden
getClientSize(This) -> wxWindow:getClientSize(This).
%% @hidden
getChildren(This) -> wxWindow:getChildren(This).
%% @hidden
getCharWidth(This) -> wxWindow:getCharWidth(This).
%% @hidden
getCharHeight(This) -> wxWindow:getCharHeight(This).
%% @hidden
getCaret(This) -> wxWindow:getCaret(This).
%% @hidden
getBestSize(This) -> wxWindow:getBestSize(This).
%% @hidden
getBackgroundStyle(This) -> wxWindow:getBackgroundStyle(This).
%% @hidden
getBackgroundColour(This) -> wxWindow:getBackgroundColour(This).
%% @hidden
getAcceleratorTable(This) -> wxWindow:getAcceleratorTable(This).
%% @hidden
freeze(This) -> wxWindow:freeze(This).
%% @hidden
fitInside(This) -> wxWindow:fitInside(This).
%% @hidden
fit(This) -> wxWindow:fit(This).
%% @hidden
findWindow(This,Id) -> wxWindow:findWindow(This,Id).
%% @hidden
enable(This, Options) -> wxWindow:enable(This, Options).
%% @hidden
enable(This) -> wxWindow:enable(This).
%% @hidden
dragAcceptFiles(This,Accept) -> wxWindow:dragAcceptFiles(This,Accept).
%% @hidden
disable(This) -> wxWindow:disable(This).
%% @hidden
destroyChildren(This) -> wxWindow:destroyChildren(This).
%% @hidden
convertPixelsToDialog(This,Sz) -> wxWindow:convertPixelsToDialog(This,Sz).
%% @hidden
convertDialogToPixels(This,Sz) -> wxWindow:convertDialogToPixels(This,Sz).
%% @hidden
close(This, Options) -> wxWindow:close(This, Options).
%% @hidden
close(This) -> wxWindow:close(This).
%% @hidden
clientToScreen(This,X,Y) -> wxWindow:clientToScreen(This,X,Y).
%% @hidden
clientToScreen(This,Pt) -> wxWindow:clientToScreen(This,Pt).
%% @hidden
clearBackground(This) -> wxWindow:clearBackground(This).
%% @hidden
centreOnParent(This, Options) -> wxWindow:centreOnParent(This, Options).
%% @hidden
centerOnParent(This, Options) -> wxWindow:centerOnParent(This, Options).
%% @hidden
centreOnParent(This) -> wxWindow:centreOnParent(This).
%% @hidden
centerOnParent(This) -> wxWindow:centerOnParent(This).
%% @hidden
centre(This, Options) -> wxWindow:centre(This, Options).
%% @hidden
center(This, Options) -> wxWindow:center(This, Options).
%% @hidden
centre(This) -> wxWindow:centre(This).
%% @hidden
center(This) -> wxWindow:center(This).
%% @hidden
captureMouse(This) -> wxWindow:captureMouse(This).
%% @hidden
cacheBestSize(This,Size) -> wxWindow:cacheBestSize(This,Size).
%% From wxEvtHandler
%% @hidden
disconnect(This,EventType, Options) -> wxEvtHandler:disconnect(This,EventType, Options).
%% @hidden
disconnect(This,EventType) -> wxEvtHandler:disconnect(This,EventType).
%% @hidden
disconnect(This) -> wxEvtHandler:disconnect(This).
%% @hidden
connect(This,EventType, Options) -> wxEvtHandler:connect(This,EventType, Options).
%% @hidden
connect(This,EventType) -> wxEvtHandler:connect(This,EventType).
| null | https://raw.githubusercontent.com/spawnfest/eep49ers/d1020fd625a0bbda8ab01caf0e1738eb1cf74886/lib/wx/src/gen/wxPanel.erl | erlang |
%CopyrightBegin%
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
%CopyrightEnd%
This file is generated DO NOT EDIT
inherited exports
@hidden
@doc See <a href="#wxpanelwxpanel">external documentation</a>.
@equiv new(Parent, [])
@doc See <a href="#wxpanelwxpanel">external documentation</a>.
@doc See <a href="#wxpanelinitdialog">external documentation</a>.
@doc Destroys this object, do not use object again
From wxWindow
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
@hidden
From wxEvtHandler
@hidden
@hidden
@hidden
@hidden
@hidden | Copyright Ericsson AB 2008 - 2020 . All Rights Reserved .
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
distributed under the License is distributed on an " AS IS " BASIS ,
-module(wxPanel).
-include("wxe.hrl").
-export([destroy/1,initDialog/1,new/0,new/1,new/2,setFocusIgnoringChildren/1]).
-export([cacheBestSize/2,canSetTransparent/1,captureMouse/1,center/1,center/2,
centerOnParent/1,centerOnParent/2,centre/1,centre/2,centreOnParent/1,
centreOnParent/2,clearBackground/1,clientToScreen/2,clientToScreen/3,
close/1,close/2,connect/2,connect/3,convertDialogToPixels/2,convertPixelsToDialog/2,
destroyChildren/1,disable/1,disconnect/1,disconnect/2,disconnect/3,
dragAcceptFiles/2,enable/1,enable/2,findWindow/2,fit/1,fitInside/1,
freeze/1,getAcceleratorTable/1,getBackgroundColour/1,getBackgroundStyle/1,
getBestSize/1,getCaret/1,getCharHeight/1,getCharWidth/1,getChildren/1,
getClientSize/1,getContainingSizer/1,getContentScaleFactor/1,getCursor/1,
getDPI/1,getDPIScaleFactor/1,getDropTarget/1,getExtraStyle/1,getFont/1,
getForegroundColour/1,getGrandParent/1,getHandle/1,getHelpText/1,
getId/1,getLabel/1,getMaxSize/1,getMinSize/1,getName/1,getParent/1,
getPosition/1,getRect/1,getScreenPosition/1,getScreenRect/1,getScrollPos/2,
getScrollRange/2,getScrollThumb/2,getSize/1,getSizer/1,getTextExtent/2,
getTextExtent/3,getThemeEnabled/1,getToolTip/1,getUpdateRegion/1,
getVirtualSize/1,getWindowStyleFlag/1,getWindowVariant/1,hasCapture/1,
hasScrollbar/2,hasTransparentBackground/1,hide/1,inheritAttributes/1,
invalidateBestSize/1,isDoubleBuffered/1,isEnabled/1,isExposed/2,isExposed/3,
isExposed/5,isFrozen/1,isRetained/1,isShown/1,isShownOnScreen/1,isTopLevel/1,
layout/1,lineDown/1,lineUp/1,lower/1,move/2,move/3,move/4,moveAfterInTabOrder/2,
moveBeforeInTabOrder/2,navigate/1,navigate/2,pageDown/1,pageUp/1,parent_class/1,
popupMenu/2,popupMenu/3,popupMenu/4,raise/1,refresh/1,refresh/2,refreshRect/2,
refreshRect/3,releaseMouse/1,removeChild/2,reparent/2,screenToClient/1,
screenToClient/2,scrollLines/2,scrollPages/2,scrollWindow/3,scrollWindow/4,
setAcceleratorTable/2,setAutoLayout/2,setBackgroundColour/2,setBackgroundStyle/2,
setCaret/2,setClientSize/2,setClientSize/3,setContainingSizer/2,setCursor/2,
setDoubleBuffered/2,setDropTarget/2,setExtraStyle/2,setFocus/1,setFocusFromKbd/1,
setFont/2,setForegroundColour/2,setHelpText/2,setId/2,setLabel/2,setMaxSize/2,
setMinSize/2,setName/2,setOwnBackgroundColour/2,setOwnFont/2,setOwnForegroundColour/2,
setPalette/2,setScrollPos/3,setScrollPos/4,setScrollbar/5,setScrollbar/6,
setSize/2,setSize/3,setSize/5,setSize/6,setSizeHints/2,setSizeHints/3,
setSizeHints/4,setSizer/2,setSizer/3,setSizerAndFit/2,setSizerAndFit/3,
setThemeEnabled/2,setToolTip/2,setTransparent/2,setVirtualSize/2,
setVirtualSize/3,setWindowStyle/2,setWindowStyleFlag/2,setWindowVariant/2,
shouldInheritColours/1,show/1,show/2,thaw/1,transferDataFromWindow/1,
transferDataToWindow/1,update/1,updateWindowUI/1,updateWindowUI/2,
validate/1,warpPointer/3]).
-type wxPanel() :: wx:wx_object().
-export_type([wxPanel/0]).
parent_class(wxWindow) -> true;
parent_class(wxEvtHandler) -> true;
parent_class(_Class) -> erlang:error({badtype, ?MODULE}).
-spec new() -> wxPanel().
new() ->
wxe_util:queue_cmd(?get_env(), ?wxPanel_new_0),
wxe_util:rec(?wxPanel_new_0).
-spec new(Parent) -> wxPanel() when
Parent::wxWindow:wxWindow().
new(Parent)
when is_record(Parent, wx_ref) ->
new(Parent, []).
-spec new(Parent, [Option]) -> wxPanel() when
Parent::wxWindow:wxWindow(),
Option :: {'winid', integer()}
| {'pos', {X::integer(), Y::integer()}}
| {'size', {W::integer(), H::integer()}}
| {'style', integer()}.
new(#wx_ref{type=ParentT}=Parent, Options)
when is_list(Options) ->
?CLASS(ParentT,wxWindow),
MOpts = fun({winid, _winid} = Arg) -> Arg;
({pos, {_posX,_posY}} = Arg) -> Arg;
({size, {_sizeW,_sizeH}} = Arg) -> Arg;
({style, _style} = Arg) -> Arg;
(BadOpt) -> erlang:error({badoption, BadOpt}) end,
Opts = lists:map(MOpts, Options),
wxe_util:queue_cmd(Parent, Opts,?get_env(),?wxPanel_new_2),
wxe_util:rec(?wxPanel_new_2).
-spec initDialog(This) -> 'ok' when
This::wxPanel().
initDialog(#wx_ref{type=ThisT}=This) ->
?CLASS(ThisT,wxPanel),
wxe_util:queue_cmd(This,?get_env(),?wxPanel_InitDialog).
@doc See < a href=" / manuals/2.8.12 / wx_wxpanel.html#wxpanelsetfocusignoringchildren">external documentation</a > .
-spec setFocusIgnoringChildren(This) -> 'ok' when
This::wxPanel().
setFocusIgnoringChildren(#wx_ref{type=ThisT}=This) ->
?CLASS(ThisT,wxPanel),
wxe_util:queue_cmd(This,?get_env(),?wxPanel_SetFocusIgnoringChildren).
-spec destroy(This::wxPanel()) -> 'ok'.
destroy(Obj=#wx_ref{type=Type}) ->
?CLASS(Type,wxPanel),
wxe_util:queue_cmd(Obj, ?get_env(), ?DESTROY_OBJECT),
ok.
getDPI(This) -> wxWindow:getDPI(This).
getContentScaleFactor(This) -> wxWindow:getContentScaleFactor(This).
setDoubleBuffered(This,On) -> wxWindow:setDoubleBuffered(This,On).
isDoubleBuffered(This) -> wxWindow:isDoubleBuffered(This).
canSetTransparent(This) -> wxWindow:canSetTransparent(This).
setTransparent(This,Alpha) -> wxWindow:setTransparent(This,Alpha).
warpPointer(This,X,Y) -> wxWindow:warpPointer(This,X,Y).
validate(This) -> wxWindow:validate(This).
updateWindowUI(This, Options) -> wxWindow:updateWindowUI(This, Options).
updateWindowUI(This) -> wxWindow:updateWindowUI(This).
update(This) -> wxWindow:update(This).
transferDataToWindow(This) -> wxWindow:transferDataToWindow(This).
transferDataFromWindow(This) -> wxWindow:transferDataFromWindow(This).
thaw(This) -> wxWindow:thaw(This).
show(This, Options) -> wxWindow:show(This, Options).
show(This) -> wxWindow:show(This).
shouldInheritColours(This) -> wxWindow:shouldInheritColours(This).
setWindowVariant(This,Variant) -> wxWindow:setWindowVariant(This,Variant).
setWindowStyleFlag(This,Style) -> wxWindow:setWindowStyleFlag(This,Style).
setWindowStyle(This,Style) -> wxWindow:setWindowStyle(This,Style).
setVirtualSize(This,Width,Height) -> wxWindow:setVirtualSize(This,Width,Height).
setVirtualSize(This,Size) -> wxWindow:setVirtualSize(This,Size).
setToolTip(This,TipString) -> wxWindow:setToolTip(This,TipString).
setThemeEnabled(This,Enable) -> wxWindow:setThemeEnabled(This,Enable).
setSizerAndFit(This,Sizer, Options) -> wxWindow:setSizerAndFit(This,Sizer, Options).
setSizerAndFit(This,Sizer) -> wxWindow:setSizerAndFit(This,Sizer).
setSizer(This,Sizer, Options) -> wxWindow:setSizer(This,Sizer, Options).
setSizer(This,Sizer) -> wxWindow:setSizer(This,Sizer).
setSizeHints(This,MinW,MinH, Options) -> wxWindow:setSizeHints(This,MinW,MinH, Options).
setSizeHints(This,MinW,MinH) -> wxWindow:setSizeHints(This,MinW,MinH).
setSizeHints(This,MinSize) -> wxWindow:setSizeHints(This,MinSize).
setSize(This,X,Y,Width,Height, Options) -> wxWindow:setSize(This,X,Y,Width,Height, Options).
setSize(This,X,Y,Width,Height) -> wxWindow:setSize(This,X,Y,Width,Height).
setSize(This,Width,Height) -> wxWindow:setSize(This,Width,Height).
setSize(This,Rect) -> wxWindow:setSize(This,Rect).
setScrollPos(This,Orientation,Pos, Options) -> wxWindow:setScrollPos(This,Orientation,Pos, Options).
setScrollPos(This,Orientation,Pos) -> wxWindow:setScrollPos(This,Orientation,Pos).
setScrollbar(This,Orientation,Position,ThumbSize,Range, Options) -> wxWindow:setScrollbar(This,Orientation,Position,ThumbSize,Range, Options).
setScrollbar(This,Orientation,Position,ThumbSize,Range) -> wxWindow:setScrollbar(This,Orientation,Position,ThumbSize,Range).
setPalette(This,Pal) -> wxWindow:setPalette(This,Pal).
setName(This,Name) -> wxWindow:setName(This,Name).
setLabel(This,Label) -> wxWindow:setLabel(This,Label).
setId(This,Winid) -> wxWindow:setId(This,Winid).
setHelpText(This,HelpText) -> wxWindow:setHelpText(This,HelpText).
setForegroundColour(This,Colour) -> wxWindow:setForegroundColour(This,Colour).
setFont(This,Font) -> wxWindow:setFont(This,Font).
setFocusFromKbd(This) -> wxWindow:setFocusFromKbd(This).
setFocus(This) -> wxWindow:setFocus(This).
setExtraStyle(This,ExStyle) -> wxWindow:setExtraStyle(This,ExStyle).
setDropTarget(This,Target) -> wxWindow:setDropTarget(This,Target).
setOwnForegroundColour(This,Colour) -> wxWindow:setOwnForegroundColour(This,Colour).
setOwnFont(This,Font) -> wxWindow:setOwnFont(This,Font).
setOwnBackgroundColour(This,Colour) -> wxWindow:setOwnBackgroundColour(This,Colour).
setMinSize(This,Size) -> wxWindow:setMinSize(This,Size).
setMaxSize(This,Size) -> wxWindow:setMaxSize(This,Size).
setCursor(This,Cursor) -> wxWindow:setCursor(This,Cursor).
setContainingSizer(This,Sizer) -> wxWindow:setContainingSizer(This,Sizer).
setClientSize(This,Width,Height) -> wxWindow:setClientSize(This,Width,Height).
setClientSize(This,Size) -> wxWindow:setClientSize(This,Size).
setCaret(This,Caret) -> wxWindow:setCaret(This,Caret).
setBackgroundStyle(This,Style) -> wxWindow:setBackgroundStyle(This,Style).
setBackgroundColour(This,Colour) -> wxWindow:setBackgroundColour(This,Colour).
setAutoLayout(This,AutoLayout) -> wxWindow:setAutoLayout(This,AutoLayout).
setAcceleratorTable(This,Accel) -> wxWindow:setAcceleratorTable(This,Accel).
scrollWindow(This,Dx,Dy, Options) -> wxWindow:scrollWindow(This,Dx,Dy, Options).
scrollWindow(This,Dx,Dy) -> wxWindow:scrollWindow(This,Dx,Dy).
scrollPages(This,Pages) -> wxWindow:scrollPages(This,Pages).
scrollLines(This,Lines) -> wxWindow:scrollLines(This,Lines).
screenToClient(This,Pt) -> wxWindow:screenToClient(This,Pt).
screenToClient(This) -> wxWindow:screenToClient(This).
reparent(This,NewParent) -> wxWindow:reparent(This,NewParent).
removeChild(This,Child) -> wxWindow:removeChild(This,Child).
releaseMouse(This) -> wxWindow:releaseMouse(This).
refreshRect(This,Rect, Options) -> wxWindow:refreshRect(This,Rect, Options).
refreshRect(This,Rect) -> wxWindow:refreshRect(This,Rect).
refresh(This, Options) -> wxWindow:refresh(This, Options).
refresh(This) -> wxWindow:refresh(This).
raise(This) -> wxWindow:raise(This).
popupMenu(This,Menu,X,Y) -> wxWindow:popupMenu(This,Menu,X,Y).
popupMenu(This,Menu, Options) -> wxWindow:popupMenu(This,Menu, Options).
popupMenu(This,Menu) -> wxWindow:popupMenu(This,Menu).
pageUp(This) -> wxWindow:pageUp(This).
pageDown(This) -> wxWindow:pageDown(This).
navigate(This, Options) -> wxWindow:navigate(This, Options).
navigate(This) -> wxWindow:navigate(This).
moveBeforeInTabOrder(This,Win) -> wxWindow:moveBeforeInTabOrder(This,Win).
moveAfterInTabOrder(This,Win) -> wxWindow:moveAfterInTabOrder(This,Win).
move(This,X,Y, Options) -> wxWindow:move(This,X,Y, Options).
move(This,X,Y) -> wxWindow:move(This,X,Y).
move(This,Pt) -> wxWindow:move(This,Pt).
lower(This) -> wxWindow:lower(This).
lineUp(This) -> wxWindow:lineUp(This).
lineDown(This) -> wxWindow:lineDown(This).
layout(This) -> wxWindow:layout(This).
isShownOnScreen(This) -> wxWindow:isShownOnScreen(This).
isTopLevel(This) -> wxWindow:isTopLevel(This).
isShown(This) -> wxWindow:isShown(This).
isRetained(This) -> wxWindow:isRetained(This).
isExposed(This,X,Y,W,H) -> wxWindow:isExposed(This,X,Y,W,H).
isExposed(This,X,Y) -> wxWindow:isExposed(This,X,Y).
isExposed(This,Pt) -> wxWindow:isExposed(This,Pt).
isEnabled(This) -> wxWindow:isEnabled(This).
isFrozen(This) -> wxWindow:isFrozen(This).
invalidateBestSize(This) -> wxWindow:invalidateBestSize(This).
inheritAttributes(This) -> wxWindow:inheritAttributes(This).
hide(This) -> wxWindow:hide(This).
hasTransparentBackground(This) -> wxWindow:hasTransparentBackground(This).
hasScrollbar(This,Orient) -> wxWindow:hasScrollbar(This,Orient).
hasCapture(This) -> wxWindow:hasCapture(This).
getWindowVariant(This) -> wxWindow:getWindowVariant(This).
getWindowStyleFlag(This) -> wxWindow:getWindowStyleFlag(This).
getVirtualSize(This) -> wxWindow:getVirtualSize(This).
getUpdateRegion(This) -> wxWindow:getUpdateRegion(This).
getToolTip(This) -> wxWindow:getToolTip(This).
getThemeEnabled(This) -> wxWindow:getThemeEnabled(This).
getTextExtent(This,String, Options) -> wxWindow:getTextExtent(This,String, Options).
getTextExtent(This,String) -> wxWindow:getTextExtent(This,String).
getSizer(This) -> wxWindow:getSizer(This).
getSize(This) -> wxWindow:getSize(This).
getScrollThumb(This,Orientation) -> wxWindow:getScrollThumb(This,Orientation).
getScrollRange(This,Orientation) -> wxWindow:getScrollRange(This,Orientation).
getScrollPos(This,Orientation) -> wxWindow:getScrollPos(This,Orientation).
getScreenRect(This) -> wxWindow:getScreenRect(This).
getScreenPosition(This) -> wxWindow:getScreenPosition(This).
getRect(This) -> wxWindow:getRect(This).
getPosition(This) -> wxWindow:getPosition(This).
getParent(This) -> wxWindow:getParent(This).
getName(This) -> wxWindow:getName(This).
getMinSize(This) -> wxWindow:getMinSize(This).
getMaxSize(This) -> wxWindow:getMaxSize(This).
getLabel(This) -> wxWindow:getLabel(This).
getId(This) -> wxWindow:getId(This).
getHelpText(This) -> wxWindow:getHelpText(This).
getHandle(This) -> wxWindow:getHandle(This).
getGrandParent(This) -> wxWindow:getGrandParent(This).
getForegroundColour(This) -> wxWindow:getForegroundColour(This).
getFont(This) -> wxWindow:getFont(This).
getExtraStyle(This) -> wxWindow:getExtraStyle(This).
getDPIScaleFactor(This) -> wxWindow:getDPIScaleFactor(This).
getDropTarget(This) -> wxWindow:getDropTarget(This).
getCursor(This) -> wxWindow:getCursor(This).
getContainingSizer(This) -> wxWindow:getContainingSizer(This).
getClientSize(This) -> wxWindow:getClientSize(This).
getChildren(This) -> wxWindow:getChildren(This).
getCharWidth(This) -> wxWindow:getCharWidth(This).
getCharHeight(This) -> wxWindow:getCharHeight(This).
getCaret(This) -> wxWindow:getCaret(This).
getBestSize(This) -> wxWindow:getBestSize(This).
getBackgroundStyle(This) -> wxWindow:getBackgroundStyle(This).
getBackgroundColour(This) -> wxWindow:getBackgroundColour(This).
getAcceleratorTable(This) -> wxWindow:getAcceleratorTable(This).
freeze(This) -> wxWindow:freeze(This).
fitInside(This) -> wxWindow:fitInside(This).
fit(This) -> wxWindow:fit(This).
findWindow(This,Id) -> wxWindow:findWindow(This,Id).
enable(This, Options) -> wxWindow:enable(This, Options).
enable(This) -> wxWindow:enable(This).
dragAcceptFiles(This,Accept) -> wxWindow:dragAcceptFiles(This,Accept).
disable(This) -> wxWindow:disable(This).
destroyChildren(This) -> wxWindow:destroyChildren(This).
convertPixelsToDialog(This,Sz) -> wxWindow:convertPixelsToDialog(This,Sz).
convertDialogToPixels(This,Sz) -> wxWindow:convertDialogToPixels(This,Sz).
close(This, Options) -> wxWindow:close(This, Options).
close(This) -> wxWindow:close(This).
clientToScreen(This,X,Y) -> wxWindow:clientToScreen(This,X,Y).
clientToScreen(This,Pt) -> wxWindow:clientToScreen(This,Pt).
clearBackground(This) -> wxWindow:clearBackground(This).
centreOnParent(This, Options) -> wxWindow:centreOnParent(This, Options).
centerOnParent(This, Options) -> wxWindow:centerOnParent(This, Options).
centreOnParent(This) -> wxWindow:centreOnParent(This).
centerOnParent(This) -> wxWindow:centerOnParent(This).
centre(This, Options) -> wxWindow:centre(This, Options).
center(This, Options) -> wxWindow:center(This, Options).
centre(This) -> wxWindow:centre(This).
center(This) -> wxWindow:center(This).
captureMouse(This) -> wxWindow:captureMouse(This).
cacheBestSize(This,Size) -> wxWindow:cacheBestSize(This,Size).
disconnect(This,EventType, Options) -> wxEvtHandler:disconnect(This,EventType, Options).
disconnect(This,EventType) -> wxEvtHandler:disconnect(This,EventType).
disconnect(This) -> wxEvtHandler:disconnect(This).
connect(This,EventType, Options) -> wxEvtHandler:connect(This,EventType, Options).
connect(This,EventType) -> wxEvtHandler:connect(This,EventType).
|
d5aa7e76d81e44e61752158374f507d3de73683d04b359ed6add8736a79e50fd | racket/rhombus-prototype | cond.rkt | #lang racket/base
(require (for-syntax racket/base
syntax/parse/pre)
"expression.rkt"
"parse.rkt"
"else-clause.rkt"
(only-in "underscore.rkt"
[_ rhombus-_])
"error.rkt")
(provide (rename-out [rhombus-if if]
[rhombus-cond cond]
[rhombus-when when]
[rhombus-unless unless]))
(define-syntax rhombus-if
(expression-transformer
(lambda (stx)
(syntax-parse stx
#:datum-literals (alts)
[(form-id test ... (alts alt ...)
. tail)
(syntax-parse #'(alt ...)
#:datum-literals (block)
[(((~and tag-thn block) thn ...)
((~and tag-els block) els ...))
(values
#'(if (rhombus-expression (group test ...))
(rhombus-body-at tag-thn thn ...)
(rhombus-body-at tag-els els ...))
#'tail)]
[_
(raise-syntax-error #f
"expected two alternatives"
stx)])]))))
(define-syntax rhombus-cond
(expression-transformer
(lambda (stx)
(syntax-parse stx
#:datum-literals (alts block group)
[(form-id (alts
(block (group pred ... ((~and tag block) rhs ...)))
...
e::else-clause)
. tail)
(values
#'(cond
[(rhombus-expression (group pred ...))
(rhombus-body-at tag rhs ...)]
...
[else e.parsed])
#'tail)]
[(form-id (alts
(block (group pred ... ((~and tag block) rhs ...)))
...)
. tail)
(values
#'(cond
[(rhombus-expression (group pred ...))
(rhombus-body-at tag rhs ...)]
...
[else (cond-fallthrough 'form-id)])
#'tail)]
[(form-id (block) . tail)
(values
#'(cond-fallthrough 'form-id)
#'tail)]))))
(define (cond-fallthrough who)
(raise-contract-error who "no matching case"))
(define-syntax rhombus-when
(expression-transformer
(lambda (stx)
(parse-when stx #'when))))
(define-syntax rhombus-unless
(expression-transformer
(lambda (stx)
(parse-when stx #'unless))))
(define-for-syntax (parse-when stx racket-form-id)
(syntax-parse stx
#:datum-literals (alts)
[(form-id test ... (alts alt ...)
. tail)
(syntax-parse #'(alt ...)
#:datum-literals (block)
[(((~and tag-thn block) thn ...))
(values
#`(#,racket-form-id (rhombus-expression (group test ...))
(rhombus-body-at tag-thn thn ...))
#'tail)]
[_
(raise-syntax-error #f
"expected a single alternative"
stx)])]))
| null | https://raw.githubusercontent.com/racket/rhombus-prototype/074f1e50fbfa018dd1c064191c79665e06628ab4/rhombus/private/cond.rkt | racket | #lang racket/base
(require (for-syntax racket/base
syntax/parse/pre)
"expression.rkt"
"parse.rkt"
"else-clause.rkt"
(only-in "underscore.rkt"
[_ rhombus-_])
"error.rkt")
(provide (rename-out [rhombus-if if]
[rhombus-cond cond]
[rhombus-when when]
[rhombus-unless unless]))
(define-syntax rhombus-if
(expression-transformer
(lambda (stx)
(syntax-parse stx
#:datum-literals (alts)
[(form-id test ... (alts alt ...)
. tail)
(syntax-parse #'(alt ...)
#:datum-literals (block)
[(((~and tag-thn block) thn ...)
((~and tag-els block) els ...))
(values
#'(if (rhombus-expression (group test ...))
(rhombus-body-at tag-thn thn ...)
(rhombus-body-at tag-els els ...))
#'tail)]
[_
(raise-syntax-error #f
"expected two alternatives"
stx)])]))))
(define-syntax rhombus-cond
(expression-transformer
(lambda (stx)
(syntax-parse stx
#:datum-literals (alts block group)
[(form-id (alts
(block (group pred ... ((~and tag block) rhs ...)))
...
e::else-clause)
. tail)
(values
#'(cond
[(rhombus-expression (group pred ...))
(rhombus-body-at tag rhs ...)]
...
[else e.parsed])
#'tail)]
[(form-id (alts
(block (group pred ... ((~and tag block) rhs ...)))
...)
. tail)
(values
#'(cond
[(rhombus-expression (group pred ...))
(rhombus-body-at tag rhs ...)]
...
[else (cond-fallthrough 'form-id)])
#'tail)]
[(form-id (block) . tail)
(values
#'(cond-fallthrough 'form-id)
#'tail)]))))
(define (cond-fallthrough who)
(raise-contract-error who "no matching case"))
(define-syntax rhombus-when
(expression-transformer
(lambda (stx)
(parse-when stx #'when))))
(define-syntax rhombus-unless
(expression-transformer
(lambda (stx)
(parse-when stx #'unless))))
(define-for-syntax (parse-when stx racket-form-id)
(syntax-parse stx
#:datum-literals (alts)
[(form-id test ... (alts alt ...)
. tail)
(syntax-parse #'(alt ...)
#:datum-literals (block)
[(((~and tag-thn block) thn ...))
(values
#`(#,racket-form-id (rhombus-expression (group test ...))
(rhombus-body-at tag-thn thn ...))
#'tail)]
[_
(raise-syntax-error #f
"expected a single alternative"
stx)])]))
| |
d8f251f6e28c2c07b86c7af466bb1686721508e1c26484a208678448d75906b2 | LexiFi/menhir | positions.ml | (******************************************************************************)
(* *)
(* *)
, Paris
, PPS , Université Paris Diderot
(* *)
. All rights reserved . This file is distributed under the
terms of the GNU General Public License version 2 , as described in the
(* file LICENSE. *)
(* *)
(******************************************************************************)
open Lexing
type t =
(* Start and end positions. *)
position * position
type 'a located =
{
value : 'a;
position : t;
}
let value { value = v } =
v
let position { position = p } =
p
let decompose { value; position } =
(value, position)
let with_pos p v =
{
value = v;
position = p;
}
let with_loc =
(* The location is converted from the type [position * position]
to the type [t]. *)
with_pos
let map f v =
{
value = f v.value;
position = v.position;
}
let pmap f v =
{
value = f v.position v.value;
position = v.position
}
let iter f { value = v } =
f v
let mapd f v =
let w1, w2 = f v.value in
let pos = v.position in
{ value = w1; position = pos },
{ value = w2; position = pos }
let dummy =
(dummy_pos, dummy_pos)
let unknown_pos v =
{
value = v;
position = dummy
}
let start_of_position (p, _) = p
let end_of_position (_, p) = p
let filename_of_position p =
(start_of_position p).pos_fname
let line p =
p.pos_lnum
let column p =
p.pos_cnum - p.pos_bol
let characters p1 p2 =
(column p1, p2.pos_cnum - p1.pos_bol) (* intentionally [p1.pos_bol] *)
let join x1 x2 =
(
start_of_position (if x1 = dummy then x2 else x1),
end_of_position (if x2 = dummy then x1 else x2)
)
let import x =
x
let join_located l1 l2 f =
{
value = f l1.value l2.value;
position = join l1.position l2.position;
}
let string_of_lex_pos p =
let c = p.pos_cnum - p.pos_bol in
(string_of_int p.pos_lnum)^":"^(string_of_int c)
let string_of_pos p =
let filename = filename_of_position p in
(* [filename] is hopefully not "". *)
let l = line (start_of_position p) in
let c1, c2 = characters (start_of_position p) (end_of_position p) in
Printf.sprintf "File \"%s\", line %d, characters %d-%d" filename l c1 c2
let pos_or_undef = function
| None -> dummy
| Some x -> x
let cpos lexbuf =
(lexeme_start_p lexbuf, lexeme_end_p lexbuf)
let with_cpos lexbuf v =
with_pos (cpos lexbuf) v
let string_of_cpos lexbuf =
string_of_pos (cpos lexbuf)
let joinf f t1 t2 =
join (f t1) (f t2)
let ljoinf f =
List.fold_left (fun p t -> join p (f t)) dummy
let join_located_list ls f =
{
value = f (List.map (fun l -> l.value) ls);
position = ljoinf (fun x -> x.position) ls
}
(* The functions that print error messages and warnings require a list of
positions. The following auxiliary functions help build such lists. *)
type positions =
t list
let one (pos : position) : positions =
[ import (pos, pos) ]
let lexbuf (lexbuf : lexbuf) : positions =
[ import (lexbuf.lex_start_p, lexbuf.lex_curr_p) ]
let print (pos : position) =
Printf.printf
"{ pos_fname = \"%s\"; pos_lnum = %d; pos_bol = %d; pos_cnum = %d }\n"
pos.pos_fname
pos.pos_lnum
pos.pos_bol
pos.pos_cnum
| null | https://raw.githubusercontent.com/LexiFi/menhir/794e64e7997d4d3f91d36dd49aaecc942ea858b7/src/positions.ml | ocaml | ****************************************************************************
file LICENSE.
****************************************************************************
Start and end positions.
The location is converted from the type [position * position]
to the type [t].
intentionally [p1.pos_bol]
[filename] is hopefully not "".
The functions that print error messages and warnings require a list of
positions. The following auxiliary functions help build such lists. |
, Paris
, PPS , Université Paris Diderot
. All rights reserved . This file is distributed under the
terms of the GNU General Public License version 2 , as described in the
open Lexing
type t =
position * position
type 'a located =
{
value : 'a;
position : t;
}
let value { value = v } =
v
let position { position = p } =
p
let decompose { value; position } =
(value, position)
let with_pos p v =
{
value = v;
position = p;
}
let with_loc =
with_pos
let map f v =
{
value = f v.value;
position = v.position;
}
let pmap f v =
{
value = f v.position v.value;
position = v.position
}
let iter f { value = v } =
f v
let mapd f v =
let w1, w2 = f v.value in
let pos = v.position in
{ value = w1; position = pos },
{ value = w2; position = pos }
let dummy =
(dummy_pos, dummy_pos)
let unknown_pos v =
{
value = v;
position = dummy
}
let start_of_position (p, _) = p
let end_of_position (_, p) = p
let filename_of_position p =
(start_of_position p).pos_fname
let line p =
p.pos_lnum
let column p =
p.pos_cnum - p.pos_bol
let characters p1 p2 =
let join x1 x2 =
(
start_of_position (if x1 = dummy then x2 else x1),
end_of_position (if x2 = dummy then x1 else x2)
)
let import x =
x
let join_located l1 l2 f =
{
value = f l1.value l2.value;
position = join l1.position l2.position;
}
let string_of_lex_pos p =
let c = p.pos_cnum - p.pos_bol in
(string_of_int p.pos_lnum)^":"^(string_of_int c)
let string_of_pos p =
let filename = filename_of_position p in
let l = line (start_of_position p) in
let c1, c2 = characters (start_of_position p) (end_of_position p) in
Printf.sprintf "File \"%s\", line %d, characters %d-%d" filename l c1 c2
let pos_or_undef = function
| None -> dummy
| Some x -> x
let cpos lexbuf =
(lexeme_start_p lexbuf, lexeme_end_p lexbuf)
let with_cpos lexbuf v =
with_pos (cpos lexbuf) v
let string_of_cpos lexbuf =
string_of_pos (cpos lexbuf)
let joinf f t1 t2 =
join (f t1) (f t2)
let ljoinf f =
List.fold_left (fun p t -> join p (f t)) dummy
let join_located_list ls f =
{
value = f (List.map (fun l -> l.value) ls);
position = ljoinf (fun x -> x.position) ls
}
type positions =
t list
let one (pos : position) : positions =
[ import (pos, pos) ]
let lexbuf (lexbuf : lexbuf) : positions =
[ import (lexbuf.lex_start_p, lexbuf.lex_curr_p) ]
let print (pos : position) =
Printf.printf
"{ pos_fname = \"%s\"; pos_lnum = %d; pos_bol = %d; pos_cnum = %d }\n"
pos.pos_fname
pos.pos_lnum
pos.pos_bol
pos.pos_cnum
|
0c52e3bcc2418385bec78ccf1cc0deb8a0a4e87a75ddecd6581ea0d6a5466185 | kind2-mc/kind2 | hashcons.ml | (**************************************************************************)
(* *)
Copyright ( C )
(* *)
(* This software is free software; you can redistribute it and/or *)
modify it under the terms of the GNU Library General Public
License version 2.1 , with the special exception on linking
(* described in file LICENSE. *)
(* *)
(* This software is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *)
(* *)
(**************************************************************************)
module type HSig = sig
type ('a, 'b) hash_consed = private {
hkey : int;
tag : int;
node : 'a;
prop : 'b }
val compare : ('a, 'b) hash_consed -> ('a, 'b) hash_consed -> int
val equal : ('a, 'b) hash_consed -> ('a, 'b) hash_consed -> bool
val hash : ('a, 'b) hash_consed -> int
type ('a, 'b) t
val create : int -> ('a, 'b) t
val clear : ('a, 'b) t -> unit
val hashcons : ('a, 'b) t -> 'a -> 'b -> ('a, 'b) hash_consed
val iter : (('a, 'b) hash_consed -> unit) -> ('a, 'b) t -> unit
val fold : (('a, 'b) hash_consed -> 'c -> 'c) -> ('a, 'b) t -> 'c -> 'c
val stats : ('a, 'b) t -> int * int * int * int * int * int
module type HashedType =
sig
type t
type prop
val equal : t -> t -> bool
val hash : t -> int
end
module type S =
sig
type key
type prop
type t
val create : int -> t
val clear : t -> unit
val hashcons : t -> key -> prop -> (key, prop) hash_consed
val find : t -> key -> (key, prop) hash_consed
val iter : ((key, prop) hash_consed -> unit) -> t -> unit
val fold : ((key, prop) hash_consed -> 'a -> 'a) -> t -> 'a -> 'a
val stats : t -> int * int * int * int * int * int
end
module Make(H : HashedType) : (S with type key = H.t and type prop = H.prop)
end
include (val (module HashconsWeak : HSig))
Local Variables :
compile - command : " make -C .. -k "
tuareg - interactive - program : " ./kind2.top -I ./_build -I / SExpr "
indent - tabs - mode : nil
End :
Local Variables:
compile-command: "make -C .. -k"
tuareg-interactive-program: "./kind2.top -I ./_build -I ./_build/SExpr"
indent-tabs-mode: nil
End:
*)
| null | https://raw.githubusercontent.com/kind2-mc/kind2/1d7e926da46aa38c0816fb0a327b0e33a8d35cde/src/utils/hashcons.ml | ocaml | ************************************************************************
This software is free software; you can redistribute it and/or
described in file LICENSE.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
************************************************************************ | Copyright ( C )
modify it under the terms of the GNU Library General Public
License version 2.1 , with the special exception on linking
module type HSig = sig
type ('a, 'b) hash_consed = private {
hkey : int;
tag : int;
node : 'a;
prop : 'b }
val compare : ('a, 'b) hash_consed -> ('a, 'b) hash_consed -> int
val equal : ('a, 'b) hash_consed -> ('a, 'b) hash_consed -> bool
val hash : ('a, 'b) hash_consed -> int
type ('a, 'b) t
val create : int -> ('a, 'b) t
val clear : ('a, 'b) t -> unit
val hashcons : ('a, 'b) t -> 'a -> 'b -> ('a, 'b) hash_consed
val iter : (('a, 'b) hash_consed -> unit) -> ('a, 'b) t -> unit
val fold : (('a, 'b) hash_consed -> 'c -> 'c) -> ('a, 'b) t -> 'c -> 'c
val stats : ('a, 'b) t -> int * int * int * int * int * int
module type HashedType =
sig
type t
type prop
val equal : t -> t -> bool
val hash : t -> int
end
module type S =
sig
type key
type prop
type t
val create : int -> t
val clear : t -> unit
val hashcons : t -> key -> prop -> (key, prop) hash_consed
val find : t -> key -> (key, prop) hash_consed
val iter : ((key, prop) hash_consed -> unit) -> t -> unit
val fold : ((key, prop) hash_consed -> 'a -> 'a) -> t -> 'a -> 'a
val stats : t -> int * int * int * int * int * int
end
module Make(H : HashedType) : (S with type key = H.t and type prop = H.prop)
end
include (val (module HashconsWeak : HSig))
Local Variables :
compile - command : " make -C .. -k "
tuareg - interactive - program : " ./kind2.top -I ./_build -I / SExpr "
indent - tabs - mode : nil
End :
Local Variables:
compile-command: "make -C .. -k"
tuareg-interactive-program: "./kind2.top -I ./_build -I ./_build/SExpr"
indent-tabs-mode: nil
End:
*)
|
768a5db2082bfc1c9cc7863d704eee15a76f79d92d2e6334e518249375717092 | wjrforcyber/SystemT | EEval.hs | -- | eval envuator for Extrinsic L5
module Lang.L5.Eval.EEval where
import Common.Types
import Lang.L5.Syntax.Extrinsic (Exp (..), Name (..), Val (..))
data Env
= Emp
| Snoc Env (Name, Val)
newtype Eval a = Eval {runEval :: Env -> Maybe a}
instance Functor Eval where
fmap f (Eval g) = Eval $ \ctx -> fmap f (g ctx)
instance Applicative Eval where
pure x = Eval $ \_ -> Just x
Eval f <*> Eval x = Eval $ \ctx -> do
f' <- f ctx
x' <- x ctx
return (f' x')
instance Monad Eval where
Eval x >>= f = Eval $ \ctx -> do
x' <- x ctx
runEval (f x') ctx
lookupEnv :: Name -> Env -> Maybe Val
lookupEnv _ Emp = Nothing
lookupEnv x (Snoc env (y, v))
| x == y = Just v
| otherwise = lookupEnv x env
extendEnv :: Env -> Name -> Val -> Env
extendEnv env x v = Snoc env (x, v)
instance MonadFail Eval where
fail _ = Eval $ const Nothing
readEnv :: Eval Env
readEnv = Eval pure
eval :: Exp -> Eval Val
eval EZero = return $ VSuccN 0
eval (ESucc e) =
do
VSuccN n <- eval e
return $ VSuccN (1 + n)
eval ETrue = return VTrue
eval EFalse = return VFalse
eval (EAdd e1 e2) =
do
VSuccN n <- eval e1
VSuccN m <- eval e2
return $ VSuccN (n + m)
eval (EMul e1 e2) =
do
VSuccN n <- eval e1
VSuccN m <- eval e2
return $ VSuccN (n * m)
eval (EIf e1 e2 e3) =
do
b1 <- eval e1
case b1 of
VTrue -> eval e2
VFalse -> eval e3
_ -> fail (show e1 ++ "has a type of" ++ show b1)
eval EUnit = return VUnit
eval (ETuple e1 e2) =
do
n <- eval e1
m <- eval e2
return $ VTuple n m
eval (EFst e) =
do
VTuple v1 _ <- eval e
return v1
eval (ESnd e) =
do
VTuple _ v2 <- eval e
return v2
eval (EVar name) =
do
env <- readEnv
case lookupEnv name env of
Just v -> return v
Nothing -> fail "unbound variable"
eval (ELam name ty e) =
return $ VLam name ty e
eval (EApp e1 e2) =
do
VLam name _ e <- eval e1
eval $ subst name e2 e
subst :: Name -> Exp -> Exp -> Exp
subst x e (EVar y)
| x == y = e
| otherwise = EVar y
subst x e (ELam y ty e')
| x == y = ELam y ty e'
| otherwise = ELam y ty (subst x e e')
subst x e (EApp e1 e2) = EApp (subst x e e1) (subst x e e2)
subst x e (ETuple e1 e2) = ETuple (subst x e e1) (subst x e e2)
subst x e (EFst e') = EFst (subst x e e')
subst x e (ESnd e') = ESnd (subst x e e')
subst x e (EIf e1 e2 e3) = EIf (subst x e e1) (subst x e e2) (subst x e e3)
subst x e (EAdd e1 e2) = EAdd (subst x e e1) (subst x e e2)
subst x e (EMul e1 e2) = EMul (subst x e e1) (subst x e e2)
subst x e (ESucc e') = ESucc (subst x e e')
subst _ _ e = e
fromNat :: Nat -> Exp
fromNat Zero = EZero
fromNat (Succ n) = ESucc (fromNat n)
| null | https://raw.githubusercontent.com/wjrforcyber/SystemT/0b402e5a9a335e28e8a19ba0274f1b8e40c08eaf/src/Lang/L5/Eval/EEval.hs | haskell | | eval envuator for Extrinsic L5 | module Lang.L5.Eval.EEval where
import Common.Types
import Lang.L5.Syntax.Extrinsic (Exp (..), Name (..), Val (..))
data Env
= Emp
| Snoc Env (Name, Val)
newtype Eval a = Eval {runEval :: Env -> Maybe a}
instance Functor Eval where
fmap f (Eval g) = Eval $ \ctx -> fmap f (g ctx)
instance Applicative Eval where
pure x = Eval $ \_ -> Just x
Eval f <*> Eval x = Eval $ \ctx -> do
f' <- f ctx
x' <- x ctx
return (f' x')
instance Monad Eval where
Eval x >>= f = Eval $ \ctx -> do
x' <- x ctx
runEval (f x') ctx
lookupEnv :: Name -> Env -> Maybe Val
lookupEnv _ Emp = Nothing
lookupEnv x (Snoc env (y, v))
| x == y = Just v
| otherwise = lookupEnv x env
extendEnv :: Env -> Name -> Val -> Env
extendEnv env x v = Snoc env (x, v)
instance MonadFail Eval where
fail _ = Eval $ const Nothing
readEnv :: Eval Env
readEnv = Eval pure
eval :: Exp -> Eval Val
eval EZero = return $ VSuccN 0
eval (ESucc e) =
do
VSuccN n <- eval e
return $ VSuccN (1 + n)
eval ETrue = return VTrue
eval EFalse = return VFalse
eval (EAdd e1 e2) =
do
VSuccN n <- eval e1
VSuccN m <- eval e2
return $ VSuccN (n + m)
eval (EMul e1 e2) =
do
VSuccN n <- eval e1
VSuccN m <- eval e2
return $ VSuccN (n * m)
eval (EIf e1 e2 e3) =
do
b1 <- eval e1
case b1 of
VTrue -> eval e2
VFalse -> eval e3
_ -> fail (show e1 ++ "has a type of" ++ show b1)
eval EUnit = return VUnit
eval (ETuple e1 e2) =
do
n <- eval e1
m <- eval e2
return $ VTuple n m
eval (EFst e) =
do
VTuple v1 _ <- eval e
return v1
eval (ESnd e) =
do
VTuple _ v2 <- eval e
return v2
eval (EVar name) =
do
env <- readEnv
case lookupEnv name env of
Just v -> return v
Nothing -> fail "unbound variable"
eval (ELam name ty e) =
return $ VLam name ty e
eval (EApp e1 e2) =
do
VLam name _ e <- eval e1
eval $ subst name e2 e
subst :: Name -> Exp -> Exp -> Exp
subst x e (EVar y)
| x == y = e
| otherwise = EVar y
subst x e (ELam y ty e')
| x == y = ELam y ty e'
| otherwise = ELam y ty (subst x e e')
subst x e (EApp e1 e2) = EApp (subst x e e1) (subst x e e2)
subst x e (ETuple e1 e2) = ETuple (subst x e e1) (subst x e e2)
subst x e (EFst e') = EFst (subst x e e')
subst x e (ESnd e') = ESnd (subst x e e')
subst x e (EIf e1 e2 e3) = EIf (subst x e e1) (subst x e e2) (subst x e e3)
subst x e (EAdd e1 e2) = EAdd (subst x e e1) (subst x e e2)
subst x e (EMul e1 e2) = EMul (subst x e e1) (subst x e e2)
subst x e (ESucc e') = ESucc (subst x e e')
subst _ _ e = e
fromNat :: Nat -> Exp
fromNat Zero = EZero
fromNat (Succ n) = ESucc (fromNat n)
|
d74d78e7f0d6cfd6211d4973a424ad0f35dea4b58052af321e2b40e9c25885d0 | facebook/duckling | Rules.hs | Copyright ( c ) 2016 - present , Facebook , Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
{-# LANGUAGE GADTs #-}
{-# LANGUAGE OverloadedStrings #-}
# LANGUAGE NoRebindableSyntax #
module Duckling.Numeral.PT.Rules
( rules
) where
import Control.Applicative ((<|>))
import Data.HashMap.Strict (HashMap)
import Data.Maybe
import Data.String
import Data.Text (Text)
import Prelude
import qualified Data.HashMap.Strict as HashMap
import qualified Data.Text as Text
import Duckling.Dimensions.Types
import Duckling.Numeral.Helpers
import Duckling.Numeral.Types (NumeralData (..))
import Duckling.Regex.Types
import Duckling.Types
import qualified Duckling.Numeral.Types as TNumeral
ruleDozen :: Rule
ruleDozen = Rule
{ name = "a dozen of"
, pattern =
[ regex "(uma )?d(u|ú)zias?( de)?"
]
, prod = \_ -> integer 12 >>= withMultipliable >>= notOkForAnyTime
}
zeroNineteenMap :: HashMap Text Integer
zeroNineteenMap = HashMap.fromList
[ ( "zero" , 0 )
, ( "um" , 1 )
, ( "uma" , 1 )
, ( "dois" , 2 )
, ( "duas" , 2 )
, ( "tres" , 3 )
, ( "três" , 3 )
, ( "quatro" , 4 )
, ( "cinco" , 5 )
, ( "seis" , 6 )
, ( "sete" , 7 )
, ( "oito" , 8 )
, ( "nove" , 9 )
, ( "dez" , 10 )
, ( "onze" , 11 )
, ( "doze" , 12 )
, ( "treze" , 13 )
, ( "catorze" , 14 )
, ( "quatorze" , 14 )
, ( "quinze" , 15 )
, ( "dezesseis" , 16 )
, ( "dezasseis" , 16 )
, ( "dezessete" , 17 )
, ( "dezassete" , 17 )
, ( "dezoito" , 18 )
, ( "dezenove" , 19 )
, ( "dezanove" , 19 )
]
informalMap :: HashMap Text Integer
informalMap = HashMap.fromList
[ ( "um par" , 2 )
, ( "um par de" , 2 )
, ( "par" , 2 )
, ( "pares" , 2 )
, ( "par de" , 2 )
, ( "pares de" , 2 )
, ( "um pouco" , 3 )
, ( "pouco" , 3 )
]
ruleToNineteen :: Rule
ruleToNineteen = Rule
{ name = "integer (0..19)"
, pattern =
[ regex "(zero|d(oi|ua)s|(uma? )?par(es)?( de)?|tr(e|ê)s|(um )?pouco|uma?|(c|qu)atorze|quatro|quinze|cinco|dez[ea]sseis|seis|dez[ea]ssete|sete|dezoito|oito|dez[ea]nove|nove|dez|onze|doze|treze)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
let x = Text.toLower match in
(HashMap.lookup x zeroNineteenMap >>= integer) <|>
(HashMap.lookup x informalMap >>= integer >>= notOkForAnyTime)
_ -> Nothing
}
tensMap :: HashMap Text Integer
tensMap = HashMap.fromList
[ ( "vinte" , 20 )
, ( "trinta" , 30 )
, ( "quarenta" , 40 )
, ( "cincoenta" , 50 )
, ( "cinquenta" , 50 )
, ( "cinqüenta" , 50 )
, ( "sessenta" , 60 )
, ( "setenta" , 70 )
, ( "oitenta" , 80 )
, ( "noventa" , 90 )
]
ruleTens :: Rule
ruleTens = Rule
{ name = "tens (20..90)"
, pattern =
[ regex "(vinte|trinta|quarenta|cin(co|q[uü])enta|sessenta|setenta|oitenta|noventa)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
HashMap.lookup (Text.toLower match) tensMap >>= integer
_ -> Nothing
}
centsMap :: HashMap Text Integer
centsMap = HashMap.fromList
[ ( "cem" , 100 )
, ( "cento" , 100 )
, ( "duzentos" , 200 )
, ( "trezentos" , 300 )
, ( "quatrocentos" , 400 )
, ( "quinhetos" , 500 )
, ( "seiscentos" , 600 )
, ( "setecentos" , 700 )
, ( "oitocentos" , 800 )
, ( "novecentos" , 900 )
]
ruleCent :: Rule
ruleCent = Rule
{ name = "hundreds (100..900)"
, pattern =
[ regex "(cem|cento|duzentos|trezentos|quatrocentos|quinhetos|seiscentos|setecentos|oitocentos|novecentos)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
HashMap.lookup (Text.toLower match) centsMap >>= integer
_ -> Nothing
}
rulePowersOfTen :: Rule
rulePowersOfTen = Rule
{ name = "powers of tens"
, pattern =
[ regex "(milhao|milhão|milhões|milhoes|bilhao|bilhão|bilhões|bilhoes|mil)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) -> case Text.toLower match of
"mil" -> double 1e3 >>= withGrain 3 >>= withMultipliable
"milhao" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhão" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhões" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhoes" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"bilhao" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhão" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhões" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhoes" -> double 1e9 >>= withGrain 9 >>= withMultipliable
_ -> Nothing
_ -> Nothing
}
ruleCompositeTens :: Rule
ruleCompositeTens = Rule
{ name = "integer 21..99"
, pattern =
[ oneOf [20,30..90]
, Predicate $ numberBetween 1 10
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = tens}:
Token Numeral NumeralData{TNumeral.value = units}:
_) -> double $ tens + units
_ -> Nothing
}
ruleDecsAnd :: Rule
ruleDecsAnd = Rule
{ name = "number (21..29 31..39 .. 91..99)"
, pattern =
[ oneOf [20, 30..90]
, regex "e"
, Predicate $ numberBetween 1 10
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v1}:
_:
Token Numeral NumeralData{TNumeral.value = v2}:
_) -> double $ v1 + v2
_ -> Nothing
}
ruleCompositeCents :: Rule
ruleCompositeCents = Rule
{ name = "integer 101..999"
, pattern =
[ oneOf [100, 200..900]
, Predicate $ numberBetween 1 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = hundreds}:
Token Numeral NumeralData{TNumeral.value = units}:
_) -> double $ hundreds + units
_ -> Nothing
}
ruleCentsAnd :: Rule
ruleCentsAnd = Rule
{ name = "number (101..199 201..299 .. 901..999)"
, pattern =
[ oneOf [100, 200..900]
, regex "e"
, Predicate $ numberBetween 1 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v1}:
_:
Token Numeral NumeralData{TNumeral.value = v2}:
_) -> double $ v1 + v2
_ -> Nothing
}
ruleSkipHundreds :: Rule
ruleSkipHundreds = Rule
{ name = "one twenty two"
, pattern =
[ Predicate $ numberBetween 1 10
, Predicate $ numberBetween 10 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = hundreds}:
Token Numeral NumeralData{TNumeral.value = rest}:
_) -> double $ hundreds*100 + rest
_ -> Nothing
}
ruleDotSpelledOut :: Rule
ruleDotSpelledOut = Rule
{ name = "one point 2"
, pattern =
[ dimension Numeral
, regex "ponto"
, Predicate $ not . hasGrain
]
, prod = \tokens -> case tokens of
(Token Numeral nd1:_:Token Numeral nd2:_) ->
double $ TNumeral.value nd1 + decimalsToDouble (TNumeral.value nd2)
_ -> Nothing
}
ruleLeadingDotSpelledOut :: Rule
ruleLeadingDotSpelledOut = Rule
{ name = "point 77"
, pattern =
[ regex "ponto"
, Predicate $ not . hasGrain
]
, prod = \tokens -> case tokens of
(_:Token Numeral nd:_) -> double . decimalsToDouble $ TNumeral.value nd
_ -> Nothing
}
ruleDecimals :: Rule
ruleDecimals = Rule
{ name = "decimal number"
, pattern =
[ regex "(\\d*\\,\\d+)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) -> parseDecimal False match
_ -> Nothing
}
ruleCommas :: Rule
ruleCommas = Rule
{ name = "dot-separated numbers"
, pattern =
[ regex "(\\d+(\\.\\d\\d\\d)+(\\,\\d+)?)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
parseDecimal False $ Text.replace "." Text.empty match
_ -> Nothing
}
ruleSuffixes :: Rule
ruleSuffixes = Rule
{ name = "suffixes (K,M,G))"
, pattern =
[ dimension Numeral
, regex "(k|m|g)(?=[\\W$€¢£]|$)"
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v}:
Token RegexMatch (GroupMatch (match:_)):
_) -> case Text.toLower match of
"k" -> double $ v * 1e3
"m" -> double $ v * 1e6
"g" -> double $ v * 1e9
_ -> Nothing
_ -> Nothing
}
ruleNegative :: Rule
ruleNegative = Rule
{ name = "negative numbers"
, pattern =
[ regex "(-|menos|negativo)(?!\\s*\\-)"
, Predicate isPositive
]
, prod = \tokens -> case tokens of
(_:Token Numeral nd:_) -> double $ TNumeral.value nd * (-1)
_ -> Nothing
}
ruleSum :: Rule
ruleSum = Rule
{ name = "intersect 2 numbers"
, pattern =
[ Predicate hasGrain
, Predicate $ and . sequence [not . isMultipliable, isPositive]
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = val1, TNumeral.grain = Just g}:
Token Numeral NumeralData{TNumeral.value = val2}:
_) | (10 ** fromIntegral g) > val2 -> double $ val1 + val2
_ -> Nothing
}
ruleSumAnd :: Rule
ruleSumAnd = Rule
{ name = "intersect 2 numbers (with and)"
, pattern =
[ Predicate hasGrain
, regex "e"
, Predicate $ and . sequence [not . isMultipliable, isPositive]
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = val1, TNumeral.grain = Just g}:
_:
Token Numeral NumeralData{TNumeral.value = val2}:
_) | (10 ** fromIntegral g) > val2 -> double $ val1 + val2
_ -> Nothing
}
ruleMultiply :: Rule
ruleMultiply = Rule
{ name = "compose by multiplication"
, pattern =
[ dimension Numeral
, Predicate isMultipliable
]
, prod = \tokens -> case tokens of
(token1:token2:_) -> multiply token1 token2
_ -> Nothing
}
rules :: [Rule]
rules =
[ ruleToNineteen
, ruleTens
, ruleCent
, rulePowersOfTen
, ruleCompositeTens
, ruleCompositeCents
, ruleSkipHundreds
, ruleDotSpelledOut
, ruleLeadingDotSpelledOut
, ruleDecimals
, ruleCommas
, ruleSuffixes
, ruleNegative
, ruleSum
, ruleDecsAnd
, ruleCentsAnd
, ruleSumAnd
, ruleMultiply
, ruleDozen
]
| null | https://raw.githubusercontent.com/facebook/duckling/c6a48a1d0678a389f86d17db2676e1a289e355ce/Duckling/Numeral/PT/Rules.hs | haskell | All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
# LANGUAGE GADTs #
# LANGUAGE OverloadedStrings # | Copyright ( c ) 2016 - present , Facebook , Inc.
# LANGUAGE NoRebindableSyntax #
module Duckling.Numeral.PT.Rules
( rules
) where
import Control.Applicative ((<|>))
import Data.HashMap.Strict (HashMap)
import Data.Maybe
import Data.String
import Data.Text (Text)
import Prelude
import qualified Data.HashMap.Strict as HashMap
import qualified Data.Text as Text
import Duckling.Dimensions.Types
import Duckling.Numeral.Helpers
import Duckling.Numeral.Types (NumeralData (..))
import Duckling.Regex.Types
import Duckling.Types
import qualified Duckling.Numeral.Types as TNumeral
ruleDozen :: Rule
ruleDozen = Rule
{ name = "a dozen of"
, pattern =
[ regex "(uma )?d(u|ú)zias?( de)?"
]
, prod = \_ -> integer 12 >>= withMultipliable >>= notOkForAnyTime
}
zeroNineteenMap :: HashMap Text Integer
zeroNineteenMap = HashMap.fromList
[ ( "zero" , 0 )
, ( "um" , 1 )
, ( "uma" , 1 )
, ( "dois" , 2 )
, ( "duas" , 2 )
, ( "tres" , 3 )
, ( "três" , 3 )
, ( "quatro" , 4 )
, ( "cinco" , 5 )
, ( "seis" , 6 )
, ( "sete" , 7 )
, ( "oito" , 8 )
, ( "nove" , 9 )
, ( "dez" , 10 )
, ( "onze" , 11 )
, ( "doze" , 12 )
, ( "treze" , 13 )
, ( "catorze" , 14 )
, ( "quatorze" , 14 )
, ( "quinze" , 15 )
, ( "dezesseis" , 16 )
, ( "dezasseis" , 16 )
, ( "dezessete" , 17 )
, ( "dezassete" , 17 )
, ( "dezoito" , 18 )
, ( "dezenove" , 19 )
, ( "dezanove" , 19 )
]
informalMap :: HashMap Text Integer
informalMap = HashMap.fromList
[ ( "um par" , 2 )
, ( "um par de" , 2 )
, ( "par" , 2 )
, ( "pares" , 2 )
, ( "par de" , 2 )
, ( "pares de" , 2 )
, ( "um pouco" , 3 )
, ( "pouco" , 3 )
]
ruleToNineteen :: Rule
ruleToNineteen = Rule
{ name = "integer (0..19)"
, pattern =
[ regex "(zero|d(oi|ua)s|(uma? )?par(es)?( de)?|tr(e|ê)s|(um )?pouco|uma?|(c|qu)atorze|quatro|quinze|cinco|dez[ea]sseis|seis|dez[ea]ssete|sete|dezoito|oito|dez[ea]nove|nove|dez|onze|doze|treze)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
let x = Text.toLower match in
(HashMap.lookup x zeroNineteenMap >>= integer) <|>
(HashMap.lookup x informalMap >>= integer >>= notOkForAnyTime)
_ -> Nothing
}
tensMap :: HashMap Text Integer
tensMap = HashMap.fromList
[ ( "vinte" , 20 )
, ( "trinta" , 30 )
, ( "quarenta" , 40 )
, ( "cincoenta" , 50 )
, ( "cinquenta" , 50 )
, ( "cinqüenta" , 50 )
, ( "sessenta" , 60 )
, ( "setenta" , 70 )
, ( "oitenta" , 80 )
, ( "noventa" , 90 )
]
ruleTens :: Rule
ruleTens = Rule
{ name = "tens (20..90)"
, pattern =
[ regex "(vinte|trinta|quarenta|cin(co|q[uü])enta|sessenta|setenta|oitenta|noventa)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
HashMap.lookup (Text.toLower match) tensMap >>= integer
_ -> Nothing
}
centsMap :: HashMap Text Integer
centsMap = HashMap.fromList
[ ( "cem" , 100 )
, ( "cento" , 100 )
, ( "duzentos" , 200 )
, ( "trezentos" , 300 )
, ( "quatrocentos" , 400 )
, ( "quinhetos" , 500 )
, ( "seiscentos" , 600 )
, ( "setecentos" , 700 )
, ( "oitocentos" , 800 )
, ( "novecentos" , 900 )
]
ruleCent :: Rule
ruleCent = Rule
{ name = "hundreds (100..900)"
, pattern =
[ regex "(cem|cento|duzentos|trezentos|quatrocentos|quinhetos|seiscentos|setecentos|oitocentos|novecentos)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
HashMap.lookup (Text.toLower match) centsMap >>= integer
_ -> Nothing
}
rulePowersOfTen :: Rule
rulePowersOfTen = Rule
{ name = "powers of tens"
, pattern =
[ regex "(milhao|milhão|milhões|milhoes|bilhao|bilhão|bilhões|bilhoes|mil)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) -> case Text.toLower match of
"mil" -> double 1e3 >>= withGrain 3 >>= withMultipliable
"milhao" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhão" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhões" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"milhoes" -> double 1e6 >>= withGrain 6 >>= withMultipliable
"bilhao" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhão" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhões" -> double 1e9 >>= withGrain 9 >>= withMultipliable
"bilhoes" -> double 1e9 >>= withGrain 9 >>= withMultipliable
_ -> Nothing
_ -> Nothing
}
ruleCompositeTens :: Rule
ruleCompositeTens = Rule
{ name = "integer 21..99"
, pattern =
[ oneOf [20,30..90]
, Predicate $ numberBetween 1 10
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = tens}:
Token Numeral NumeralData{TNumeral.value = units}:
_) -> double $ tens + units
_ -> Nothing
}
ruleDecsAnd :: Rule
ruleDecsAnd = Rule
{ name = "number (21..29 31..39 .. 91..99)"
, pattern =
[ oneOf [20, 30..90]
, regex "e"
, Predicate $ numberBetween 1 10
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v1}:
_:
Token Numeral NumeralData{TNumeral.value = v2}:
_) -> double $ v1 + v2
_ -> Nothing
}
ruleCompositeCents :: Rule
ruleCompositeCents = Rule
{ name = "integer 101..999"
, pattern =
[ oneOf [100, 200..900]
, Predicate $ numberBetween 1 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = hundreds}:
Token Numeral NumeralData{TNumeral.value = units}:
_) -> double $ hundreds + units
_ -> Nothing
}
ruleCentsAnd :: Rule
ruleCentsAnd = Rule
{ name = "number (101..199 201..299 .. 901..999)"
, pattern =
[ oneOf [100, 200..900]
, regex "e"
, Predicate $ numberBetween 1 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v1}:
_:
Token Numeral NumeralData{TNumeral.value = v2}:
_) -> double $ v1 + v2
_ -> Nothing
}
ruleSkipHundreds :: Rule
ruleSkipHundreds = Rule
{ name = "one twenty two"
, pattern =
[ Predicate $ numberBetween 1 10
, Predicate $ numberBetween 10 100
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = hundreds}:
Token Numeral NumeralData{TNumeral.value = rest}:
_) -> double $ hundreds*100 + rest
_ -> Nothing
}
ruleDotSpelledOut :: Rule
ruleDotSpelledOut = Rule
{ name = "one point 2"
, pattern =
[ dimension Numeral
, regex "ponto"
, Predicate $ not . hasGrain
]
, prod = \tokens -> case tokens of
(Token Numeral nd1:_:Token Numeral nd2:_) ->
double $ TNumeral.value nd1 + decimalsToDouble (TNumeral.value nd2)
_ -> Nothing
}
ruleLeadingDotSpelledOut :: Rule
ruleLeadingDotSpelledOut = Rule
{ name = "point 77"
, pattern =
[ regex "ponto"
, Predicate $ not . hasGrain
]
, prod = \tokens -> case tokens of
(_:Token Numeral nd:_) -> double . decimalsToDouble $ TNumeral.value nd
_ -> Nothing
}
ruleDecimals :: Rule
ruleDecimals = Rule
{ name = "decimal number"
, pattern =
[ regex "(\\d*\\,\\d+)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) -> parseDecimal False match
_ -> Nothing
}
ruleCommas :: Rule
ruleCommas = Rule
{ name = "dot-separated numbers"
, pattern =
[ regex "(\\d+(\\.\\d\\d\\d)+(\\,\\d+)?)"
]
, prod = \tokens -> case tokens of
(Token RegexMatch (GroupMatch (match:_)):_) ->
parseDecimal False $ Text.replace "." Text.empty match
_ -> Nothing
}
ruleSuffixes :: Rule
ruleSuffixes = Rule
{ name = "suffixes (K,M,G))"
, pattern =
[ dimension Numeral
, regex "(k|m|g)(?=[\\W$€¢£]|$)"
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = v}:
Token RegexMatch (GroupMatch (match:_)):
_) -> case Text.toLower match of
"k" -> double $ v * 1e3
"m" -> double $ v * 1e6
"g" -> double $ v * 1e9
_ -> Nothing
_ -> Nothing
}
ruleNegative :: Rule
ruleNegative = Rule
{ name = "negative numbers"
, pattern =
[ regex "(-|menos|negativo)(?!\\s*\\-)"
, Predicate isPositive
]
, prod = \tokens -> case tokens of
(_:Token Numeral nd:_) -> double $ TNumeral.value nd * (-1)
_ -> Nothing
}
ruleSum :: Rule
ruleSum = Rule
{ name = "intersect 2 numbers"
, pattern =
[ Predicate hasGrain
, Predicate $ and . sequence [not . isMultipliable, isPositive]
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = val1, TNumeral.grain = Just g}:
Token Numeral NumeralData{TNumeral.value = val2}:
_) | (10 ** fromIntegral g) > val2 -> double $ val1 + val2
_ -> Nothing
}
ruleSumAnd :: Rule
ruleSumAnd = Rule
{ name = "intersect 2 numbers (with and)"
, pattern =
[ Predicate hasGrain
, regex "e"
, Predicate $ and . sequence [not . isMultipliable, isPositive]
]
, prod = \tokens -> case tokens of
(Token Numeral NumeralData{TNumeral.value = val1, TNumeral.grain = Just g}:
_:
Token Numeral NumeralData{TNumeral.value = val2}:
_) | (10 ** fromIntegral g) > val2 -> double $ val1 + val2
_ -> Nothing
}
ruleMultiply :: Rule
ruleMultiply = Rule
{ name = "compose by multiplication"
, pattern =
[ dimension Numeral
, Predicate isMultipliable
]
, prod = \tokens -> case tokens of
(token1:token2:_) -> multiply token1 token2
_ -> Nothing
}
rules :: [Rule]
rules =
[ ruleToNineteen
, ruleTens
, ruleCent
, rulePowersOfTen
, ruleCompositeTens
, ruleCompositeCents
, ruleSkipHundreds
, ruleDotSpelledOut
, ruleLeadingDotSpelledOut
, ruleDecimals
, ruleCommas
, ruleSuffixes
, ruleNegative
, ruleSum
, ruleDecsAnd
, ruleCentsAnd
, ruleSumAnd
, ruleMultiply
, ruleDozen
]
|
ae1f68c53b19b6cfcaac0d9b7721f341a686b82e7de8f9390f65c8c31095240c | laurencer/confluence-sync | MockServer.hs | --
-- HTTP client for use with io-streams
--
Copyright © 2012 - 2014 Operational Dynamics Consulting , Pty Ltd
--
-- The code in this file, and the program it is a part of, is made
-- available to you by its authors as open source software: you can
redistribute it and/or modify it under a BSD licence .
--
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PackageImports #-}
{-# OPTIONS -fno-warn-dodgy-imports #-}
module MockServer (runMockServer, localPort) where
Per , we suppress
the warning resulting from this line , necessary on < 7.6
Per , we suppress
the warning resulting from this line, necessary on <7.6
-}
import Prelude hiding (catch)
import Control.Applicative
import Control.Concurrent (forkIO, threadDelay)
import Control.Exception (SomeException)
import Control.Monad.CatchIO (catch)
import "mtl" Control.Monad.Trans (liftIO)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as S
import qualified Data.ByteString.Lazy.Char8 as L
import Data.Maybe (fromMaybe)
import Filesystem (getSize)
import Filesystem.Path.CurrentOS (decodeString)
import Snap.Core
import Snap.Http.Server
import Snap.Util.FileServe
import System.IO (hFlush, hPutStrLn, stderr)
import Network.Http.Client (Hostname, Port)
localHost = "localhost" :: Hostname
localPort = 56981 :: Port
main :: IO ()
main = go
Binding the port to the IPv4 localhost appears to settle the problem
of localhost resolving ambigiously . If that does n't work , we can
comment out the setBind and the resultant 0.0.0.0 does seem to work .
Binding the port to the IPv4 localhost appears to settle the problem
of localhost resolving ambigiously. If that doesn't work, we can
comment out the setBind and the resultant 0.0.0.0 does seem to work.
-}
go :: IO ()
go = httpServe c site
where
c = setAccessLog ConfigNoLog $
setErrorLog ConfigNoLog $
setHostname localHost $
setBind localHost $
setPort (fromIntegral localPort) $
setVerbose False emptyConfig
runMockServer :: IO ()
runMockServer = do
_ <- forkIO go
threadDelay 2000000
return ()
--
-- Top level URL routing logic.
--
site :: Snap ()
site = catch
(routeRequests)
(\e -> serveError "Splat\n" e)
routeRequests :: Snap ()
routeRequests =
route
[("resource/:id", serveResource),
("static/:id", method GET serveStatic),
("time", serveTime),
("", ifTop handleAsText),
("bounce", serveRedirect),
("local", serveLocalRedirect),
("loop", serveRedirectEndlessly),
("empty", serveWithoutContent),
("postbox", method POST handlePostMethod),
("size", handleSizeRequest),
("api", handleRestfulRequest),
("cookies", serveRepeatedResponseHeaders)]
<|> serveNotFound
serveResource :: Snap ()
serveResource = do
r <- getRequest
let m = rqMethod r
case m of
GET -> handleGetMethod
PUT -> handlePutWithExpectation
_ -> serveMethodNotAllowed
serveStatic :: Snap ()
serveStatic = do
im' <- getParam "id"
let i' = fromMaybe "" im'
let f' = S.concat ["tests/", i']
let f = S.unpack f'
l <- liftIO $ getSize $ decodeString f
let t = fileType defaultMimeTypes f
modifyResponse $ setContentType t
modifyResponse $ setContentLength $ fromIntegral l
b' <- liftIO $ S.readFile f
writeBS b'
serveTime :: Snap ()
serveTime = do
writeBS "Sun 30 Dec 12, 05:39:56.746Z\n"
--
-- Dispatch normal GET requests based on MIME type.
--
handleGetMethod :: Snap ()
handleGetMethod = do
r <- getRequest
let mime0 = getHeader "Accept" r
case mime0 of
Just "text/html" -> handleAsBrowser
_ -> handleAsText
handleAsBrowser :: Snap ()
handleAsBrowser = do
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType "text/html; charset=UTF-8"
modifyResponse $ setHeader "Cache-Control" "max-age=1"
sendFile "tests/hello.html"
handleAsText :: Snap ()
handleAsText = do
modifyResponse $ setContentType "text/plain"
writeBS "Sounds good to me\n"
handleRestfulRequest :: Snap ()
handleRestfulRequest = do
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType "application/json"
sendFile "tests/data-eu-gdp.json"
serveRedirect :: Snap ()
serveRedirect = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.concat ["http://", localHost, ":", S.pack $ show $ localPort, "/time"]
serveLocalRedirect :: Snap ()
serveLocalRedirect = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.pack "/time"
serveRedirectEndlessly :: Snap ()
serveRedirectEndlessly = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.concat ["http://", localHost, ":", S.pack $ show $ localPort, "/loop"]
Attempt to test the bug with 204 No Content not closing in absence of a
Content - Length header , however Snap automatically adds one , it seems . So ,
after the fact , this is unused and the case is tested in
TestServer.testDevoidOfContent .
Attempt to test the bug with 204 No Content not closing in absence of a
Content-Length header, however Snap automatically adds one, it seems. So,
after the fact, this is unused and the case is tested in
TestServer.testDevoidOfContent.
-}
serveWithoutContent :: Snap ()
serveWithoutContent = do
modifyResponse $ setResponseStatus 204 "No Content"
modifyResponse $ setHeader "Cache-Control" "no-cache"
serveRepeatedResponseHeaders :: Snap ()
serveRepeatedResponseHeaders = do
modifyResponse $ addHeader "Set-Cookie" "stone=diamond"
modifyResponse $ addHeader "Set-Cookie" "metal=tungsten"
handlePostMethod :: Snap ()
handlePostMethod = do
setTimeout 5
modifyResponse $ setResponseStatus 201 "Created"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" ""
modifyResponse $ setContentType "text/plain"
b' <- readRequestBody 1024
writeLBS b'
handlePutWithExpectation :: Snap ()
handlePutWithExpectation = do
setTimeout 5
modifyResponse $ setResponseStatus 201 "Created"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setContentType "text/plain"
b' <- readRequestBody 1024
writeLBS b'
handleSizeRequest :: Snap ()
handleSizeRequest = do
r <- getRequest
let mm = getHeader "Content-Type" r
t <- case mm of
Just m -> return m
_ -> do
serveUnsupported
return ""
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType t
b' <- readRequestBody 65536
writeBS $ S.pack $ show $ L.length b'
updateResource :: Snap ()
updateResource = do
bs' <- readRequestBody 4096
let b' = fromLazy bs'
im' <- getParam "id"
let i' = fromMaybe "0" im'
-- TODO something
modifyResponse $ setResponseStatus 204 "Updated" -- "No Content"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setContentLength 0
return ()
where
fromLazy ls' = S.concat $ L.toChunks ls'
serveNotFound :: Snap a
serveNotFound = do
modifyResponse $ setResponseStatus 404 "Not Found"
modifyResponse $ setHeader "Content-Type" "text/html"
writeBS "404 Not Found"
r <- getResponse
finishWith r
serveBadRequest :: Snap ()
serveBadRequest = do
modifyResponse $ setResponseStatus 400 "Bad Request"
writeBS "400 Bad Request\n"
serveMethodNotAllowed :: Snap ()
serveMethodNotAllowed = do
modifyResponse $ setResponseStatus 405 "Method Not Allowed"
modifyResponse $ setHeader "Allow" "GET, POST, PUT"
writeBS "405 Method Not Allowed\n"
r <- getResponse
finishWith r
serveUnsupported :: Snap ()
serveUnsupported = do
modifyResponse $ setResponseStatus 415 "Unsupported Media Type"
writeBS "415 Unsupported Media Type\n"
r <- getResponse
finishWith r
--
-- The exception will be dumped to the server's stdout, while the supplied
-- message will be sent out with the response (ideally only for debugging
-- purposes, but easier than looking in log/error.log for details).
--
serveError :: ByteString -> SomeException -> Snap ()
serveError x' e = do
debug msg
modifyResponse $ setResponseStatus 500 "Internal Server Error"
writeBS x'
r <- getResponse
finishWith r
where
msg = show (e :: SomeException)
debug :: String -> Snap ()
debug cs = do
liftIO $ do
hPutStrLn stderr ""
hPutStrLn stderr cs
hFlush stderr
| null | https://raw.githubusercontent.com/laurencer/confluence-sync/442fdbc84fe07471f323af80d2d4580026f8d9e8/vendor/http-streams/tests/MockServer.hs | haskell |
HTTP client for use with io-streams
The code in this file, and the program it is a part of, is made
available to you by its authors as open source software: you can
# LANGUAGE OverloadedStrings #
# LANGUAGE PackageImports #
# OPTIONS -fno-warn-dodgy-imports #
Top level URL routing logic.
Dispatch normal GET requests based on MIME type.
TODO something
"No Content"
The exception will be dumped to the server's stdout, while the supplied
message will be sent out with the response (ideally only for debugging
purposes, but easier than looking in log/error.log for details).
| Copyright © 2012 - 2014 Operational Dynamics Consulting , Pty Ltd
redistribute it and/or modify it under a BSD licence .
module MockServer (runMockServer, localPort) where
Per , we suppress
the warning resulting from this line , necessary on < 7.6
Per , we suppress
the warning resulting from this line, necessary on <7.6
-}
import Prelude hiding (catch)
import Control.Applicative
import Control.Concurrent (forkIO, threadDelay)
import Control.Exception (SomeException)
import Control.Monad.CatchIO (catch)
import "mtl" Control.Monad.Trans (liftIO)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as S
import qualified Data.ByteString.Lazy.Char8 as L
import Data.Maybe (fromMaybe)
import Filesystem (getSize)
import Filesystem.Path.CurrentOS (decodeString)
import Snap.Core
import Snap.Http.Server
import Snap.Util.FileServe
import System.IO (hFlush, hPutStrLn, stderr)
import Network.Http.Client (Hostname, Port)
localHost = "localhost" :: Hostname
localPort = 56981 :: Port
main :: IO ()
main = go
Binding the port to the IPv4 localhost appears to settle the problem
of localhost resolving ambigiously . If that does n't work , we can
comment out the setBind and the resultant 0.0.0.0 does seem to work .
Binding the port to the IPv4 localhost appears to settle the problem
of localhost resolving ambigiously. If that doesn't work, we can
comment out the setBind and the resultant 0.0.0.0 does seem to work.
-}
go :: IO ()
go = httpServe c site
where
c = setAccessLog ConfigNoLog $
setErrorLog ConfigNoLog $
setHostname localHost $
setBind localHost $
setPort (fromIntegral localPort) $
setVerbose False emptyConfig
runMockServer :: IO ()
runMockServer = do
_ <- forkIO go
threadDelay 2000000
return ()
site :: Snap ()
site = catch
(routeRequests)
(\e -> serveError "Splat\n" e)
routeRequests :: Snap ()
routeRequests =
route
[("resource/:id", serveResource),
("static/:id", method GET serveStatic),
("time", serveTime),
("", ifTop handleAsText),
("bounce", serveRedirect),
("local", serveLocalRedirect),
("loop", serveRedirectEndlessly),
("empty", serveWithoutContent),
("postbox", method POST handlePostMethod),
("size", handleSizeRequest),
("api", handleRestfulRequest),
("cookies", serveRepeatedResponseHeaders)]
<|> serveNotFound
serveResource :: Snap ()
serveResource = do
r <- getRequest
let m = rqMethod r
case m of
GET -> handleGetMethod
PUT -> handlePutWithExpectation
_ -> serveMethodNotAllowed
serveStatic :: Snap ()
serveStatic = do
im' <- getParam "id"
let i' = fromMaybe "" im'
let f' = S.concat ["tests/", i']
let f = S.unpack f'
l <- liftIO $ getSize $ decodeString f
let t = fileType defaultMimeTypes f
modifyResponse $ setContentType t
modifyResponse $ setContentLength $ fromIntegral l
b' <- liftIO $ S.readFile f
writeBS b'
serveTime :: Snap ()
serveTime = do
writeBS "Sun 30 Dec 12, 05:39:56.746Z\n"
handleGetMethod :: Snap ()
handleGetMethod = do
r <- getRequest
let mime0 = getHeader "Accept" r
case mime0 of
Just "text/html" -> handleAsBrowser
_ -> handleAsText
handleAsBrowser :: Snap ()
handleAsBrowser = do
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType "text/html; charset=UTF-8"
modifyResponse $ setHeader "Cache-Control" "max-age=1"
sendFile "tests/hello.html"
handleAsText :: Snap ()
handleAsText = do
modifyResponse $ setContentType "text/plain"
writeBS "Sounds good to me\n"
handleRestfulRequest :: Snap ()
handleRestfulRequest = do
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType "application/json"
sendFile "tests/data-eu-gdp.json"
serveRedirect :: Snap ()
serveRedirect = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.concat ["http://", localHost, ":", S.pack $ show $ localPort, "/time"]
serveLocalRedirect :: Snap ()
serveLocalRedirect = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.pack "/time"
serveRedirectEndlessly :: Snap ()
serveRedirectEndlessly = do
modifyResponse $ setResponseStatus 307 "Temporary Redirect"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" r'
where
r' = S.concat ["http://", localHost, ":", S.pack $ show $ localPort, "/loop"]
Attempt to test the bug with 204 No Content not closing in absence of a
Content - Length header , however Snap automatically adds one , it seems . So ,
after the fact , this is unused and the case is tested in
TestServer.testDevoidOfContent .
Attempt to test the bug with 204 No Content not closing in absence of a
Content-Length header, however Snap automatically adds one, it seems. So,
after the fact, this is unused and the case is tested in
TestServer.testDevoidOfContent.
-}
serveWithoutContent :: Snap ()
serveWithoutContent = do
modifyResponse $ setResponseStatus 204 "No Content"
modifyResponse $ setHeader "Cache-Control" "no-cache"
serveRepeatedResponseHeaders :: Snap ()
serveRepeatedResponseHeaders = do
modifyResponse $ addHeader "Set-Cookie" "stone=diamond"
modifyResponse $ addHeader "Set-Cookie" "metal=tungsten"
handlePostMethod :: Snap ()
handlePostMethod = do
setTimeout 5
modifyResponse $ setResponseStatus 201 "Created"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setHeader "Location" ""
modifyResponse $ setContentType "text/plain"
b' <- readRequestBody 1024
writeLBS b'
handlePutWithExpectation :: Snap ()
handlePutWithExpectation = do
setTimeout 5
modifyResponse $ setResponseStatus 201 "Created"
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setContentType "text/plain"
b' <- readRequestBody 1024
writeLBS b'
handleSizeRequest :: Snap ()
handleSizeRequest = do
r <- getRequest
let mm = getHeader "Content-Type" r
t <- case mm of
Just m -> return m
_ -> do
serveUnsupported
return ""
modifyResponse $ setResponseStatus 200 "OK"
modifyResponse $ setContentType t
b' <- readRequestBody 65536
writeBS $ S.pack $ show $ L.length b'
updateResource :: Snap ()
updateResource = do
bs' <- readRequestBody 4096
let b' = fromLazy bs'
im' <- getParam "id"
let i' = fromMaybe "0" im'
modifyResponse $ setHeader "Cache-Control" "no-cache"
modifyResponse $ setContentLength 0
return ()
where
fromLazy ls' = S.concat $ L.toChunks ls'
serveNotFound :: Snap a
serveNotFound = do
modifyResponse $ setResponseStatus 404 "Not Found"
modifyResponse $ setHeader "Content-Type" "text/html"
writeBS "404 Not Found"
r <- getResponse
finishWith r
serveBadRequest :: Snap ()
serveBadRequest = do
modifyResponse $ setResponseStatus 400 "Bad Request"
writeBS "400 Bad Request\n"
serveMethodNotAllowed :: Snap ()
serveMethodNotAllowed = do
modifyResponse $ setResponseStatus 405 "Method Not Allowed"
modifyResponse $ setHeader "Allow" "GET, POST, PUT"
writeBS "405 Method Not Allowed\n"
r <- getResponse
finishWith r
serveUnsupported :: Snap ()
serveUnsupported = do
modifyResponse $ setResponseStatus 415 "Unsupported Media Type"
writeBS "415 Unsupported Media Type\n"
r <- getResponse
finishWith r
serveError :: ByteString -> SomeException -> Snap ()
serveError x' e = do
debug msg
modifyResponse $ setResponseStatus 500 "Internal Server Error"
writeBS x'
r <- getResponse
finishWith r
where
msg = show (e :: SomeException)
debug :: String -> Snap ()
debug cs = do
liftIO $ do
hPutStrLn stderr ""
hPutStrLn stderr cs
hFlush stderr
|
0709aedb0b8c65de54f2ce2d572372fee21d8bef66b111723d2d1f8a074d2182 | archimag/mongo-cl-driver | protocol.lisp | ;;;; protocol.lisp
;;;;
;;;; This file is part of the MONGO-CL-DRIVER library, released under Lisp-LGPL.
;;;; See file COPYING for details.
;;;;
Author : < >
(in-package #:mongo-cl-driver.wire)
(defconstant +op-reply+ 1 "Reply to a client request. responseTo is set")
(defconstant +op-msg+ 1000 "generic msg command followed by a string")
(defconstant +op-update+ 2001 "update document")
(defconstant +op-insert+ 2002 "insert new document")
(defconstant +reserverd+ 2003 "formerly used for OP_GET_BY_OID")
(defconstant +op-query+ 2004 "query a collection")
(defconstant +op-get-more+ 2005 "Get more data from a query. See Cursors")
(defconstant +op-delete+ 2006 "Delete documents")
(defconstant +op-kill-cursors+ 2007 "Tell database client is done with a cursor")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Message Types
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defclass msg-header ()
((message-length
:accessor message-length
:bson-type :int32)
(request-id
:initform 0
:initarg :request-id
:accessor request-id
:bson-type :int32)
(response-to
:initform 0
:initarg :response-to
:accessor response-to
:bson-type :int32)
(opcode
:initarg :opcode
:accessor opcode
:bson-type :int32))
(:slot-order message-length request-id response-to opcode)
(:metaclass message-class))
(defmacro define-protocol-message (name code &rest slots)
`(defclass ,name (msg-header)
,slots
(:default-initargs :opcode ,code)
(:slot-order message-length request-id response-to opcode ,@(mapcar #'car slots))
(:metaclass message-class)))
(define-protocol-message op-update +op-update+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(flags
:initform 0
:bson-type :int32)
(selector
:initarg :selector
:bson-type :document)
(update
:initarg :update
:bson-type :document))
(defmethod shared-initialize :after ((msg op-update) slot-names &key upsert multi-update &allow-other-keys)
(let ((bits nil))
(when upsert
(push 0 bits))
(when multi-update
(push 1 bits))
(dolist (bit bits)
(setf (ldb (byte 1 bit)
(slot-value msg 'flags))
1))))
(define-protocol-message op-insert +op-insert+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(documents
:initarg :documents
:initform nil
:bson-type :document
:list-p t))
(define-protocol-message op-query +op-query+
(flags
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(number-to-skip
:initarg :number-to-skip
:initform 0
:bson-type :int32)
(number-to-return
:initarg :number-to-return
:initform 0
:bson-type :int32)
(query
:initarg :query
:bson-type :document)
(return-field-selector
:initarg :return-field-selector
:initform nil
:bson-type :document))
(defmethod shared-initialize :after ((query op-query) slot-names &key
tailable-cursor slave-ok no-cursor-timeout
await-data exhaust partial)
(unless (slot-value query 'query)
(setf (slot-value query 'query)
(make-hash-table :test 'equal)))
(unless (slot-value query 'return-field-selector)
(setf (slot-value query 'return-field-selector)
(make-hash-table :test 'equal)))
(let ((bits nil))
(when tailable-cursor (push 1 bits))
(when slave-ok (push 2 bits))
(when no-cursor-timeout (push 4 bits))
(when await-data (push 5 bits))
(when exhaust (push 6 bits))
(when partial (push 7 bits))
(dolist (bit bits)
(setf (ldb (byte 1 bit)
(slot-value query 'flags))
1))))
(define-protocol-message op-getmore +op-get-more+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(number-to-return
:initarg :number-to-return
:bson-type :int32)
(cursor-id
:initarg :cursor-id
:bson-type :int64))
(define-protocol-message op-delete +op-delete+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(flags
:initform 0
:bson-type :int32)
(selector
:initarg :selector
:bson-type :document))
(defmethod shared-initialize :after ((msg op-delete) slot-names &key single-remove &allow-other-keys)
(when single-remove
(setf (ldb (byte 1 0)
(slot-value msg 'flags))
1)))
(define-protocol-message op-kill-cursors +op-kill-cursors+
(zero
:initform 0
:bson-type :int32)
(number-of-cursor-ids
:bson-type :int32)
(cursor-ids
:initform nil
:initarg :cursor-ids
:bson-type :int64
:list-p t))
(defmethod shared-initialize :after ((msg op-kill-cursors) slot-names &key &allow-other-keys)
(setf (slot-value msg 'number-of-cursor-ids)
(length (slot-value msg 'cursor-ids))))
(define-protocol-message op-reply +op-reply+
(response-flags
:reader op-reply-response-flags
:bson-type :int32)
(cursor-id
:reader op-reply-cursor-id
:bson-type :int64)
(starting-from
:reader op-reply-starting-from
:bson-type :int32)
(number-returned
:reader op-reply-number-returned
:bson-type :int32)
(documents
:reader op-reply-documents
:initform nil
:bson-type :document
:list-p t))
(defmacro define-reply-flag-predicate (name bitnum)
`(defun ,name (reply)
(= (ldb (byte 1 ,bitnum)
(op-reply-response-flags reply))
1)))
(define-reply-flag-predicate cursor-not-found-p 0)
(define-reply-flag-predicate query-failure-p 1)
(define-reply-flag-predicate await-capable-p 3)
(defun check-reply-impl (reply error-handler)
(cond
((cursor-not-found-p reply)
(funcall error-handler
"Cursor '~A' not found"
(op-reply-cursor-id reply))
nil)
((query-failure-p reply)
(funcall error-handler
(gethash "$err" (car (op-reply-documents reply))))
nil)
((gethash "errmsg" (first (op-reply-documents reply)))
(funcall error-handler
(gethash "errmsg" (first (op-reply-documents reply))))
nil)
(t t)))
(defun check-reply (reply)
(check-reply-impl reply #'error)
reply)
(defun check-reply-async (reply callback)
(labels ((check-reply-callback (&rest args)
(funcall callback
(apply #'format args))))
(when (check-reply-impl reply #'check-reply-callback)
(funcall callback nil reply))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; encode protocol message
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defgeneric encode-protocol-message (message target)
(:documentation "Serialize MESSAGE to TARGET"))
(defmethod encode-protocol-message (message (target (eql :vector)))
(encode-protocol-message message
(make-array 0
:element-type '(unsigned-byte 8)
:fill-pointer 0
:adjustable t)))
(defmethod encode-protocol-message (message (target (eql :list)))
(coerce (encode-protocol-message message :vector)
'list))
(defmethod encode-protocol-message :around (message target)
(let ((*encoded-bytes-count* 0))
(call-next-method)))
(defmethod encode-protocol-message (message target)
(let ((size (with-count-encoded-bytes
(dotimes (i 4)
(encode-byte 0 target))
(iter (for slot in (cdr (class-slots (class-of message))))
(for value = #-lispworks (slot-value-using-class (class-of message)
message
slot)
#+lispworks (slot-value-using-class (class-of message)
message
(slot-definition-name slot)))
(for encoder = (message-effective-slot-encoder slot))
(when value
(funcall encoder value target)))))
(arr (make-array 4 :element-type '(unsigned-byte 8) :fill-pointer 0)))
(encode-int32 size arr)
(bson-target-replace target arr 0)
target))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; decode server reply
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun decode-op-reply (source)
(let* ((reply (make-instance 'op-reply))
(reply-class (class-of reply))
(*decoded-bytes-count* 0))
(iter (for slot in (butlast (class-slots reply-class)))
(setf #-lispworks (slot-value-using-class reply-class reply slot)
#+lispworks (slot-value-using-class reply-class reply (slot-definition-name slot))
(funcall (message-effective-slot-decoder slot)
source)))
(iter (for i from 0 below (op-reply-number-returned reply))
(push (decode-document source)
(slot-value reply 'documents)))
(setf (slot-value reply 'documents)
(nreverse (slot-value reply 'documents)))
reply))
| null | https://raw.githubusercontent.com/archimag/mongo-cl-driver/300c10bd3d0b3ca3a82c8b32087bddbc7b6a86c3/wire/protocol.lisp | lisp | protocol.lisp
This file is part of the MONGO-CL-DRIVER library, released under Lisp-LGPL.
See file COPYING for details.
Message Types
encode protocol message
decode server reply
| Author : < >
(in-package #:mongo-cl-driver.wire)
(defconstant +op-reply+ 1 "Reply to a client request. responseTo is set")
(defconstant +op-msg+ 1000 "generic msg command followed by a string")
(defconstant +op-update+ 2001 "update document")
(defconstant +op-insert+ 2002 "insert new document")
(defconstant +reserverd+ 2003 "formerly used for OP_GET_BY_OID")
(defconstant +op-query+ 2004 "query a collection")
(defconstant +op-get-more+ 2005 "Get more data from a query. See Cursors")
(defconstant +op-delete+ 2006 "Delete documents")
(defconstant +op-kill-cursors+ 2007 "Tell database client is done with a cursor")
(defclass msg-header ()
((message-length
:accessor message-length
:bson-type :int32)
(request-id
:initform 0
:initarg :request-id
:accessor request-id
:bson-type :int32)
(response-to
:initform 0
:initarg :response-to
:accessor response-to
:bson-type :int32)
(opcode
:initarg :opcode
:accessor opcode
:bson-type :int32))
(:slot-order message-length request-id response-to opcode)
(:metaclass message-class))
(defmacro define-protocol-message (name code &rest slots)
`(defclass ,name (msg-header)
,slots
(:default-initargs :opcode ,code)
(:slot-order message-length request-id response-to opcode ,@(mapcar #'car slots))
(:metaclass message-class)))
(define-protocol-message op-update +op-update+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(flags
:initform 0
:bson-type :int32)
(selector
:initarg :selector
:bson-type :document)
(update
:initarg :update
:bson-type :document))
(defmethod shared-initialize :after ((msg op-update) slot-names &key upsert multi-update &allow-other-keys)
(let ((bits nil))
(when upsert
(push 0 bits))
(when multi-update
(push 1 bits))
(dolist (bit bits)
(setf (ldb (byte 1 bit)
(slot-value msg 'flags))
1))))
(define-protocol-message op-insert +op-insert+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(documents
:initarg :documents
:initform nil
:bson-type :document
:list-p t))
(define-protocol-message op-query +op-query+
(flags
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(number-to-skip
:initarg :number-to-skip
:initform 0
:bson-type :int32)
(number-to-return
:initarg :number-to-return
:initform 0
:bson-type :int32)
(query
:initarg :query
:bson-type :document)
(return-field-selector
:initarg :return-field-selector
:initform nil
:bson-type :document))
(defmethod shared-initialize :after ((query op-query) slot-names &key
tailable-cursor slave-ok no-cursor-timeout
await-data exhaust partial)
(unless (slot-value query 'query)
(setf (slot-value query 'query)
(make-hash-table :test 'equal)))
(unless (slot-value query 'return-field-selector)
(setf (slot-value query 'return-field-selector)
(make-hash-table :test 'equal)))
(let ((bits nil))
(when tailable-cursor (push 1 bits))
(when slave-ok (push 2 bits))
(when no-cursor-timeout (push 4 bits))
(when await-data (push 5 bits))
(when exhaust (push 6 bits))
(when partial (push 7 bits))
(dolist (bit bits)
(setf (ldb (byte 1 bit)
(slot-value query 'flags))
1))))
(define-protocol-message op-getmore +op-get-more+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(number-to-return
:initarg :number-to-return
:bson-type :int32)
(cursor-id
:initarg :cursor-id
:bson-type :int64))
(define-protocol-message op-delete +op-delete+
(zero
:initform 0
:bson-type :int32)
(full-collection-name
:initarg :full-collection-name
:initform ""
:bson-type :cstring)
(flags
:initform 0
:bson-type :int32)
(selector
:initarg :selector
:bson-type :document))
(defmethod shared-initialize :after ((msg op-delete) slot-names &key single-remove &allow-other-keys)
(when single-remove
(setf (ldb (byte 1 0)
(slot-value msg 'flags))
1)))
(define-protocol-message op-kill-cursors +op-kill-cursors+
(zero
:initform 0
:bson-type :int32)
(number-of-cursor-ids
:bson-type :int32)
(cursor-ids
:initform nil
:initarg :cursor-ids
:bson-type :int64
:list-p t))
(defmethod shared-initialize :after ((msg op-kill-cursors) slot-names &key &allow-other-keys)
(setf (slot-value msg 'number-of-cursor-ids)
(length (slot-value msg 'cursor-ids))))
(define-protocol-message op-reply +op-reply+
(response-flags
:reader op-reply-response-flags
:bson-type :int32)
(cursor-id
:reader op-reply-cursor-id
:bson-type :int64)
(starting-from
:reader op-reply-starting-from
:bson-type :int32)
(number-returned
:reader op-reply-number-returned
:bson-type :int32)
(documents
:reader op-reply-documents
:initform nil
:bson-type :document
:list-p t))
(defmacro define-reply-flag-predicate (name bitnum)
`(defun ,name (reply)
(= (ldb (byte 1 ,bitnum)
(op-reply-response-flags reply))
1)))
(define-reply-flag-predicate cursor-not-found-p 0)
(define-reply-flag-predicate query-failure-p 1)
(define-reply-flag-predicate await-capable-p 3)
(defun check-reply-impl (reply error-handler)
(cond
((cursor-not-found-p reply)
(funcall error-handler
"Cursor '~A' not found"
(op-reply-cursor-id reply))
nil)
((query-failure-p reply)
(funcall error-handler
(gethash "$err" (car (op-reply-documents reply))))
nil)
((gethash "errmsg" (first (op-reply-documents reply)))
(funcall error-handler
(gethash "errmsg" (first (op-reply-documents reply))))
nil)
(t t)))
(defun check-reply (reply)
(check-reply-impl reply #'error)
reply)
(defun check-reply-async (reply callback)
(labels ((check-reply-callback (&rest args)
(funcall callback
(apply #'format args))))
(when (check-reply-impl reply #'check-reply-callback)
(funcall callback nil reply))))
(defgeneric encode-protocol-message (message target)
(:documentation "Serialize MESSAGE to TARGET"))
(defmethod encode-protocol-message (message (target (eql :vector)))
(encode-protocol-message message
(make-array 0
:element-type '(unsigned-byte 8)
:fill-pointer 0
:adjustable t)))
(defmethod encode-protocol-message (message (target (eql :list)))
(coerce (encode-protocol-message message :vector)
'list))
(defmethod encode-protocol-message :around (message target)
(let ((*encoded-bytes-count* 0))
(call-next-method)))
(defmethod encode-protocol-message (message target)
(let ((size (with-count-encoded-bytes
(dotimes (i 4)
(encode-byte 0 target))
(iter (for slot in (cdr (class-slots (class-of message))))
(for value = #-lispworks (slot-value-using-class (class-of message)
message
slot)
#+lispworks (slot-value-using-class (class-of message)
message
(slot-definition-name slot)))
(for encoder = (message-effective-slot-encoder slot))
(when value
(funcall encoder value target)))))
(arr (make-array 4 :element-type '(unsigned-byte 8) :fill-pointer 0)))
(encode-int32 size arr)
(bson-target-replace target arr 0)
target))
(defun decode-op-reply (source)
(let* ((reply (make-instance 'op-reply))
(reply-class (class-of reply))
(*decoded-bytes-count* 0))
(iter (for slot in (butlast (class-slots reply-class)))
(setf #-lispworks (slot-value-using-class reply-class reply slot)
#+lispworks (slot-value-using-class reply-class reply (slot-definition-name slot))
(funcall (message-effective-slot-decoder slot)
source)))
(iter (for i from 0 below (op-reply-number-returned reply))
(push (decode-document source)
(slot-value reply 'documents)))
(setf (slot-value reply 'documents)
(nreverse (slot-value reply 'documents)))
reply))
|
73d9d0ce99184333f7b87cef502796046c5136d833e8c130d25424ce9fc03540 | anoma/juvix | Translation.hs | module Juvix.Compiler.Concrete.Translation where
import Juvix . Compiler . Concrete . Translation . FromParsed
import Juvix.Compiler.Concrete.Language
import Juvix.Compiler.Concrete.Translation.FromParsed qualified as Scoper
import Juvix.Compiler.Concrete.Translation.FromParsed.Analysis.PathResolver
import Juvix.Compiler.Concrete.Translation.FromSource qualified as Parser
import Juvix.Compiler.Pipeline.EntryPoint
import Juvix.Prelude
type JudocStash = State (Maybe (Judoc 'Parsed))
fromSource ::
(Members '[Files, Error JuvixError, NameIdGen, Reader EntryPoint, PathResolver] r) =>
EntryPoint ->
Sem r Scoper.ScoperResult
fromSource = Parser.fromSource >=> Scoper.fromParsed
| null | https://raw.githubusercontent.com/anoma/juvix/807b3b1770289b8921304e92e7305c55c2e11f8f/src/Juvix/Compiler/Concrete/Translation.hs | haskell | module Juvix.Compiler.Concrete.Translation where
import Juvix . Compiler . Concrete . Translation . FromParsed
import Juvix.Compiler.Concrete.Language
import Juvix.Compiler.Concrete.Translation.FromParsed qualified as Scoper
import Juvix.Compiler.Concrete.Translation.FromParsed.Analysis.PathResolver
import Juvix.Compiler.Concrete.Translation.FromSource qualified as Parser
import Juvix.Compiler.Pipeline.EntryPoint
import Juvix.Prelude
type JudocStash = State (Maybe (Judoc 'Parsed))
fromSource ::
(Members '[Files, Error JuvixError, NameIdGen, Reader EntryPoint, PathResolver] r) =>
EntryPoint ->
Sem r Scoper.ScoperResult
fromSource = Parser.fromSource >=> Scoper.fromParsed
| |
9edb5e85ab5e99bb8b4497ee116d0294fbd92b5a5f54cfa643241ba35281a8a6 | footprintanalytics/footprint-web | schema.cljc | (ns metabase.mbql.schema
"Schema for validating a *normalized* MBQL query. This is also the definitive grammar for MBQL, wow!"
(:refer-clojure :exclude [count distinct min max + - / * and or not not-empty = < > <= >= time case concat replace abs])
#?@
(:clj
[(:require
[clojure.core :as core]
[clojure.set :as set]
[metabase.mbql.schema.helpers :as helpers :refer [is-clause?]]
[metabase.mbql.schema.macros :refer [defclause one-of]]
[schema.core :as s])
(:import java.time.format.DateTimeFormatter)]
:cljs
[(:require
[clojure.core :as core]
[clojure.set :as set]
[metabase.mbql.schema.helpers :as helpers :refer [is-clause?]]
[metabase.mbql.schema.macros :refer [defclause one-of]]
[schema.core :as s])]))
;; A NOTE ABOUT METADATA:
;;
;; Clauses below are marked with the following tags for documentation purposes:
;;
;; * Clauses marked `^:sugar` are syntactic sugar primarily intended to make generating queries easier on the
;; frontend. These clauses are automatically rewritten as simpler clauses by the `desugar` or `expand-macros`
;; middleware. Thus driver implementations do not need to handle these clauses.
;;
;; * Clauses marked `^:internal` are automatically generated by `wrap-value-literals` or other middleware from values
;; passed in. They are not intended to be used by the frontend when generating a query. These add certain
;; information that simplify driver implementations. When writing MBQL queries yourself you should pretend these
;; clauses don't exist.
;;
;; * Clauses marked `^{:requires-features #{feature+}}` require a certain set of features to be used. At some date in
;; the future we will likely add middleware that uses this metadata to automatically validate that a driver has the
;; features needed to run the query in question.
;;; +----------------------------------------------------------------------------------------------------------------+
;;; | MBQL Clauses |
;;; +----------------------------------------------------------------------------------------------------------------+
;;; ------------------------------------------------- Datetime Stuff -------------------------------------------------
;; `:day-of-week` depends on the [[metabase.public-settings/start-of-week]] Setting, by default Sunday.
;; 1 = first day of the week (e.g. Sunday)
;; 7 = last day of the week (e.g. Saturday)
(def date-bucketing-units
"Set of valid units for bucketing or comparing against a *date* Field."
#{:default :day :day-of-week :day-of-month :day-of-year :week :week-of-year
:month :month-of-year :quarter :quarter-of-year :year})
(def time-bucketing-units
"Set of valid units for bucketing or comparing against a *time* Field."
#{:default :millisecond :second :minute :minute-of-hour :hour :hour-of-day})
(def datetime-bucketing-units
"Set of valid units for bucketing or comparing against a *datetime* Field."
(set/union date-bucketing-units time-bucketing-units))
(def DateUnit
"Valid unit for *date* bucketing."
(s/named
(apply s/enum date-bucketing-units)
"date-bucketing-unit"))
;; it could make sense to say hour-of-day(field) = hour-of-day("2018-10-10T12:00")
;; but it does not make sense to say month-of-year(field) = month-of-year("08:00:00"),
;; does it? So we'll restrict the set of units a TimeValue can have to ones that have no notion of day/date.
(def TimeUnit
"Valid unit for *time* bucketing."
(s/named
(apply s/enum time-bucketing-units)
"time-bucketing-unit"))
(def DateTimeUnit
"Valid unit for *datetime* bucketing."
(s/named
(apply s/enum datetime-bucketing-units)
"datetime-bucketing-unit"))
(def TemporalExtractUnits
"Valid units to extract from a temporal."
(s/named
(apply s/enum #{:year-of-era
:quarter-of-year
:month-of-year
:week-of-year-iso
:week-of-year-us
:week-of-year-instance
:day-of-month
:day-of-week
:hour-of-day
:minute-of-hour
:second-of-minute})
"temporal-extract-units"))
(def DatetimeDiffUnits
"Valid units for a datetime-diff clause."
(s/named
(apply s/enum #{:second :minute :hour :day :week :month :year})
"datetime-diff-units"))
(def ExtractWeekModes
"Valid modes to extract weeks."
(s/named
(apply s/enum #{:iso :us :instance})
"extract-week-modes"))
(def ^:private RelativeDatetimeUnit
(s/named
(apply s/enum #{:default :minute :hour :day :week :month :quarter :year})
"relative-datetime-unit"))
#?(:clj
(defn- can-parse-iso-8601?
[^DateTimeFormatter formatter ^String s]
(when (string? s)
(try
(.parse formatter s)
true
(catch Throwable _
false))))
:cljs
(defn- can-parse-iso-8601?
[s]
(when (string? s)
(not= (.parse js/Date s) ##NaN))))
;; TODO -- currently these are all the same between date/time/datetime
(def ^{:arglists '([s])} can-parse-date?
"Returns whether a string can be parsed to an ISO 8601 date or not."
#?(:clj (partial can-parse-iso-8601? DateTimeFormatter/ISO_DATE)
:cljs can-parse-iso-8601?))
(def ^{:arglists '([s])} can-parse-datetime?
"Returns whether a string can be parsed to an ISO 8601 datetime or not."
#?(:clj (partial can-parse-iso-8601? DateTimeFormatter/ISO_DATE_TIME)
:cljs can-parse-iso-8601?))
(def ^{:arglists '([s])} can-parse-time?
"Returns whether a string can be parsed to an ISO 8601 time or not."
#?(:clj (partial can-parse-iso-8601? DateTimeFormatter/ISO_TIME)
:cljs can-parse-iso-8601?))
(def LiteralDateString
  "Schema for an ISO-8601-formatted date string literal."
  ;; NOTE: the explanation string previously said "datetime", but this schema
  ;; validates *date* strings (via `can-parse-date?`).
  (s/constrained helpers/NonBlankString can-parse-date? "valid ISO-8601 date string literal"))
(def LiteralDatetimeString
  "Schema for an ISO-8601-formatted datetime string literal."
  (s/constrained helpers/NonBlankString can-parse-datetime? "valid ISO-8601 datetime string literal"))

(def LiteralTimeString
  "Schema for an ISO-8601-formatted time string literal."
  (s/constrained helpers/NonBlankString can-parse-time? "valid ISO-8601 time string literal"))
;; TODO - `unit` is not allowed if `n` is `current`
(defclause relative-datetime
n (s/cond-pre (s/eq :current) s/Int)
unit (optional RelativeDatetimeUnit))
(defclause interval
n s/Int
unit RelativeDatetimeUnit)
This clause is automatically generated by middleware when datetime literals ( literal strings or one of the Java
types ) are encountered . Unit is inferred by looking at the Field the timestamp is compared against . Implemented
;; mostly to convenience driver implementations. You don't need to use this form directly when writing MBQL; datetime
;; literal strings are preferred instead.
;;
;; example:
[: = [ : field 10 { : temporal - unit : day } ] " 2018 - 10 - 02 " ]
;;
;; becomes:
[: = [ : field 10 { : temporal - unit : day } ] [: absolute - datetime # inst " 2018 - 10 - 02 " : day ] ]
(def ^:internal ^{:clause-name :absolute-datetime} absolute-datetime
"Schema for an `:absolute-datetime` clause."
(s/conditional
#(core/not (is-clause? :absolute-datetime %))
(helpers/clause
:absolute-datetime
"t"
#?(:clj (s/cond-pre java.time.LocalDate java.time.LocalDateTime java.time.OffsetDateTime java.time.ZonedDateTime)
:cljs js/Date)
"unit"
DateTimeUnit)
#(instance? #?(:clj java.time.LocalDate :cljs js/Date) (second %))
(helpers/clause
:absolute-datetime
"date" #?(:clj java.time.LocalDate :cljs js/Date)
"unit" DateUnit)
:else
(helpers/clause
:absolute-datetime
"datetime"
#?(:clj (s/cond-pre java.time.LocalDateTime java.time.OffsetDateTime java.time.ZonedDateTime)
:cljs js/Date)
"unit"
DateTimeUnit)))
;; almost exactly the same as `absolute-datetime`, but generated in some sitations where the literal in question was
clearly a time ( e.g. " 08:00:00.000 " ) and/or the Field derived from ` : type / Time ` and/or the unit was a
;; time-bucketing unit
;;
;; TODO - should we have a separate `date` type as well
(defclause ^:internal time
time #?(:clj (s/cond-pre java.time.LocalTime java.time.OffsetTime)
:cljs js/Date)
unit TimeUnit)
(def ^:private DateOrDatetimeLiteral
"Schema for a valid date or datetime literal."
(s/conditional
(partial is-clause? :absolute-datetime)
absolute-datetime
can-parse-datetime?
LiteralDatetimeString
can-parse-date?
LiteralDateString
:else
(s/cond-pre
    ;; literal datetime strings and Java types will get transformed to `absolute-datetime` clauses automatically by
;; middleware so drivers don't need to deal with these directly. You only need to worry about handling
;; `absolute-datetime` clauses.
#?@(:clj
[java.time.LocalDate
java.time.LocalDateTime
java.time.OffsetDateTime
java.time.ZonedDateTime]
:cljs
[js/Date]))))
(def ^:private TimeLiteral
"Schema for valid time literals."
(s/conditional
(partial is-clause? :time)
time
can-parse-time?
LiteralTimeString
:else
(s/cond-pre
    ;; literal datetime strings and Java types will get transformed to `time` clauses automatically by
;; middleware so drivers don't need to deal with these directly. You only need to worry about handling
;; `time` clauses.
#?@(:clj
[java.time.LocalTime
java.time.OffsetTime]
:cljs
[js/Date]))))
(def ^:private TemporalLiteral
"Schema for valid temporal literals."
(s/cond-pre TimeLiteral DateOrDatetimeLiteral))
(def DateTimeValue
"Schema for a datetime value drivers will personally have to handle, either an `absolute-datetime` form or a
`relative-datetime` form."
(one-of absolute-datetime relative-datetime time))
;;; -------------------------------------------------- Other Values --------------------------------------------------
(def ValueTypeInfo
"Type info about a value in a `:value` clause. Added automatically by `wrap-value-literals` middleware to values in
filter clauses based on the Field in the clause."
TODO -- these should use ` lisp - case ` like everything else in MBQL .
{(s/optional-key :database_type) (s/maybe helpers/NonBlankString)
(s/optional-key :base_type) (s/maybe helpers/FieldType)
(s/optional-key :semantic_type) (s/maybe helpers/FieldSemanticOrRelationType)
(s/optional-key :unit) (s/maybe DateTimeUnit)
(s/optional-key :name) (s/maybe helpers/NonBlankString)
s/Keyword s/Any})
;; Arguments to filter clauses are automatically replaced with [:value <value> <type-info>] clauses by the
;; `wrap-value-literals` middleware. This is done to make it easier to implement query processors, because most driver
;; implementations dispatch off of Object type, which is often not enough to make informed decisions about how to
;; treat certain objects. For example, a string compared against a Postgres UUID Field needs to be parsed into a UUID
;; object, since text <-> UUID comparison doesn't work in Postgres. For this reason, raw literals in `:filter`
;; clauses are wrapped in `:value` clauses and given information about the type of the Field they will be compared to.
(defclause ^:internal value
value s/Any
type-info (s/maybe ValueTypeInfo))
;;; ----------------------------------------------------- Fields -----------------------------------------------------
;; Expression *references* refer to a something in the `:expressions` clause, e.g. something like
;;
[: field 1 nil ] [: field 2 nil ] ]
;;
As of 0.42.0 ` : expression ` references can have an optional options map
(defclause ^{:requires-features #{:expressions}} expression
expression-name helpers/NonBlankString
options (optional (s/pred map? "map")))
(def BinningStrategyName
"Schema for a valid value for the `strategy-name` param of a [[field]] clause with `:binning` information."
(s/enum :num-bins :bin-width :default))
(defn- validate-bin-width
  "Wrap `schema` with a constraint requiring a :bin-width value whenever the
  :bin-width binning strategy is selected."
  [schema]
  (s/constrained
   schema
   (fn [{:keys [strategy bin-width]}]
     ;; either some other strategy is in use, or :bin-width must be present
     (core/or (not= strategy :bin-width)
              bin-width))
   "You must specify :bin-width when using the :bin-width strategy."))
(defn- validate-num-bins
  "Wrap `schema` with a constraint requiring a :num-bins value whenever the
  :num-bins binning strategy is selected."
  [schema]
  (s/constrained
   schema
   (fn [{:keys [strategy num-bins]}]
     ;; either some other strategy is in use, or :num-bins must be present
     (core/or (not= strategy :num-bins)
              num-bins))
   "You must specify :num-bins when using the :num-bins strategy."))
(def FieldBinningOptions
  "Schema for `:binning` options passed to a `:field` clause."
  ;; the two validators enforce that :num-bins/:bin-width are actually supplied
  ;; when the corresponding :strategy is selected
  (-> {:strategy BinningStrategyName
       (s/optional-key :num-bins) helpers/IntGreaterThanZero
       (s/optional-key :bin-width) (s/constrained s/Num (complement neg?) "bin width must be >= 0.")
       ;; open map: additional keys are allowed
       s/Keyword s/Any}
      validate-bin-width
      validate-num-bins))
(defn valid-temporal-unit-for-base-type?
  "Whether `temporal-unit` (e.g. `:day`) is valid for the given `base-type` (e.g. `:type/Date`). If either is `nil` this
  will return truthy. Accepts either map of `field-options` or `base-type` and `temporal-unit` passed separately."
  ([{:keys [base-type temporal-unit] :as _field-options}]
   (valid-temporal-unit-for-base-type? base-type temporal-unit))
  ([base-type temporal-unit]
   (let [allowed-units (core/and temporal-unit
                                 base-type
                                 ;; pick the unit set for the first matching ancestor type;
                                 ;; nil when base-type matches none of them
                                 (cond
                                   (isa? base-type :type/Date)     date-bucketing-units
                                   (isa? base-type :type/Time)     time-bucketing-units
                                   (isa? base-type :type/DateTime) datetime-bucketing-units))]
     (if allowed-units
       (contains? allowed-units temporal-unit)
       ;; missing unit, missing base type, or non-temporal base type: vacuously valid
       true))))
(defn- validate-temporal-unit
  "Wrap `schema` with a constraint that any `:temporal-unit` in `:field` options is compatible with the `:base-type`."
  [schema]
  ;; TODO - consider breaking this out into separate constraints for the three different types so we can generate more
  ;; specific error messages
  (s/constrained
   schema
   valid-temporal-unit-for-base-type?
   "Invalid :temporal-unit for the specified :base-type."))
(defn- no-binning-options-at-top-level
  "Wrap `schema` with a constraint that rejects binning-related keys (i.e. :strategy)
  placed at the top level of :field options instead of under :binning."
  [schema]
  (s/constrained
   schema
   (fn [field-options]
     (core/not (:strategy field-options)))
   "Found :binning keys at the top level of :field options. binning-related options belong under the :binning key."))
(def ^:private FieldOptions
(-> {(s/optional-key :base-type) (s/maybe helpers/FieldType)
;;
;; replaces `fk->`
;;
       ;; `:source-field` is used to refer to a Field from a different Table you would like IMPLICITLY JOINED to the
;; source table.
;;
;; If both `:source-field` and `:join-alias` are supplied, `:join-alias` should be used to perform the join;
;; `:source-field` should be for information purposes only.
(s/optional-key :source-field) (s/maybe (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))
;;
       ;; `:temporal-unit` is used to specify DATE BUCKETING for a Field that represents a moment in time of some sort.
;;
       ;; There is no requirement that all `:type/Temporal` derived Fields specify a `:temporal-unit`, but for legacy
;; reasons `:field` clauses that refer to `:type/DateTime` Fields will be automatically "bucketed" in the
;; `:breakout` and `:filter` clauses, but nowhere else. Auto-bucketing only applies to `:filter` clauses when
;; values for comparison are `yyyy-MM-dd` date strings. See the `auto-bucket-datetimes` middleware for more
;; details. `:field` clauses elsewhere will not be automatically bucketed, so drivers still need to make sure they
       ;; do any special datetime handling for plain `:field` clauses when their Field derives from `:type/DateTime`.
(s/optional-key :temporal-unit) (s/maybe DateTimeUnit)
;;
;; replaces `joined-field`
;;
       ;; `:join-alias` is used to refer to a Field from a different Table/nested query that you are
;; JOINING against.
(s/optional-key :join-alias) (s/maybe helpers/NonBlankString)
;;
;; replaces `binning-strategy`
;;
;; Using binning requires the driver to support the `:binning` feature.
(s/optional-key :binning) (s/maybe FieldBinningOptions)
;;
s/Keyword s/Any}
validate-temporal-unit
no-binning-options-at-top-level))
(defn- require-base-type-for-field-name
  "Wrap a `:field` clause `schema` so that clauses referring to a Field by string
  name must carry a :base-type in their options map."
  [schema]
  (s/constrained
   schema
   (fn [[_tag id-or-name opts]]
     ;; integer Field IDs carry their own metadata; only string names need :base-type
     (core/or (core/not (string? id-or-name))
              (:base-type opts)))
   ":field clauses using a string field name must specify :base-type."))
(def ^{:clause-name :field, :added "0.39.0"} field
"Schema for a `:field` clause."
(-> (helpers/clause
:field
"id-or-name" (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString)
"options" (s/maybe (s/recursive #'FieldOptions)))
require-base-type-for-field-name))
(def ^{:clause-name :field, :added "0.39.0"} field:id
"Schema for a `:field` clause, with the added constraint that it must use an integer Field ID."
(s/constrained
field
(fn [[_ id-or-name]]
(integer? id-or-name))
"Must be a :field with an integer Field ID."))
(def ^{:clause-name :field, :added "0.39.0"} field:name
"Schema for a `:field` clause, with the added constraint that it must use an string Field name."
(s/constrained
field
(fn [[_ id-or-name]]
(string? id-or-name))
"Must be a :field with a string Field name."))
(def ^:private Field*
(one-of expression field))
TODO -- consider renaming this FieldOrExpression
(def Field
"Schema for either a `:field` clause (reference to a Field) or an `:expression` clause (reference to an expression)."
(s/recursive #'Field*))
;; aggregate field reference refers to an aggregation, e.g.
;;
;; {:aggregation [[:count]]
: order - by [ [: asc [: aggregation 0 ] ] ] } ; ; refers to the 0th aggregation , ` : count `
;;
Currently aggregate Field references can only be used inside order - by clauses . In the future once we support SQL
;; `HAVING` we can allow them in filter clauses too
;;
;; TODO - shouldn't we allow composing aggregations in expressions? e.g.
;;
;; {:order-by [[:asc [:+ [:aggregation 0] [:aggregation 1]]]]}
;;
;; TODO - it would be nice if we could check that there's actually an aggregation with the corresponding index,
;; wouldn't it
;;
As of 0.42.0 ` : aggregation ` references can have an optional options map .
(defclause aggregation
aggregation-clause-index s/Int
options (optional (s/pred map? "map")))
(def FieldOrAggregationReference
"Schema for any type of valid Field clause, or for an indexed reference to an aggregation clause."
(s/if (partial is-clause? :aggregation)
aggregation
Field))
;;; -------------------------------------------------- Expressions ---------------------------------------------------
;; Expressions are "calculated column" definitions, defined once and then used elsewhere in the MBQL query.
(def string-expressions
"String functions"
#{:substring :trim :rtrim :ltrim :upper :lower :replace :concat :regex-match-first :coalesce :case})
(declare StringExpression)
(def ^:private StringExpressionArg
(s/conditional
string?
s/Str
(partial is-clause? string-expressions)
(s/recursive #'StringExpression)
(partial is-clause? :value)
value
:else
Field))
TODO - rename to numeric - expressions
(def arithmetic-expressions
"Set of valid arithmetic expression clause keywords."
#{:+ :- :/ :* :coalesce :length :round :ceil :floor :abs :power :sqrt :log :exp :case :datetime-diff})
(def boolean-expressions
"Set of valid boolean expression clause keywords."
#{:and :or :not :< :<= :> :>= := :!=})
(def ^:private aggregations #{:sum :avg :stddev :var :median :percentile :min :max :cum-count :cum-sum :count-where :sum-where :share :distinct :metric :aggregation-options :count})
;; TODO: expressions that return numerics should be in arithmetic-expressions
(def temporal-extract-functions
"Functions to extract components of a date, datetime."
#{;; extraction functions (get some component of a given temporal value/column)
:temporal-extract
;; SUGAR drivers do not need to implement
:get-year :get-quarter :get-month :get-week :get-day :get-day-of-week :get-hour :get-minute :get-second})
(def date-arithmetic-functions
"Functions to do math with date, datetime."
#{:+ :datetime-add :datetime-subtract})
(def date+time+timezone-functions
"Date, time, and timezone related functions."
(set/union temporal-extract-functions date-arithmetic-functions))
(declare ArithmeticExpression)
(declare BooleanExpression)
(declare DatetimeExpression)
(declare Aggregation)
(def ^:private NumericExpressionArg
(s/conditional
number?
s/Num
(partial is-clause? arithmetic-expressions)
(s/recursive #'ArithmeticExpression)
(partial is-clause? temporal-extract-functions)
(s/recursive #'DatetimeExpression)
(partial is-clause? aggregations)
(s/recursive #'Aggregation)
(partial is-clause? :value)
value
:else
Field))
(def ^:private DateTimeExpressionArg
(s/conditional
(partial is-clause? aggregations)
(s/recursive #'Aggregation)
(partial is-clause? :value)
value
;; Recursively doing date math
(partial is-clause? date-arithmetic-functions)
(s/recursive #'DatetimeExpression)
:else
(s/cond-pre DateOrDatetimeLiteral Field)))
(def ^:private ExpressionArg
(s/conditional
number?
s/Num
boolean?
s/Bool
(partial is-clause? boolean-expressions)
(s/recursive #'BooleanExpression)
(partial is-clause? arithmetic-expressions)
(s/recursive #'ArithmeticExpression)
string?
s/Str
(partial is-clause? string-expressions)
(s/recursive #'StringExpression)
(partial is-clause? temporal-extract-functions)
(s/recursive #'DatetimeExpression)
(partial is-clause? :value)
value
:else
Field))
(def ^:private NumericExpressionArgOrInterval
(s/if (partial is-clause? :interval)
interval
NumericExpressionArg))
(defclause ^{:requires-features #{:expressions}} coalesce
a ExpressionArg, b ExpressionArg, more (rest ExpressionArg))
(defclause ^{:requires-features #{:expressions}} substring
s StringExpressionArg, start NumericExpressionArg, length (optional NumericExpressionArg))
(defclause ^{:requires-features #{:expressions}} length
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} trim
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} rtrim
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} ltrim
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} upper
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} lower
s StringExpressionArg)
(defclause ^{:requires-features #{:expressions}} replace
s StringExpressionArg, match s/Str, replacement s/Str)
(defclause ^{:requires-features #{:expressions}} concat
a StringExpressionArg, b StringExpressionArg, more (rest StringExpressionArg))
(defclause ^{:requires-features #{:expressions :regex}} regex-match-first
s StringExpressionArg, pattern s/Str)
(defclause ^{:requires-features #{:expressions}} +
x NumericExpressionArgOrInterval, y NumericExpressionArgOrInterval, more (rest NumericExpressionArgOrInterval))
(defclause ^{:requires-features #{:expressions}} -
x NumericExpressionArg, y NumericExpressionArgOrInterval, more (rest NumericExpressionArgOrInterval))
(defclause ^{:requires-features #{:expressions}} /, x NumericExpressionArg, y NumericExpressionArg, more (rest NumericExpressionArg))
(defclause ^{:requires-features #{:expressions}} *, x NumericExpressionArg, y NumericExpressionArg, more (rest NumericExpressionArg))
(defclause ^{:requires-features #{:expressions}} floor
x NumericExpressionArg)
(defclause ^{:requires-features #{:expressions}} ceil
x NumericExpressionArg)
(defclause ^{:requires-features #{:expressions}} round
x NumericExpressionArg)
(defclause ^{:requires-features #{:expressions}} abs
x NumericExpressionArg)
(defclause ^{:requires-features #{:advanced-math-expressions}} power
x NumericExpressionArg, y NumericExpressionArg)
(defclause ^{:requires-features #{:advanced-math-expressions}} sqrt
x NumericExpressionArg)
(defclause ^{:requires-features #{:advanced-math-expressions}} exp
x NumericExpressionArg)
(defclause ^{:requires-features #{:advanced-math-expressions}} log
x NumericExpressionArg)
TODO : rename to NumericExpression *
(declare ArithmeticExpression*)
TODO : rename to NumericExpression
(def ^:private ArithmeticExpression
"Schema for the definition of an arithmetic expression. All arithmetic expressions evaluate to numeric values."
(s/recursive #'ArithmeticExpression*))
;; The result is positive if x <= y, and negative otherwise.
;;
;; Days, weeks, months, and years are only counted if they are whole to the "day".
;; For example, datetimeDiff("2022-01-30", "2022-02-28", "month") returns 0 months.
;;
;; If the values are datetimes, the time doesn't matter for these units.
;; For example, datetimeDiff("2022-01-01T09:00:00", "2022-01-02T08:00:00", "day") returns 1 day even though it is less than 24 hours.
;;
;; Hours, minutes, and seconds are only counted if they are whole.
;; For example, datetimeDiff("2022-01-01T01:00:30", "2022-01-01T02:00:29", "hour") returns 0 hours.
(defclause ^{:requires-features #{:datetime-diff}} datetime-diff
datetime-x DateTimeExpressionArg
datetime-y DateTimeExpressionArg
unit DatetimeDiffUnits)
(defclause ^{:requires-features #{:temporal-extract}} temporal-extract
datetime DateTimeExpressionArg
unit TemporalExtractUnits
mode (optional ExtractWeekModes)) ;; only for get-week
;; SUGAR CLAUSE: get-year, get-month... clauses are all sugars clause that will be rewritten as [:temporal-extract column :year]
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-year
date DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-quarter
date DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-month
date DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-week
date DateTimeExpressionArg
mode (optional ExtractWeekModes))
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-day
date DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-day-of-week
date DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-hour
datetime DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-minute
datetime DateTimeExpressionArg)
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-second
datetime DateTimeExpressionArg)
(def ^:private ArithmeticDateTimeUnit
(s/named
(apply s/enum #{:millisecond :second :minute :hour :day :week :month :quarter :year})
"arithmetic-datetime-unit"))
(defclause ^{:requires-features #{:date-arithmetics}} datetime-add
datetime DateTimeExpressionArg
amount NumericExpressionArg
unit ArithmeticDateTimeUnit)
(defclause ^{:requires-features #{:date-arithmetics}} datetime-subtract
datetime DateTimeExpressionArg
amount NumericExpressionArg
unit ArithmeticDateTimeUnit)
(def ^:private DatetimeExpression*
(one-of + temporal-extract datetime-add datetime-subtract
;; SUGAR drivers do not need to implement
get-year get-quarter get-month get-week get-day get-day-of-week
get-hour get-minute get-second))
(def DatetimeExpression
"Schema for the definition of a date function expression."
(s/recursive #'DatetimeExpression*))
(declare StringExpression*)
(def ^:private StringExpression
"Schema for the definition of an string expression."
(s/recursive #'StringExpression*))
;;; ----------------------------------------------------- Filter -----------------------------------------------------
(declare Filter)
(defclause and
first-clause (s/recursive #'Filter)
second-clause (s/recursive #'Filter)
other-clauses (rest (s/recursive #'Filter)))
(defclause or
first-clause (s/recursive #'Filter)
second-clause (s/recursive #'Filter)
other-clauses (rest (s/recursive #'Filter)))
(defclause not, clause (s/recursive #'Filter))
(def ^:private FieldOrRelativeDatetime
(s/if (partial is-clause? :relative-datetime)
relative-datetime
Field))
(def ^:private EqualityComparable
"Schema for things things that make sense in a `=` or `!=` filter, i.e. things that can be compared for equality."
(s/maybe
(s/cond-pre
s/Bool
s/Num
s/Str
TemporalLiteral
FieldOrRelativeDatetime
ExpressionArg
value)))
(def ^:private OrderComparable
"Schema for things that make sense in a filter like `>` or `<`, i.e. things that can be sorted."
(s/if (partial is-clause? :value)
value
(s/cond-pre
s/Num
s/Str
TemporalLiteral
ExpressionArg
FieldOrRelativeDatetime)))
;; For all of the non-compound Filter clauses below, the first arg is an implicit Field ID.
;; These are SORT OF SUGARY, because extra values will automatically be converted to compound clauses. Driver
;; implementations only need to handle the 2-arg forms.
;;
;; `=` works like SQL `IN` with more than 2 args
;;
;;    [:= [:field 1 nil] 2 3] --[DESUGAR]--> [:or [:= [:field 1 nil] 2] [:= [:field 1 nil] 3]]
;;
;; `!=` works like SQL `NOT IN` with more than 2 args
;;
;;    [:!= [:field 1 nil] 2 3] --[DESUGAR]--> [:and [:!= [:field 1 nil] 2] [:!= [:field 1 nil] 3]]
(defclause =, field EqualityComparable, value-or-field EqualityComparable, more-values-or-fields (rest EqualityComparable))
(defclause !=, field EqualityComparable, value-or-field EqualityComparable, more-values-or-fields (rest EqualityComparable))
(defclause <, field OrderComparable, value-or-field OrderComparable)
(defclause >, field OrderComparable, value-or-field OrderComparable)
(defclause <=, field OrderComparable, value-or-field OrderComparable)
(defclause >=, field OrderComparable, value-or-field OrderComparable)
;; :between is INCLUSIVE just like SQL !!!
(defclause between field OrderComparable, min OrderComparable, max OrderComparable)
;; SUGAR CLAUSE: This is automatically written as a pair of `:between` clauses by the desugaring middleware.
(defclause ^:sugar inside
lat-field OrderComparable
lon-field OrderComparable
lat-max OrderComparable
lon-min OrderComparable
lat-min OrderComparable
lon-max OrderComparable)
;; SUGAR CLAUSES: These are rewritten as `[:= <field> nil]` and `[:not= <field> nil]` respectively
(defclause ^:sugar is-null, field Field)
(defclause ^:sugar not-null, field Field)
;; These are rewritten as `[:or [:= <field> nil] [:= <field> ""]]` and
;; `[:and [:not= <field> nil] [:not= <field> ""]]`
(defclause ^:sugar is-empty, field Field)
(defclause ^:sugar not-empty, field Field)
(def ^:private StringFilterOptions
{(s/optional-key :case-sensitive) s/Bool}) ; default true
(defclause starts-with, field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(defclause ends-with, field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(defclause contains, field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
;; SUGAR: this is rewritten as [:not [:contains ...]]
(defclause ^:sugar does-not-contain
field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(def ^:private TimeIntervalOptions
Should we include partial results for the current day / month / etc ? Defaults to ` false ` ; set this to ` true ` to
;; include them.
{(s/optional-key :include-current) s/Bool}) ; default false
;; Filter subclause. Syntactic sugar for specifying a specific time interval.
;;
Return rows where datetime Field 100 's value is in the current month
;;
[: time - interval [: field 100 nil ] : current : month ]
;;
Return rows where datetime Field 100 's value is in the current month , including partial results for the
current day
;;
[: time - interval [: field 100 nil ] : current : month { : include - current true } ]
;;
;; SUGAR: This is automatically rewritten as a filter clause with a relative-datetime value
(defclause ^:sugar time-interval
field Field
n (s/cond-pre
s/Int
(s/enum :current :last :next))
unit RelativeDatetimeUnit
options (optional TimeIntervalOptions))
A segment is a special ` macro ` that saves some pre - definied filter clause , e.g. [: segment 1 ]
this gets replaced by a normal Filter clause in
;;
It can also be used for GA , which looks something like ` [: segment " gaid::-11 " ] ` . GA segments are n't actually MBQL
segments and pass - thru to GA .
(defclause ^:sugar segment, segment-id (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))
(declare BooleanExpression*)
(def ^:private BooleanExpression
"Schema for the definition of an arithmetic expression."
(s/recursive #'BooleanExpression*))
(def ^:private BooleanExpression*
(one-of and or not < <= > >= = !=))
;; A filter is either one of the expression clause families (arithmetic/string/boolean) or a filter clause proper.
(def ^:private Filter*
  (s/conditional
   (partial is-clause? arithmetic-expressions) ArithmeticExpression
   (partial is-clause? string-expressions)     StringExpression
   (partial is-clause? boolean-expressions)    BooleanExpression
   :else
   (one-of
    ;; filters drivers must implement
    and or not = != < > <= >= between starts-with ends-with contains
    ;; SUGAR filters drivers do not need to implement
    does-not-contain inside is-empty not-empty is-null not-null time-interval segment)))
(def Filter
  "Schema for a valid MBQL `:filter` clause."
  ;; recursive because filter clauses nest, e.g. inside `:and`/`:or`/`:not`
  (s/recursive #'Filter*))
;; each `:case` clause pairs a predicate (a Filter) with the expression to evaluate for it
(def ^:private CaseClause [(s/one Filter "pred") (s/one ExpressionArg "expr")])

(def ^:private CaseClauses [CaseClause])

;; `:default` -- presumably the fallback expression when no clause predicate matches -- TODO confirm
(def ^:private CaseOptions
  {(s/optional-key :default) ExpressionArg})

(defclause ^{:requires-features #{:basic-aggregations}} case
  clauses CaseClauses, options (optional CaseOptions))
;; TODO: rename to NumericExpression?
;; valid clause types inside a numeric/arithmetic expression
(def ^:private ArithmeticExpression*
  (one-of + - / * coalesce length floor ceil round abs power sqrt exp log case datetime-diff))

;; valid clause types inside a string expression
(def ^:private StringExpression*
  (one-of substring trim ltrim rtrim replace lower upper concat regex-match-first coalesce case))
(def FieldOrExpressionDef
  "Schema for anything that is accepted as a top-level expression definition, either an arithmetic expression such as a
  `:+` clause or a `:field` clause."
  ;; dispatch on the clause keyword; anything not an expression clause must be a Field reference
  (s/conditional
   (partial is-clause? arithmetic-expressions)       ArithmeticExpression
   (partial is-clause? string-expressions)           StringExpression
   (partial is-clause? boolean-expressions)          BooleanExpression
   (partial is-clause? date+time+timezone-functions) DatetimeExpression
   (partial is-clause? :case)                        case
   :else                                             Field))
;;; -------------------------------------------------- Aggregations --------------------------------------------------
;; For all of the 'normal' Aggregations below (excluding Metrics) fields are implicit Field IDs
;; cum-sum and cum-count are SUGAR because they're implemented in middleware. The clauses are swapped out with
;; `count` and `sum` aggregations respectively and summation is done in Clojure-land
;; `field` is optional for these two clauses (see the cum-count/count middleware note above)
(defclause ^{:requires-features #{:basic-aggregations}} ^:sugar count,     field (optional Field))
(defclause ^{:requires-features #{:basic-aggregations}} ^:sugar cum-count, field (optional Field))
;; technically aggregations besides count can also accept expressions as args, e.g.
;;
;;    [[:sum [:+ [:field 1 nil] [:field 2 nil]]]]
;;
;; Which is equivalent to SQL:
;;
;;    SUM(field_1 + field_2)
;; basic single-argument aggregations over a Field or expression
(defclause ^{:requires-features #{:basic-aggregations}} avg,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} cum-sum,  field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} distinct, field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} sum,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} min,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} max,      field-or-expression FieldOrExpressionDef)

;; predicated aggregations -- `pred` is any valid Filter clause
(defclause ^{:requires-features #{:basic-aggregations}} sum-where
  field-or-expression FieldOrExpressionDef, pred Filter)

(defclause ^{:requires-features #{:basic-aggregations}} count-where
  pred Filter)

;; NOTE(review): presumably the share (fraction) of rows matching `pred` -- confirm against driver implementations
(defclause ^{:requires-features #{:basic-aggregations}} share
  pred Filter)

(defclause ^{:requires-features #{:standard-deviation-aggregations}} stddev
  field-or-expression FieldOrExpressionDef)

;; local name is `ag:var` (the clause keyword is still `:var`) to avoid clashing with `clojure.core/var`
(declare ag:var) ;; for clj-kondo
(defclause ^{:requires-features #{:standard-deviation-aggregations}} [ag:var var]
  field-or-expression FieldOrExpressionDef)

(defclause ^{:requires-features #{:percentile-aggregations}} median
  field-or-expression FieldOrExpressionDef)

(defclause ^{:requires-features #{:percentile-aggregations}} percentile
  field-or-expression FieldOrExpressionDef, percentile NumericExpressionArg)
;; Metrics are just 'macros' (placeholders for other aggregations with optional filter and breakout clauses) that get
;; expanded to other aggregations/etc. in the expand-macros middleware
;;
;; METRICS WITH STRING IDS, e.g. `[:metric "ga:sessions"]`, are Google Analytics metrics, not Metabase metrics! They
;; pass straight thru to the GA query processor.
(defclause ^:sugar metric, metric-id (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))
;; the following are definitions for expression aggregations, e.g.
;;
;;    [:+ [:sum [:field 10 nil]] [:sum [:field 20 nil]]]
;; an unnamed aggregation is either an expression over aggregations (e.g. `[:+ [:sum ...] ...]`) or a plain
;; aggregation clause
(def ^:private UnnamedAggregation*
  (s/if (partial is-clause? arithmetic-expressions)
    ArithmeticExpression
    (one-of avg cum-sum distinct stddev sum min max metric share count-where
            sum-where case median percentile ag:var
            ;; SUGAR clauses
            cum-count count)))

(def ^:private UnnamedAggregation
  (s/recursive #'UnnamedAggregation*))
(def AggregationOptions
  "Additional options for any aggregation clause when wrapping it in `:aggregation-options`."
  {;; name to use for this aggregation in the native query instead of the default name (e.g. `count`)
   (s/optional-key :name)         helpers/NonBlankString
   ;; user-facing display name for this aggregation instead of the default one
   (s/optional-key :display-name) helpers/NonBlankString
   ;; open map: any other keys are allowed and passed through
   s/Keyword                      s/Any})

;; wraps a single aggregation with its options map
(defclause aggregation-options
  aggregation UnnamedAggregation
  options     AggregationOptions)
(def Aggregation
  "Schema for anything that is a valid `:aggregation` clause."
  ;; an aggregation may optionally be wrapped in `:aggregation-options` to override its (display) name
  (s/if (partial is-clause? :aggregation-options)
    aggregation-options
    UnnamedAggregation))
;;; ---------------------------------------------------- Order-By ----------------------------------------------------
;; order-by is just a series of `[<direction> <field>]` clauses like
;;
;;    {:order-by [[:asc [:field 1 nil]], [:desc [:field 2 nil]]]}
;;
;; Field ID is implicit in these clauses
;; sort direction clauses; `field` may also be an aggregation reference
(defclause asc,  field FieldOrAggregationReference)
(defclause desc, field FieldOrAggregationReference)

(def OrderBy
  "Schema for an `order-by` clause subclause."
  (one-of asc desc))
;;; +----------------------------------------------------------------------------------------------------------------+
;;; | Queries |
;;; +----------------------------------------------------------------------------------------------------------------+
;;; ---------------------------------------------- Native [Inner] Query ----------------------------------------------
;; Template tags are used to specify {{placeholders}} in native queries that are replaced with some sort of value when
;; the query itself runs. There are four basic types of template tag for native queries:
;;
;; 1. Field filters, which are used like
;;
;;    SELECT * FROM table WHERE {{field_filter}}
;;
;;    These reference specific Fields and are replaced with entire conditions, e.g. `some_field > 1000`
;;
;; 2. Raw values, which are used like
;;
;;    SELECT * FROM table WHERE x = {{x}}
;;
;;    These are replaced with raw values.
;;
;; 3. Native query snippets, which might be used like
;;
;;    SELECT * FROM ({{snippet: orders}}) source
;;
;;    These are replaced with `NativeQuerySnippet`s from the application database.
;;
;; 4. Source query Card IDs, which are used like
;;
;;    SELECT * FROM ({{#123}}) source
;;
;;    These are replaced with the query from the Card with that ID.
;;
;; Field filters and raw values usually have their value specified by `:parameters` (see [[Parameters]] below).
(def TemplateTagType
  "Schema for valid values of template tag `:type`."
  ;; NOTE(review): `:boolean` appears in [[raw-value-template-tag-types]] below but not here -- confirm whether
  ;; that omission is intentional
  (s/enum :snippet :card :dimension :number :text :date))
(def ^:private TemplateTag:Common
  "Things required by all template tag types."
  {;; TODO -- `:id` is actually 100% required but we have a lot of tests that don't specify it because this constraint
   ;; wasn't previously enforced; we need to go in and fix those tests and make this non-optional
   (s/optional-key :id) helpers/NonBlankString
   :name                helpers/NonBlankString
   :display-name        helpers/NonBlankString
   ;; open map: other keys are allowed and passed through
   s/Keyword            s/Any})
;; Example:
;;
;; {:id           "c2fc7310-44eb-4f21-c3a0-63806ffb7ddd"
;;  :name         "snippet: select"
;;  :display-name "Snippet: select"
;;  :type         :snippet
;;  :snippet-name "select"
;;  :snippet-id   1}
(def TemplateTag:Snippet
  "Schema for a native query snippet template tag."
  (merge
   TemplateTag:Common
   {:type         (s/eq :snippet)
    :snippet-name helpers/NonBlankString
    :snippet-id   helpers/IntGreaterThanZero
    ;; database to which this Snippet belongs. Doesn't always seem to be specified.
    (s/optional-key :database) helpers/IntGreaterThanZero}))
;; Example:
;;
;; {:id "fc5e14d9-7d14-67af-66b2-b2a6e25afeaf"
;;  :name         "#1635"
;;  :display-name "#1635"
;;  :type         :card
;;  :card-id      1635}
(def TemplateTag:SourceQuery
  "Schema for a source query template tag."
  (merge
   TemplateTag:Common
   {:type    (s/eq :card)
    ;; ID of the Card whose query replaces this tag (see comment 4. above)
    :card-id helpers/IntGreaterThanZero}))
(def ^:private TemplateTag:Value:Common
  "Stuff shared between the Field filter and raw value template tag schemas."
  (merge
   TemplateTag:Common
   {;; default value for this parameter
    (s/optional-key :default)  s/Any
    ;; whether or not a value for this parameter is required in order to run the query
    (s/optional-key :required) s/Bool}))
(declare ParameterType)
;; Example:
;;
;; {:id "c20851c7-8a80-0ffa-8a99-ae636f0e9539"
;;  :name "date"
;;  :display-name "Date"
;;  :type :dimension,
;;  :dimension [:field 4 nil]
;; :widget-type :date/all-options}
(def TemplateTag:FieldFilter
  "Schema for a field filter template tag."
  (merge
   TemplateTag:Value:Common
   {:type        (s/eq :dimension)
    ;; the thing being filtered on -- note this is the `field` clause schema specifically, not the wider Field schema
    :dimension   field
    ;; which type of widget the frontend should show for this Field Filter; this also affects which parameter types
    ;; are allowed to be specified for it.
    :widget-type (s/recursive #'ParameterType)}))
(def raw-value-template-tag-types
  "Set of valid values of `:type` for raw value template tags."
  #{:number :text :date :boolean})

;; same set, as a Schema enum for validation
(def TemplateTag:RawValue:Type
  "Valid values of `:type` for raw value template tags."
  (apply s/enum raw-value-template-tag-types))
;; Example:
;;
;; {:id "35f1ecd4-d622-6d14-54be-750c498043cb"
;; :name "id"
;; :display-name "Id"
;; :type :number
;; :required true
;; :default "1"}
(def TemplateTag:RawValue
  "Schema for a raw value template tag."
  (merge
   TemplateTag:Value:Common
   ;; `:type` is used by the FE to determine which type of widget to display for the template tag, and to determine
   ;; which types of parameters are allowed to be passed in for this template tag.
   {:type TemplateTag:RawValue:Type}))
;; TODO -- if we were using core.spec here I would make this a multimethod-based spec instead and have it dispatch off
;; of `:type`. Then we could make it possible to add new types dynamically
(def TemplateTag
  "Schema for a template tag as specified in a native query. There are four types of template tags, differentiated by
  `:type` (see comments above)."
  ;; dispatch on `:type`; everything that isn't a dimension/snippet/card tag must be a raw value tag
  (s/conditional
   #(core/= (:type %) :dimension) TemplateTag:FieldFilter
   #(core/= (:type %) :snippet)   TemplateTag:Snippet
   #(core/= (:type %) :card)      TemplateTag:SourceQuery
   :else                          TemplateTag:RawValue))
(def TemplateTagMap
  "Schema for the `:template-tags` map passed in as part of a native query.

  A map of template tag name -> template tag definition; every key must equal the `:name` of its value."
  (s/constrained
   {helpers/NonBlankString TemplateTag}
   ;; reject maps where a tag is filed under a key different from its own `:name`
   (fn [m]
     (every? (fn [[tag-name {defined-name :name}]]
               (core/= tag-name defined-name))
             m))
   "keys in template tag map must match the :name of their values"))
(def NativeQuery
  "Schema for a valid, normalized native [inner] query."
  {:query                          s/Any
   (s/optional-key :template-tags) TemplateTagMap
   ;; collection (table) this query should run against. Needed for MongoDB
   (s/optional-key :collection)    (s/maybe helpers/NonBlankString)
   ;; other stuff gets added in by different bits of QP middleware to record bits of state or pass info around.
   ;; Everyone else can ignore them.
   s/Keyword                       s/Any})
;;; ----------------------------------------------- MBQL [Inner] Query -----------------------------------------------
;; forward declarations -- both are defined later in this namespace
(declare Query MBQLQuery)

(def SourceQuery
  "Schema for a valid value for a `:source-query` clause."
  ;; a map with a `:native` key is a native source query; anything else must validate as an MBQL inner query
  (s/if (every-pred map? :native)
    ;; when using native queries as source queries the schema is exactly the same except use `:native` in place of
    ;; `:query` for reasons I do not fully remember (perhaps to make it easier to differentiate them from MBQL source
    ;; queries).
    (set/rename-keys NativeQuery {:query :native})
    (s/recursive #'MBQLQuery)))
(def SourceQueryMetadata
  "Schema for the expected keys for a single column in `:source-metadata` (`:source-metadata` is a sequence of these
  entries), if it is passed in to the query.

  This metadata automatically gets added for all source queries that are referenced via the `card__id` `:source-table`
  form; for explicit `:source-query`s you should usually include this information yourself when specifying explicit
  `:source-query`s."
  ;; TODO - there is a very similar schema in `metabase.sync.analyze.query-results`; see if we can merge them
  {:name                           helpers/NonBlankString
   :base_type                      helpers/FieldType
   ;; this is only used by the annotate post-processing stage, not really needed at all for pre-processing, might be
   ;; able to remove this as a requirement
   :display_name                   helpers/NonBlankString
   (s/optional-key :semantic_type) (s/maybe helpers/FieldSemanticOrRelationType)
   ;; you'll need to provide this in order to use BINNING
   (s/optional-key :fingerprint)   (s/maybe helpers/Map)
   s/Any                           s/Any})
(def source-table-card-id-regex
  "Pattern that matches `card__id` strings that can be used as the `:source-table` of MBQL queries."
  ;; e.g. "card__123" -- the numeric part must be a positive integer with no leading zero
  #"^card__[1-9]\d*$")
(def SourceTable
  "Schema for a valid value for the `:source-table` clause of an MBQL query."
  ;; either a Table ID, or a "card__id" string referencing a saved question
  (s/cond-pre helpers/IntGreaterThanZero source-table-card-id-regex))
(def join-strategies
  "Valid values of the `:strategy` key in a join map."
  #{:left-join :right-join :inner-join :full-join})

(def JoinStrategy
  "Strategy that should be used to perform the equivalent of a SQL `JOIN` against another table or a nested query.
  These correspond 1:1 to features of the same name in driver features lists; e.g. you should check that the current
  driver supports `:full-join` before generating a Join clause using that strategy."
  (apply s/enum join-strategies))
;; forward declaration -- [[Fields]] is defined below but referenced by the Join `:fields` schema
(declare Fields)

(def Join
  "Perform the equivalent of a SQL `JOIN` with another Table or nested `:source-query`. JOINs are either explicitly
  specified in the incoming query, or implicitly generated when one uses a `:field` clause with `:source-field`.

  In the top-level query, you can reference Fields from the joined table or nested query by including `:source-field`
  in the `:field` options (known as implicit joins); for explicit joins, you *must* specify `:join-alias` yourself; in
  the `:field` options, e.g.

    ;; for joins against other Tables/MBQL source queries
    [:field 1 {:join-alias \"my_join_alias\"}]

    ;; for joins against native queries
    [:field \"my_field\" {:base-type :field/Integer, :join-alias \"my_join_alias\"}]"
  (->
   {;; *What* to JOIN. Self-joins can be done by using the same `:source-table` as in the query where this is
    ;; specified. YOU MUST SUPPLY EITHER `:source-table` OR `:source-query`, BUT NOT BOTH!
    (s/optional-key :source-table)
    SourceTable

    (s/optional-key :source-query)
    SourceQuery
    ;;
    ;; The condition on which to JOIN. Can be anything that is a valid `:filter` clause. For automatically-generated
    ;; JOINs this is always
    ;;
    ;;    [:= <source-table-fk-field> [:field <dest-table-pk-field> {:join-alias <join-table-alias>}]]
    ;;
    :condition
    Filter
    ;;
    ;; Defaults to `:left-join`; used for all automatically-generated JOINs
    ;;
    ;; Driver implementations: this is guaranteed to be present after pre-processing.
    (s/optional-key :strategy)
    JoinStrategy
    ;;
    ;; The Fields to include in the results *if* a top-level `:fields` clause *is not* specified. This can be either
    ;; `:none`, `:all`, or a sequence of Field clauses.
    ;;
    ;; * `:none`: no Fields from the joined table or nested query are included (unless indirectly included by
    ;;   breakouts or other clauses). This is the default, and what is used for automatically-generated joins.
    ;;
    ;; * `:all`: will include all of the Fields from the joined table or query
    ;;
    ;; * a sequence of Field clauses: include only the specified Fields. Valid clauses are the same as the top-level
    ;;   `:fields` clause. This should be non-empty and all elements should be distinct. The normalizer will
    ;;   automatically remove duplicate fields for you, and replace empty clauses with `:none`.
    ;;
    ;; Driver implementations: you can ignore this clause. Relevant fields will be added to top-level `:fields` clause
    ;; with appropriate aliases.
    (s/optional-key :fields)
    (s/named
     (s/cond-pre
      (s/enum :all :none)
      (s/recursive #'Fields))
     "Valid Join `:fields`: `:all`, `:none`, or a sequence of `:field` clauses that have `:join-alias`.")
    ;;
    ;; The name used to alias the joined table or query. This is usually generated automatically and generally looks
    ;; like `table__via__field`. You can specify this yourself if you need to reference a joined field with a
    ;; `:join-alias` in the options.
    ;;
    ;; Driver implementations: This is guaranteed to be present after pre-processing.
    (s/optional-key :alias)
    helpers/NonBlankString
    ;;
    ;; Used internally, only for annotation purposes in post-processing. When a join is implicitly generated via a
    ;; `:field` clause with `:source-field`, the ID of the foreign key field in the source Table will
    ;; be recorded here. This information is used to add `fk_field_id` information to the `:cols` in the query
    ;; results; I believe this is used to facilitate drill-thru? :shrug:
    ;;
    ;; Don't set this information yourself. It will have no effect.
    (s/optional-key :fk-field-id)
    (s/maybe helpers/IntGreaterThanZero)
    ;;
    ;; Metadata about the source query being used, if pulled in from a Card via the `:source-table "card__id"` syntax.
    ;; added automatically by the `resolve-card-id-source-tables` middleware.
    (s/optional-key :source-metadata)
    (s/maybe [SourceQueryMetadata])

    s/Keyword s/Any}
   (s/constrained
    (every-pred
     (some-fn :source-table :source-query)
     (complement (every-pred :source-table :source-query)))
    "Joins must have either a `source-table` or `source-query`, but not both.")))
(def Joins
  "Schema for a valid sequence of `Join`s. Must be a non-empty sequence, and `:alias`, if specified, must be unique."
  (s/constrained
   (helpers/non-empty [Join])
   ;; `keep` drops joins without an `:alias`; any aliases that are present must be distinct
   (fn [joins]
     (helpers/empty-or-distinct? (keep :alias joins)))
   "All join aliases must be unique."))
(def Fields
  "Schema for valid values of the MBQL `:fields` clause."
  ;; the `s/named` wrapper only improves error messages
  (s/named
   (helpers/distinct (helpers/non-empty [Field]))
   "Distinct, non-empty sequence of Field clauses"))
(def MBQLQuery
  "Schema for a valid, normalized MBQL [inner] query."
  (->
   {(s/optional-key :source-query) SourceQuery
    (s/optional-key :source-table) SourceTable
    (s/optional-key :aggregation)  (helpers/non-empty [Aggregation])
    (s/optional-key :breakout)     (helpers/non-empty [Field])
    (s/optional-key :expressions)  {helpers/NonBlankString FieldOrExpressionDef}
    (s/optional-key :fields)       Fields
    (s/optional-key :filter)       Filter
    (s/optional-key :limit)        helpers/IntGreaterThanOrEqualToZero
    (s/optional-key :order-by)     (helpers/distinct (helpers/non-empty [OrderBy]))
    ;; page = page number, starting with 1. items = number of items per page.
    ;; e.g.
    ;;
    ;;    {:page 1, :items 10} = items 1-10
    ;;    {:page 2, :items 10} = items 11-20
    (s/optional-key :page)         {:page  helpers/IntGreaterThanZero
                                    :items helpers/IntGreaterThanZero}
    ;;
    ;; Various bits of middleware add additional keys, such as `fields-is-implicit?`, to record bits of state or pass
    ;; info to other pieces of middleware. Everyone else can ignore them.
    (s/optional-key :joins)        Joins
    ;;
    ;; Info about the columns of the source query. Added in automatically by middleware. This metadata is primarily
    ;; used to power things like binning when used with Field Literals instead of normal Fields
    (s/optional-key :source-metadata) (s/maybe [SourceQueryMetadata])
    ;;
    ;; Other keys are added by middleware or frontend client for various purposes
    s/Keyword s/Any}
   (s/constrained
    (fn [query]
      (core/= 1 (core/count (select-keys query [:source-query :source-table]))))
    "Query must specify either `:source-table` or `:source-query`, but not both.")
   (s/constrained
    (fn [{:keys [breakout fields]}]
      (empty? (set/intersection (set breakout) (set fields))))
    "Fields specified in `:breakout` should not be specified in `:fields`; this is implied.")))
;;; ----------------------------------------------------- Params -----------------------------------------------------
;; `:parameters` specify the *values* of parameters previously definied for a Dashboard or Card (native query template
;; tag parameters.) See [[TemplateTag]] above for more information on the later.
;; There are three things called 'type' in play when we talk about parameters and template tags.
;;
;; Two are used when the parameters are specified/declared, in a [[TemplateTag]] or in a Dashboard parameter:
;;
;; 1. Dashboard parameter/template tag `:type` -- `:dimension` (for a Field filter parameter),
;;    otherwise `:text`, `:number`, `:boolean`, or `:date`
;;
;; 2. `:widget-type` -- only specified for Field filter parameters (where type is `:dimension`). This tells the FE
;;    what type of widget to display, and also tells us what types of parameters we should allow. Examples:
;;    `:date/all-options`, `:category`, etc.
;;
;; One type is used in the [[Parameter]] list (`:parameters`):
;;
;; 3. Parameter `:type` -- specifies the type of the value being passed in. e.g. `:text` or `:string/!=`
;;
;; Note that some types that make sense as widget types (e.g. `:date/all-options`) but not as actual value types are
;; currently still allowed for backwards-compatibility purposes -- currently the FE client will just parrot back the
;; `:widget-type` in some cases. In these cases, the backend is just supposed to infer the actual type of the
;; parameter value.
(def parameter-types
  "Map of parameter-type -> info. Info is a map with the following keys:

  ### `:type`

  The general type of this parameter. `:numeric`, `:string`, `:boolean`, or `:date`, if applicable. Some parameter
  types like `:id` and `:category` don't have a particular `:type`. This is offered mostly so we can group stuff
  together or determine things like whether a given parameter is a date parameter.

  ### `:operator`

  Signifies this is one of the new 'operator' parameter types added in 0.39.0 or so. These parameters can only be used
  for [[TemplateTag:FieldFilter]]s or for Dashboard parameters mapped to MBQL queries. The value of this key is the
  arity for the parameter, either `:unary`, `:binary`, or `:variadic`. See
  the [[metabase.driver.common.parameters.operators]] namespace for more information.

  ### `:allowed-for`

  [[Parameter]]s with this `:type` may be supplied for [[TemplateTag]]s with these `:type`s (or `:widget-type` if
  `:type` is `:dimension`) types. Example: it is ok to pass a parameter of type `:date/range` for template tag with
  `:widget-type` `:date/all-options`; but it is NOT ok to pass a parameter of type `:date/range` for a template tag
  with a widget type `:date`. Why? It's a potential security risk if someone creates a Card with an \"exact-match\"
  Field filter like `:date` or `:text` and you pass in a parameter like `string/!=` `NOTHING_WILL_MATCH_THIS`.
  Non-exact-match parameters can be abused to enumerate *all* the rows in a table when the parameter was supposed to
  lock the results down to a single row or set of rows."
  {;; the basic raw-value types. These can be used with [[TemplateTag:RawValue]] template tags as well as
   ;; [[TemplateTag:FieldFilter]] template tags.
   :number      {:type :numeric, :allowed-for #{:number :number/= :id :category :series-category :location/zip_code}}
   :text        {:type :string,  :allowed-for #{:text :string/= :id :category :series-category
                                                :location/city :location/state :location/zip_code :location/country}}
   :date        {:type :date,    :allowed-for #{:date :date/single :date/all-options :id :category :series-category}}
   ;; I don't think `:boolean` is actually used on the FE at all.
   :boolean     {:type :boolean, :allowed-for #{:boolean :id :category :series-category}}
   ;; as far as I can tell this is basically just an alias for `:date`... I'm not sure what the difference is TBH
   :date/single {:type :date,    :allowed-for #{:date :date/single :date/all-options :id :category :series-category}}

   ;; everything else can't be used with raw value template tags -- they can only be used with Dashboard parameters
   ;; for MBQL queries or Field filters in native queries

   ;; `:id` and `:category` conceptually aren't types in a "the parameter value is of this type" sense, but they are
   ;; widget types. They have something to do with telling the frontend to show FieldValues list/search widgets or
   ;; something like that.
   ;;
   ;; Apparently the frontend might still pass in parameters with these types, in which case we're supposed to infer
   ;; the actual type of the parameter based on the Field we're filtering on. Or something like that. Parameters with
   ;; these types are only allowed if the widget type matches exactly, but you can also pass in something like a
   ;; `:number/=` for a parameter with widget type `:category`.
   ;;
   ;; TODO FIXME -- actually, it turns out the FE client passes parameter type `:category` for parameters in
   ;; public Cards. Who knows why! For now, we'll continue allowing it. But we should fix it soon. See
   ;; [[metabase.api.public-test/execute-public-card-with-parameters-test]]
   :id              {:allowed-for #{:id}}
   :category        {:allowed-for #{:category #_FIXME :number :text :date :boolean}}
   :series-category {:allowed-for #{:series-category #_FIXME :number :text :date :boolean}}

   ;; Like `:id` and `:category`, the `:location/*` types are primarily widget types. They don't really have a meaning
   ;; as a parameter type, so in an ideal world they wouldn't be allowed; however it seems like the FE still passes
   ;; these in as parameter type on occasion anyway. In this case the backend is just supposed to infer the actual
   ;; type -- which should be `:text` and, in the case of ZIP code, possibly `:number`.
   ;;
   ;; As with `:id` and `:category`, it would be preferable to just pass in a parameter with type `:text` or `:number`
   ;; for these widget types, but for compatibility we'll allow them to continue to be used as parameter types for the
   ;; time being. We'll only allow that if the widget type matches exactly, however.
   :location/city     {:allowed-for #{:location/city}}
   :location/state    {:allowed-for #{:location/state}}
   :location/zip_code {:allowed-for #{:location/zip_code}}
   :location/country  {:allowed-for #{:location/country}}

   ;; date range types -- these match a range of dates
   :date/range        {:type :date, :allowed-for #{:date/range :date/all-options :date/series-date}}
   :date/month-year   {:type :date, :allowed-for #{:date/month-year :date/all-options :date/series-date}}
   :date/quarter-year {:type :date, :allowed-for #{:date/quarter-year :date/all-options :date/series-date}}
   :date/relative     {:type :date, :allowed-for #{:date/relative :date/all-options :date/series-date}}

   ;; Like `:id` and `:category` above, `:date/all-options` is primarily a widget type. It means that we should allow
   ;; any date option above.
   :date/all-options  {:type :date, :allowed-for #{:date/all-options}}
   :date/series-date  {:type :date, :allowed-for #{:date/series-date}}

   ;; "operator" parameter types.
   :number/!=               {:type :numeric, :operator :variadic, :allowed-for #{:number/!=}}
   :number/<=               {:type :numeric, :operator :unary,    :allowed-for #{:number/<=}}
   :number/=                {:type :numeric, :operator :variadic, :allowed-for #{:number/= :number :id :category
                                                                                :series-category :location/zip_code}}
   :number/>=               {:type :numeric, :operator :unary,    :allowed-for #{:number/>=}}
   :number/between          {:type :numeric, :operator :binary,   :allowed-for #{:number/between}}
   :string/!=               {:type :string,  :operator :variadic, :allowed-for #{:string/!=}}
   :string/=                {:type :string,  :operator :variadic, :allowed-for #{:string/= :text :id :category
                                                                                :series-category :location/city
                                                                                :location/state :location/zip_code
                                                                                :location/country}}
   :string/contains         {:type :string,  :operator :unary,    :allowed-for #{:string/contains}}
   :string/does-not-contain {:type :string,  :operator :unary,    :allowed-for #{:string/does-not-contain}}
   :string/ends-with        {:type :string,  :operator :unary,    :allowed-for #{:string/ends-with}}
   :string/starts-with      {:type :string,  :operator :unary,    :allowed-for #{:string/starts-with}}})
(defn valid-parameter-type?
  "Whether `param-type` is a valid non-abstract parameter type. Returns the type's info map (truthy) when valid, and
  `nil` otherwise."
  [param-type]
  ;; map-as-function lookup -- equivalent to `(get parameter-types param-type)`
  (parameter-types param-type))
(def ParameterType
  "Schema for valid values of `:type` for a [[Parameter]]."
  ;; any key of [[parameter-types]] is a valid parameter type
  (apply s/enum (keys parameter-types)))
;; the next few clauses are used for parameter `:target`... this maps the parameter to an actual template tag in a
;; native query or Field for MBQL queries.
;;
;; examples:
;;
;;    {:target [:dimension [:template-tag "my_tag"]]}
;;    {:target [:dimension [:template-tag {:id "my_tag_id"}]]}
;;    {:target [:variable [:template-tag "another_tag"]]}
;;    {:target [:variable [:template-tag {:id "another_tag_id"}]]}
;;    {:target [:dimension [:field 100 nil]]}
;;    {:target [:field 100 nil]}
;;
;; I'm not 100% clear on which situations we'll get which version. But I think the following is generally true:
;;
;; * Things are wrapped in `:dimension` when we're dealing with Field filter template tags
;; * Raw value template tags wrap things in `:variable` instead
;; * Dashboard parameters are passed in with plain Field clause targets.
;;
;; One more thing to note: apparently `:expression`... is allowed below as well. I'm not sure how this is actually
;; supposed to work, but we have test #18747 that attempts to set it. I'm not convinced this should actually be
;; allowed.
;; this is the reference like [:template-tag <whatever>], not the [[TemplateTag]] schema for when it's declared in
;; `:template-tags`
;; tag reference: either the tag's name or a map carrying its `:id`
(defclause template-tag
  tag-name (s/cond-pre helpers/NonBlankString
                       {:id helpers/NonBlankString}))

;; a `:dimension` target wraps either a Field clause or a `:template-tag` reference
(defclause dimension
  target (s/cond-pre Field template-tag))

;; a `:variable` target always wraps a `:template-tag` reference
(defclause variable
  target template-tag)
(def ParameterTarget
  "Schema for the value of `:target` in a [[Parameter]]."
  ;; not 100% sure about this but `field` on its own comes from a Dashboard parameter and when it's wrapped in
  ;; `dimension` it comes from a Field filter template tag parameter (don't quote me on this -- working theory)
  (s/cond-pre
   Field
   (one-of dimension variable)))
(def Parameter
  "Schema for the *value* of a parameter (e.g. a Dashboard parameter or a native query template tag) as passed in as
  part of the `:parameters` list in a query."
  {:type                     ParameterType
   ;; TODO -- these definitely SHOULD NOT be optional but a ton of tests aren't passing them in like they should be.
   ;; At some point we need to go fix those tests and then make these keys required
   (s/optional-key :id)      helpers/NonBlankString
   (s/optional-key :target)  ParameterTarget
   ;; not specified if the param has no value. TODO - make this stricter; type of `:value` should be validated based
   ;; on the [[ParameterType]]
   (s/optional-key :value)   s/Any
   ;; the name of the parameter we're trying to set -- this is actually required now I think, or at least needs to get
   ;; merged in appropriately
   (s/optional-key :name)    helpers/NonBlankString
   ;; The following are not used by the code in this namespace but may or may not be specified depending on what the
   ;; code that constructs the query params is doing. We can go ahead and ignore these when present.
   (s/optional-key :slug)    helpers/NonBlankString
   (s/optional-key :default) s/Any
   ;; various other keys are used internally by the frontend
   s/Keyword                 s/Any})
(def ParameterList
  "Schema for a list of `:parameters` as passed in to a query."
  [Parameter]
  ;; the distinctness constraint below is reader-ignored (`#_`) and therefore has no effect
  #_(->
     ;; TODO -- disabled for now since it breaks tests. Also, I'm not sure whether these should be distinct by
     ;; `:name` or `:id`... at any rate, neither is currently required.
     ;;
     (s/constrained (fn [parameters]
                      (apply distinct? (map :id parameters)))
                    "Cannot specify parameter more than once; IDs must be distinct")))
;;; ---------------------------------------------------- Options -----------------------------------------------------
(def ^:private Settings
"Options that tweak the behavior of the query processor."
;; The timezone the query should be ran in, overriding the default report timezone for the instance.
{(s/optional-key :report-timezone) helpers/NonBlankString
;; other Settings might be used somewhere, but I don't know about them. Add them if you come across them for
;; documentation purposes
s/Keyword s/Any})
(def ^:private Constraints
"Additional constraints added to a query limiting the maximum number of rows that can be returned. Mostly useful
because native queries don't support the MBQL `:limit` clause. For MBQL queries, if `:limit` is set, it will
override these values."
(s/constrained
{ ;; maximum number of results to allow for a query with aggregations. If `max-results-bare-rows` is unset, this
;; applies to all queries
(s/optional-key :max-results) helpers/IntGreaterThanOrEqualToZero
;; maximum number of results to allow for a query with no aggregations.
;; If set, this should be LOWER than `:max-results`
(s/optional-key :max-results-bare-rows) helpers/IntGreaterThanOrEqualToZero
;; other Constraints might be used somewhere, but I don't know about them. Add them if you come across them for
;; documentation purposes
s/Keyword s/Any}
(fn [{:keys [max-results max-results-bare-rows]}]
(if-not (core/and max-results max-results-bare-rows)
true
(core/>= max-results max-results-bare-rows)))
"max-results-bare-rows must be less or equal to than max-results"))
(def ^:private MiddlewareOptions
"Additional options that can be used to toggle middleware on or off."
{;; should we skip adding results_metadata to query results after running the query? Used by
;; `metabase.query-processor.middleware.results-metadata`; default `false`
(s/optional-key :skip-results-metadata?)
s/Bool
should we skip converting datetime types to ISO-8601 strings with appropriate timezone when post - processing
;; results? Used by `metabase.query-processor.middleware.format-rows`; default `false`
(s/optional-key :format-rows?)
s/Bool
disable the MBQL->native middleware . If you do this , the query will not work at all , so there are no cases where
;; you should set this yourself. This is only used by the [[metabase.query-processor/preprocess]] function to get
;; the fully pre-processed query without attempting to convert it to native.
(s/optional-key :disable-mbql->native?)
s/Bool
;; Disable applying a default limit on the query results. Handled in the `add-default-limit` middleware.
;; If true, this will override the `:max-results` and `:max-results-bare-rows` values in [[Constraints]].
(s/optional-key :disable-max-results?)
s/Bool
Userland queries are ones ran as a result of an API call , Pulse , or the like . Special handling is done in the
;; `process-userland-query` middleware for such queries -- results are returned in a slightly different format, and
;; QueryExecution entries are normally saved, unless you pass `:no-save` as the option.
(s/optional-key :userland-query?)
(s/maybe s/Bool)
;; Whether to add some default `max-results` and `max-results-bare-rows` constraints. By default, none are added,
;; although the functions that ultimately power most API endpoints tend to set this to `true`. See
;; `add-constraints` middleware for more details.
(s/optional-key :add-default-userland-constraints?)
(s/maybe s/Bool)
;; Whether to process a question's visualization settings and include them in the result metadata so that they can
;; incorporated into an export. Used by `metabase.query-processor.middleware.visualization-settings`; default `false`.
(s/optional-key :process-viz-settings?)
(s/maybe s/Bool)
;; other middleware options might be used somewhere, but I don't know about them. Add them if you come across them
;; for documentation purposes
s/Keyword
s/Any})
;;; ------------------------------------------------------ Info ------------------------------------------------------
;; This stuff is used for informational purposes, primarily to record QueryExecution entries when a query is ran. Pass
;; them along if applicable when writing code that creates queries, but when working on middleware and the like you
;; can most likely ignore this stuff entirely.
(def Context
"Schema for `info.context`; used for informational purposes to record how a query was executed."
(s/enum :ad-hoc
:collection
:csv-download
:dashboard
:embedded-dashboard
:embedded-question
:json-download
:map-tiles
:public-dashboard
:public-question
:pulse
:question
:xlsx-download))
TODO - this schema is somewhat misleading because if you use a function like
;; `qp/process-query-and-save-with-max-results-constraints!` some of these keys (e.g. `:context`) are in fact required
(def Info
"Schema for query `:info` dictionary, which is used for informational purposes to record information about how a query
was executed in QueryExecution and other places. It is considered bad form for middleware to change its behavior
based on this information, don't do it!"
{;; These keys are nice to pass in if you're running queries on the backend and you know these values. They aren't
;; used for permissions checking or anything like that so don't try to be sneaky
(s/optional-key :context) (s/maybe Context)
(s/optional-key :executed-by) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :card-id) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :card-name) (s/maybe helpers/NonBlankString)
(s/optional-key :dashboard-id) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :pulse-id) (s/maybe helpers/IntGreaterThanZero)
;; Metadata for datasets when querying the dataset. This ensures that user edits to dataset metadata are blended in
;; with runtime computed metadata so that edits are saved.
(s/optional-key :metadata/dataset-metadata) (s/maybe [{s/Any s/Any}])
;; `:hash` gets added automatically by `process-query-and-save-execution!`, so don't try passing
;; these in yourself. In fact, I would like this a lot better if we could take these keys out of `:info` entirely
and have the code that saves QueryExceutions figure out their values when it goes to save them
(s/optional-key :query-hash) (s/maybe #?(:clj (Class/forName "[B")
:cljs s/Any))})
;;; --------------------------------------------- Metabase [Outer] Query ---------------------------------------------
(def ^Integer saved-questions-virtual-database-id
"The ID used to signify that a database is 'virtual' rather than physical.
A fake integer ID is used so as to minimize the number of changes that need to be made on the frontend -- by using
something that would otherwise be a legal ID, *nothing* need change there, and the frontend can query against this
'database' none the wiser. (This integer ID is negative which means it will never conflict with a *real* database
ID.)
This ID acts as a sort of flag. The relevant places in the middleware can check whether the DB we're querying is
this 'virtual' database and take the appropriate actions."
-1337)
To the reader : yes , this seems sort of hacky , but one of the goals of the Nested Query Initiative ™ was to minimize
;; if not completely eliminate any changes to the frontend. After experimenting with several possible ways to do this
;; implementation seemed simplest and best met the goal. Luckily this is the only place this "magic number" is defined
;; and the entire frontend can remain blissfully unaware of its value.
(def DatabaseID
"Schema for a valid `:database` ID, in the top-level 'outer' query. Either a positive integer (referring to an
actual Database), or the saved questions virtual ID, which is a placeholder used for queries using the
`:source-table \"card__id\"` shorthand for a source query resolved by middleware (since clients might not know the
actual DB for that source query.)"
(s/cond-pre (s/eq saved-questions-virtual-database-id) helpers/IntGreaterThanZero))
(def Query
"Schema for an [outer] query, e.g. the sort of thing you'd pass to the query processor or save in
`Card.dataset_query`."
(->
{:database DatabaseID
Type of query . ` : query ` = MBQL ; ` : native ` = native . TODO - consider normalizing ` : query ` to ` : `
:type (s/enum :query :native)
(s/optional-key :native) NativeQuery
(s/optional-key :query) MBQLQuery
(s/optional-key :parameters) ParameterList
;;
;; OPTIONS
;;
;; These keys are used to tweak behavior of the Query Processor.
;; TODO - can we combine these all into a single `:options` map?
;;
(s/optional-key :settings) (s/maybe Settings)
(s/optional-key :constraints) (s/maybe Constraints)
(s/optional-key :middleware) (s/maybe MiddlewareOptions)
;;
;; INFO
;;
Used when recording info about this run in the QueryExecution log ; things like context query was ran in and
;; User who ran it
(s/optional-key :info) (s/maybe Info)
;;
Other various keys get stuck in the query dictionary at some point or another by various pieces of QP
;; middleware to record bits of state. Everyone else can ignore them.
s/Keyword s/Any}
;;
;; CONSTRAINTS
;;
;; Make sure we have the combo of query `:type` and `:native`/`:query`
(s/constrained
(every-pred
(some-fn :native :query)
(complement (every-pred :native :query)))
"Query must specify either `:native` or `:query`, but not both.")
(s/constrained
(fn [{native :native, mbql :query, query-type :type}]
(core/case query-type
:native native
:query mbql))
"Native queries must specify `:native`; MBQL queries must specify `:query`.")
;;
;; `:source-metadata` is added to queries when `card__id` source queries are resolved. It contains info about the
;; columns in the source query.
;;
Where this is added was changed in Metabase 0.33.0 -- previously , when ` card__id ` source queries were resolved ,
;; the middleware would add `:source-metadata` to the top-level; to support joins against source queries, this has
;; been changed so it is always added at the same level the resolved `:source-query` is added.
;;
;; This should automatically be fixed by `normalize`; if we encounter it, it means some middleware is not
;; functioning properly
(s/constrained
(complement :source-metadata)
"`:source-metadata` should be added in the same level as `:source-query` (i.e., the 'inner' MBQL query.)")))
;;; --------------------------------------------------- Validators ---------------------------------------------------
(def ^{:arglists '([query])} validate-query
"Compiled schema validator for an [outer] Metabase query. (Pre-compling a validator is more efficient; use this
instead of calling `(s/validate Query query)` or similar."
(s/validator Query))
| null | https://raw.githubusercontent.com/footprintanalytics/footprint-web/d3090d943dd9fcea493c236f79e7ef8a36ae17fc/shared/src/metabase/mbql/schema.cljc | clojure | A NOTE ABOUT METADATA:
Clauses below are marked with the following tags for documentation purposes:
* Clauses marked `^:sugar` are syntactic sugar primarily intended to make generating queries easier on the
frontend. These clauses are automatically rewritten as simpler clauses by the `desugar` or `expand-macros`
middleware. Thus driver implementations do not need to handle these clauses.
* Clauses marked `^:internal` are automatically generated by `wrap-value-literals` or other middleware from values
passed in. They are not intended to be used by the frontend when generating a query. These add certain
information that simplify driver implementations. When writing MBQL queries yourself you should pretend these
clauses don't exist.
* Clauses marked `^{:requires-features #{feature+}}` require a certain set of features to be used. At some date in
the future we will likely add middleware that uses this metadata to automatically validate that a driver has the
features needed to run the query in question.
+----------------------------------------------------------------------------------------------------------------+
| MBQL Clauses |
+----------------------------------------------------------------------------------------------------------------+
------------------------------------------------- Datetime Stuff -------------------------------------------------
it could make sense to say hour-of-day(field) = hour-of-day("2018-10-10T12:00")
TODO - `unit` is not allowed if `n` is `current`
mostly to convenience driver implementations. You don't need to use this form directly when writing MBQL; datetime
literal strings are preferred instead.
example:
becomes:
almost exactly the same as `absolute-datetime`, but generated in some sitations where the literal in question was
time-bucketing unit
TODO - should we have a separate `date` type as well
middleware so drivers don't need to deal with these directly. You only need to worry about handling
`absolute-datetime` clauses.
middleware so drivers don't need to deal with these directly. You only need to worry about handling
`time` clauses.
-------------------------------------------------- Other Values --------------------------------------------------
Arguments to filter clauses are automatically replaced with [:value <value> <type-info>] clauses by the
`wrap-value-literals` middleware. This is done to make it easier to implement query processors, because most driver
implementations dispatch off of Object type, which is often not enough to make informed decisions about how to
treat certain objects. For example, a string compared against a Postgres UUID Field needs to be parsed into a UUID
----------------------------------------------------- Fields -----------------------------------------------------
Expression *references* refer to a something in the `:expressions` clause, e.g. something like
specific error messages
replaces `fk->`
source table.
If both `:source-field` and `:join-alias` are supplied, `:join-alias` should be used to perform the join;
`:source-field` should be for information purposes only.
reasons `:field` clauses that refer to `:type/DateTime` Fields will be automatically "bucketed" in the
`:breakout` and `:filter` clauses, but nowhere else. Auto-bucketing only applies to `:filter` clauses when
values for comparison are `yyyy-MM-dd` date strings. See the `auto-bucket-datetimes` middleware for more
details. `:field` clauses elsewhere will not be automatically bucketed, so drivers still need to make sure they
replaces `joined-field`
JOINING against.
replaces `binning-strategy`
Using binning requires the driver to support the `:binning` feature.
aggregate field reference refers to an aggregation, e.g.
{:aggregation [[:count]]
; refers to the 0th aggregation , ` : count `
`HAVING` we can allow them in filter clauses too
TODO - shouldn't we allow composing aggregations in expressions? e.g.
{:order-by [[:asc [:+ [:aggregation 0] [:aggregation 1]]]]}
TODO - it would be nice if we could check that there's actually an aggregation with the corresponding index,
wouldn't it
-------------------------------------------------- Expressions ---------------------------------------------------
Expressions are "calculated column" definitions, defined once and then used elsewhere in the MBQL query.
TODO: expressions that return numerics should be in arithmetic-expressions
extraction functions (get some component of a given temporal value/column)
SUGAR drivers do not need to implement
Recursively doing date math
The result is positive if x <= y, and negative otherwise.
If the values are datetimes, the time doesn't matter for these units.
only for get-week
SUGAR CLAUSE: get-year, get-month... clauses are all sugars clause that will be rewritten as [:temporal-extract column :year]
SUGAR drivers do not need to implement
----------------------------------------------------- Filter -----------------------------------------------------
These are SORT OF SUGARY, because extra values will automatically be converted a compound clauses. Driver
:between is INCLUSIVE just like SQL !!!
SUGAR CLAUSES: These are rewritten as `[:= <field> nil]` and `[:not= <field> nil]` respectively
These are rewritten as `[:or [:= <field> nil] [:= <field> ""]]` and
`[:and [:not= <field> nil] [:not= <field> ""]]`
default true
SUGAR: this is rewritten as [:not [:contains ...]]
set this to ` true ` to
include them.
default false
Filter subclause. Syntactic sugar for specifying a specific time interval.
SUGAR: This is automatically rewritten as a filter clause with a relative-datetime value
filters drivers must implement
SUGAR filters drivers do not need to implement
-------------------------------------------------- Aggregations --------------------------------------------------
cum-sum and cum-count are SUGAR because they're implemented in middleware. The clauses are swapped out with
`count` and `sum` aggregations respectively and summation is done in Clojure-land
technically aggregations besides count can also accept expressions as args, e.g.
Which is equivalent to SQL:
for clj-kondo
Metrics are just 'macros' (placeholders for other aggregations with optional filter and breakout clauses) that get
expanded to other aggregations/etc. in the expand-macros middleware
the following are definitions for expression aggregations, e.g.
SUGAR clauses
name to use for this aggregation in the native query instead of the default name (e.g. `count`)
---------------------------------------------------- Order-By ----------------------------------------------------
order-by is just a series of `[<direction> <field>]` clauses like
Field ID is implicit in these clauses
+----------------------------------------------------------------------------------------------------------------+
| Queries |
+----------------------------------------------------------------------------------------------------------------+
---------------------------------------------- Native [Inner] Query ----------------------------------------------
Template tags are used to specify {{placeholders}} in native queries that are replaced with some sort of value when
SELECT * FROM table WHERE {{field_filter}}
These are replaced with raw values.
SELECT * FROM ({{snippet: orders}}) source
These are replaced with `NativeQuerySnippet`s from the application database.
These are replaced with the query from the Card with that ID.
wasn't previously enforced; we need to go in and fix those tests and make this non-optional
Example:
:name "snippet: select"
:type :snippet
:snippet-name "select"
Example:
{:id "fc5e14d9-7d14-67af-66b2-b2a6e25afeaf"
:type :card
default value for this parameter
whether or not a value for this parameter is required in order to run the query
Example:
:name "date"
:display-name "Date"
:type :dimension,
:widget-type :date/all-options}
this also affects which parameter types
are allowed to be specified for it.
Example:
{:id "35f1ecd4-d622-6d14-54be-750c498043cb"
:name "id"
:display-name "Id"
:type :number
:required true
:default "1"}
which types of parameters are allowed to be passed in for this template tag.
of `:type`. Then we could make it possible to add new types dynamically
map of template tag name -> template tag definition
make sure people don't try to pass in a `:name` that's different from the actual key in the map.
collection (table) this query should run against. Needed for MongoDB
Everyone else can ignore them.
----------------------------------------------- MBQL [Inner] Query -----------------------------------------------
when using native queries as source queries the schema is exactly the same except use `:native` in place of
`:query` for reasons I do not fully remember (perhaps to make it easier to differentiate them from MBQL source
queries).
for explicit `:source-query`s you should usually include this information yourself when specifying explicit
TODO - there is a very similar schema in `metabase.sync.analyze.query-results`; see if we can merge them
this is only used by the annotate post-processing stage, not really needed at all for pre-processing, might be
able to remove this as a requirement
e.g. you should check that the current
for explicit joins, you *must* specify `:join-alias` yourself; in
for joins against other Tables/MBQL source queries
for joins against native queries
*What* to JOIN. Self-joins can be done by using the same `:source-table` as in the query where this is specified.
YOU MUST SUPPLY EITHER `:source-table` OR `:source-query`, BUT NOT BOTH!
The condition on which to JOIN. Can be anything that is a valid `:filter` clause. For automatically-generated
JOINs this is always
[:= <source-table-fk-field> [:field <dest-table-pk-field> {:join-alias <join-table-alias>}]]
used for all automatically - generated JOINs
Driver implementations: this is guaranteed to be present after pre-processing.
* `:none`: no Fields from the joined table or nested query are included (unless indirectly included by
breakouts or other clauses). This is the default, and what is used for automatically-generated joins.
`:fields` clause. This should be non-empty and all elements should be distinct. The normalizer will
automatically remove duplicate fields for you, and replace empty clauses with `:none`.
Driver implementations: you can ignore this clause. Relevant fields will be added to top-level `:fields` clause
with appropriate aliases.
The name used to alias the joined table or query. This is usually generated automatically and generally looks
like `table__via__field`. You can specify this yourself if you need to reference a joined field with a
`:join-alias` in the options.
Driver implementations: This is guaranteed to be present after pre-processing.
Used internally, only for annotation purposes in post-processing. When a join is implicitly generated via a
`:field` clause with `:source-field`, the ID of the foreign key field in the source Table will
be recorded here. This information is used to add `fk_field_id` information to the `:cols` in the query
results; I believe this is used to facilitate drill-thru? :shrug:
Don't set this information yourself. It will have no effect.
added automatically by the `resolve-card-id-source-tables` middleware.
e.g.
Various bits of middleware add additonal keys, such as `fields-is-implicit?`, to record bits of state or pass
info to other pieces of middleware. Everyone else can ignore them.
Info about the columns of the source query. Added in automatically by middleware. This metadata is primarily
Other keys are added by middleware or frontend client for various purposes
----------------------------------------------------- Params -----------------------------------------------------
`:parameters` specify the *values* of parameters previously definied for a Dashboard or Card (native query template
tag parameters.) See [[TemplateTag]] above for more information on the later.
otherwise `:text`, `:number`, `:boolean`, or `:date`
what type of widget to display, and also tells us what types of parameters we should allow. Examples:
`:date/all-options`, `:category`, etc.
Note that some types that makes sense as widget types (e.g. `:date/all-options`) but not as actual value types are
`:widget-type` in some cases. In these cases, the backend is just supposed to infer the actual type of the
parameter value.
but it is NOT ok to pass a parameter of type `:date/range` for a template tag
[[TemplateTag:FieldFilter]] template tags.
as far as I can tell this is basically just an alias for `:date`... I'm not sure what the difference is TBH
everything else can't be used with raw value template tags -- they can only be used with Dashboard parameters
`:id` and `:category` conceptually aren't types in a "the parameter value is of this type" sense, but they are
something like that.
Apparently the frontend might still pass in parameters with these types, in which case we're supposed to infer
these types are only allowed if the widget type matches exactly, but you can also pass in something like a
`:number/=` for a parameter with widget type `:category`.
public Cards. Who knows why! For now, we'll continue allowing it. But we should fix it soon. See
[[metabase.api.public-test/execute-public-card-with-parameters-test]]
Like `:id` and `:category`, the `:location/*` types are primarily widget types. They don't really have a meaning
however it seems like the FE still passed
these in as parameter type on occasion anyway. In this case the backend is just supposed to infer the actual
type -- which should be `:text` and, in the case of ZIP code, possibly `:number`.
As with `:id` and `:category`, it would be preferable to just pass in a parameter with type `:text` or `:number`
for these widget types, but for compatibility we'll allow them to continue to be used as parameter types for the
time being. We'll only allow that if the widget type matches exactly, however.
date range types -- these match a range of dates
Like `:id` and `:category` above, `:date/all-options` is primarily a widget type. It means that we should allow
any date option above.
"operator" parameter types.
the next few clauses are used for parameter `:target`... this maps the parameter to an actual template tag in a
native query or Field for MBQL queries.
examples:
{:target [:dimension [:template-tag {:id "my_tag_id"}]]}
{:target [:variable [:template-tag "another_tag"]]}
{:target [:variable [:template-tag {:id "another_tag_id"}]]}
* Raw value template tags wrap things in `:variable` instead
* Dashboard parameters are passed in with plain Field clause targets.
allowed.
this is the reference like [:template-tag <whatever>], not the [[TemplateTag]] schema for when it's declared in
`:template-tags`
At some point we need to go fix those tests and then make these keys required
not specified if the param has no value. TODO - make this stricter; type of `:value` should be validated based
the name of the parameter we're trying to set -- this is actually required now I think, or at least needs to get
merged in appropriately
The following are not used by the code in this namespace but may or may not be specified depending on what the
code that constructs the query params is doing. We can go ahead and ignore these when present.
various other keys are used internally by the frontend
`:name` or `:id`... at any rate, neither is currently required.
---------------------------------------------------- Options -----------------------------------------------------
The timezone the query should be ran in, overriding the default report timezone for the instance.
other Settings might be used somewhere, but I don't know about them. Add them if you come across them for
documentation purposes
maximum number of results to allow for a query with aggregations. If `max-results-bare-rows` is unset, this
applies to all queries
maximum number of results to allow for a query with no aggregations.
If set, this should be LOWER than `:max-results`
other Constraints might be used somewhere, but I don't know about them. Add them if you come across them for
documentation purposes
should we skip adding results_metadata to query results after running the query? Used by
`metabase.query-processor.middleware.results-metadata`; default `false`
results? Used by `metabase.query-processor.middleware.format-rows`; default `false`
you should set this yourself. This is only used by the [[metabase.query-processor/preprocess]] function to get
the fully pre-processed query without attempting to convert it to native.
Disable applying a default limit on the query results. Handled in the `add-default-limit` middleware.
If true, this will override the `:max-results` and `:max-results-bare-rows` values in [[Constraints]].
`process-userland-query` middleware for such queries -- results are returned in a slightly different format, and
QueryExecution entries are normally saved, unless you pass `:no-save` as the option.
Whether to add some default `max-results` and `max-results-bare-rows` constraints. By default, none are added,
although the functions that ultimately power most API endpoints tend to set this to `true`. See
`add-constraints` middleware for more details.
Whether to process a question's visualization settings and include them in the result metadata so that they can
incorporated into an export. Used by `metabase.query-processor.middleware.visualization-settings`; default `false`.
other middleware options might be used somewhere, but I don't know about them. Add them if you come across them
for documentation purposes
------------------------------------------------------ Info ------------------------------------------------------
This stuff is used for informational purposes, primarily to record QueryExecution entries when a query is ran. Pass
them along if applicable when writing code that creates queries, but when working on middleware and the like you
can most likely ignore this stuff entirely.
`qp/process-query-and-save-with-max-results-constraints!` some of these keys (e.g. `:context`) are in fact required
These keys are nice to pass in if you're running queries on the backend and you know these values. They aren't
used for permissions checking or anything like that so don't try to be sneaky
Metadata for datasets when querying the dataset. This ensures that user edits to dataset metadata are blended in
with runtime computed metadata so that edits are saved.
`:hash` gets added automatically by `process-query-and-save-execution!`, so don't try passing
these in yourself. In fact, I would like this a lot better if we could take these keys out of `:info` entirely
--------------------------------------------- Metabase [Outer] Query ---------------------------------------------
if not completely eliminate any changes to the frontend. After experimenting with several possible ways to do this
implementation seemed simplest and best met the goal. Luckily this is the only place this "magic number" is defined
and the entire frontend can remain blissfully unaware of its value.
` : native ` = native . TODO - consider normalizing ` : query ` to ` : `
OPTIONS
These keys are used to tweak behavior of the Query Processor.
TODO - can we combine these all into a single `:options` map?
INFO
things like context query was ran in and
User who ran it
middleware to record bits of state. Everyone else can ignore them.
CONSTRAINTS
Make sure we have the combo of query `:type` and `:native`/`:query`
`:source-metadata` is added to queries when `card__id` source queries are resolved. It contains info about the
columns in the source query.
the middleware would add `:source-metadata` to the top-level; to support joins against source queries, this has
been changed so it is always added at the same level the resolved `:source-query` is added.
This should automatically be fixed by `normalize`; if we encounter it, it means some middleware is not
functioning properly
--------------------------------------------------- Validators ---------------------------------------------------
;; Fix: the `ns` form was prefixed with stray non-Clojure text ("use this | ") left over from extraction,
;; which made the file unreadable by the Clojure reader. The form itself is unchanged.
(ns metabase.mbql.schema
  "Schema for validating a *normalized* MBQL query. This is also the definitive grammar for MBQL, wow!"
  ;; many clojure.core names are shadowed below by `defclause` definitions (e.g. `=`, `and`, `count`);
  ;; the originals remain reachable via the `core` alias.
  (:refer-clojure :exclude [count distinct min max + - / * and or not not-empty = < > <= >= time case concat replace abs])
  #?@
   (:clj
    [(:require
      [clojure.core :as core]
      [clojure.set :as set]
      [metabase.mbql.schema.helpers :as helpers :refer [is-clause?]]
      [metabase.mbql.schema.macros :refer [defclause one-of]]
      [schema.core :as s])
     (:import java.time.format.DateTimeFormatter)]
    :cljs
    [(:require
      [clojure.core :as core]
      [clojure.set :as set]
      [metabase.mbql.schema.helpers :as helpers :refer [is-clause?]]
      [metabase.mbql.schema.macros :refer [defclause one-of]]
      [schema.core :as s])]))
;; `:day-of-week` depends on the [[metabase.public-settings/start-of-week]] Setting, by default Sunday.
;; 1 = first day of the week (e.g. Sunday)
;; 7 = last day of the week (e.g. Saturday)
(def date-bucketing-units
  "Set of valid units for bucketing or comparing against a *date* Field."
  #{:default :day :day-of-week :day-of-month :day-of-year :week :week-of-year
    :month :month-of-year :quarter :quarter-of-year :year})

(def time-bucketing-units
  "Set of valid units for bucketing or comparing against a *time* Field."
  #{:default :millisecond :second :minute :minute-of-hour :hour :hour-of-day})

(def datetime-bucketing-units
  "Set of valid units for bucketing or comparing against a *datetime* Field."
  (set/union date-bucketing-units time-bucketing-units))

(def DateUnit
  "Valid unit for *date* bucketing."
  (s/named
   (apply s/enum date-bucketing-units)
   "date-bucketing-unit"))

;; it does not make sense to say month-of-year(field) = month-of-year("08:00:00"),
;; does it? So we'll restrict the set of units a TimeValue can have to ones that have no notion of day/date.
(def TimeUnit
  "Valid unit for *time* bucketing."
  (s/named
   (apply s/enum time-bucketing-units)
   "time-bucketing-unit"))

(def DateTimeUnit
  "Valid unit for *datetime* bucketing."
  (s/named
   (apply s/enum datetime-bucketing-units)
   "datetime-bucketing-unit"))
(def TemporalExtractUnits
  "Valid units to extract from a temporal."
  (s/named
   (apply s/enum #{:year-of-era
                   :quarter-of-year
                   :month-of-year
                   :week-of-year-iso
                   :week-of-year-us
                   :week-of-year-instance
                   :day-of-month
                   :day-of-week
                   :hour-of-day
                   :minute-of-hour
                   :second-of-minute})
   "temporal-extract-units"))

(def DatetimeDiffUnits
  "Valid units for a datetime-diff clause."
  (s/named
   (apply s/enum #{:second :minute :hour :day :week :month :year})
   "datetime-diff-units"))

;; `:instance` means "whatever week-numbering the database instance is configured to use".
(def ExtractWeekModes
  "Valid modes to extract weeks."
  (s/named
   (apply s/enum #{:iso :us :instance})
   "extract-week-modes"))

;; units accepted by `:relative-datetime` / `:interval` / `:time-interval` clauses below.
(def ^:private RelativeDatetimeUnit
  (s/named
   (apply s/enum #{:default :minute :hour :day :week :month :quarter :year})
   "relative-datetime-unit"))
#?(:clj
   ;; CLJ: truthy iff `s` is a string that the given java.time `formatter` can parse.
   ;; Returns `nil` (not `false`) for non-strings, which is fine for predicate use.
   (defn- can-parse-iso-8601?
     [^DateTimeFormatter formatter ^String s]
     (when (string? s)
       (try
         (.parse formatter s)
         true
         (catch Throwable _
           false))))

   :cljs
   ;; CLJS: js/Date.parse returns NaN when the string does not parse.
   (defn- can-parse-iso-8601?
     [s]
     (when (string? s)
       (not= (.parse js/Date s) ##NaN))))
;; TODO -- currently these are all the same between date/time/datetime in CLJS
(def ^{:arglists '([s])} can-parse-date?
  "Returns whether a string can be parsed to an ISO 8601 date or not."
  #?(:clj  (partial can-parse-iso-8601? DateTimeFormatter/ISO_DATE)
     :cljs can-parse-iso-8601?))

(def ^{:arglists '([s])} can-parse-datetime?
  "Returns whether a string can be parsed to an ISO 8601 datetime or not."
  #?(:clj  (partial can-parse-iso-8601? DateTimeFormatter/ISO_DATE_TIME)
     :cljs can-parse-iso-8601?))

(def ^{:arglists '([s])} can-parse-time?
  "Returns whether a string can be parsed to an ISO 8601 time or not."
  #?(:clj  (partial can-parse-iso-8601? DateTimeFormatter/ISO_TIME)
     :cljs can-parse-iso-8601?))

;; NOTE(review): the error message below says "datetime" for the *date* schema — looks like a copy/paste slip,
;; left unchanged here since it is a runtime string.
(def LiteralDateString
  "Schema for an ISO-8601-formatted date string literal."
  (s/constrained helpers/NonBlankString can-parse-date? "valid ISO-8601 datetime string literal"))

(def LiteralDatetimeString
  "Schema for an ISO-8601-formatted datetime string literal."
  (s/constrained helpers/NonBlankString can-parse-datetime? "valid ISO-8601 datetime string literal"))

(def LiteralTimeString
  "Schema for an ISO-8601-formatted time string literal."
  (s/constrained helpers/NonBlankString can-parse-time? "valid ISO-8601 time string literal"))
;; `:relative-datetime` — a moment relative to "now": `n` is an integer offset, or `:current` for now itself.
(defclause relative-datetime
  n    (s/cond-pre (s/eq :current) s/Int)
  unit (optional RelativeDatetimeUnit))

;; `:interval` — a span of `n` `unit`s, used as an argument to datetime arithmetic (see `:+`/`:-` below).
(defclause interval
  n    s/Int
  unit RelativeDatetimeUnit)
;; This clause is automatically generated by middleware when datetime literals (literal strings or one of the Java
;; types) are encountered. Unit is inferred by looking at the Field the timestamp is compared against. Implemented
;; so that e.g.
;;
;;    [:= [:field 10 {:temporal-unit :day}] "2018-10-02"]
;;
;; becomes
;;
;;    [:= [:field 10 {:temporal-unit :day}] [:absolute-datetime #inst "2018-10-02" :day]]
(def ^:internal ^{:clause-name :absolute-datetime} absolute-datetime
  "Schema for an `:absolute-datetime` clause."
  (s/conditional
   ;; first branch: anything that is not (yet) an `:absolute-datetime` clause
   #(core/not (is-clause? :absolute-datetime %))
   (helpers/clause
    :absolute-datetime
    "t"
    #?(:clj  (s/cond-pre java.time.LocalDate java.time.LocalDateTime java.time.OffsetDateTime java.time.ZonedDateTime)
       :cljs js/Date)
    "unit"
    DateTimeUnit)

   ;; a date-only value may only use *date* bucketing units
   #(instance? #?(:clj java.time.LocalDate :cljs js/Date) (second %))
   (helpers/clause
    :absolute-datetime
    "date" #?(:clj java.time.LocalDate :cljs js/Date)
    "unit" DateUnit)

   ;; any other temporal value may use any *datetime* bucketing unit
   :else
   (helpers/clause
    :absolute-datetime
    "datetime"
    #?(:clj  (s/cond-pre java.time.LocalDateTime java.time.OffsetDateTime java.time.ZonedDateTime)
       :cljs js/Date)
    "unit"
    DateTimeUnit)))
;; Like `absolute-datetime`, but generated when the literal in question was
;; clearly a time (e.g. "08:00:00.000") and/or the Field derived from `:type/Time` and/or the unit was a
;; time-bucketing unit.
(defclause ^:internal time
  time #?(:clj  (s/cond-pre java.time.LocalTime java.time.OffsetTime)
          :cljs js/Date)
  unit TimeUnit)
;; order matters in the `s/conditional` branches below: already-wrapped clauses first, then strings, then raw types.
(def ^:private DateOrDatetimeLiteral
  "Schema for a valid date or datetime literal."
  (s/conditional
   (partial is-clause? :absolute-datetime)
   absolute-datetime

   can-parse-datetime?
   LiteralDatetimeString

   can-parse-date?
   LiteralDateString

   :else
   (s/cond-pre
    ;; literal datetime strings and Java types will get transformed to `absolute-datetime` clauses automatically by
    ;; middleware, so drivers normally never see these raw values directly.
    #?@(:clj
        [java.time.LocalDate
         java.time.LocalDateTime
         java.time.OffsetDateTime
         java.time.ZonedDateTime]
        :cljs
        [js/Date]))))

(def ^:private TimeLiteral
  "Schema for valid time literals."
  (s/conditional
   (partial is-clause? :time)
   time

   can-parse-time?
   LiteralTimeString

   :else
   (s/cond-pre
    ;; literal time strings and Java types will get transformed to `time` clauses automatically by
    ;; middleware, same as above.
    #?@(:clj
        [java.time.LocalTime
         java.time.OffsetTime]
        :cljs
        [js/Date]))))

(def ^:private TemporalLiteral
  "Schema for valid temporal literals."
  (s/cond-pre TimeLiteral DateOrDatetimeLiteral))
(def DateTimeValue
  "Schema for a datetime value drivers will personally have to handle, either an `absolute-datetime` form or a
  `relative-datetime` form."
  (one-of absolute-datetime relative-datetime time))

(def ValueTypeInfo
  "Type info about a value in a `:value` clause. Added automatically by `wrap-value-literals` middleware to values in
  filter clauses based on the Field in the clause."
  ;; TODO -- these keys should use `lisp-case` like everything else in MBQL.
  {(s/optional-key :database_type) (s/maybe helpers/NonBlankString)
   (s/optional-key :base_type)     (s/maybe helpers/FieldType)
   (s/optional-key :semantic_type) (s/maybe helpers/FieldSemanticOrRelationType)
   (s/optional-key :unit)          (s/maybe DateTimeUnit)
   (s/optional-key :name)          (s/maybe helpers/NonBlankString)
   s/Keyword                       s/Any})

;; A literal annotated with type info so drivers can cast it appropriately — e.g. a string compared to a UUID Field
;; must be cast to a UUID object, since text <-> UUID comparison doesn't work in Postgres. For this reason, raw
;; literals in `:filter` clauses are wrapped in `:value` clauses and given information about the type of the Field
;; they will be compared to.
(defclause ^:internal value
  value     s/Any
  type-info (s/maybe ValueTypeInfo))
;; Reference to a named expression defined in the query's `:expressions` map, e.g. one defined as
;; [:+ [:field 1 nil] [:field 2 nil]].
;; As of 0.42.0 `:expression` references can have an optional options map.
(defclause ^{:requires-features #{:expressions}} expression
  expression-name helpers/NonBlankString
  options         (optional (s/pred map? "map")))

(def BinningStrategyName
  "Schema for a valid value for the `strategy-name` param of a [[field]] clause with `:binning` information."
  (s/enum :num-bins :bin-width :default))
;; Wrap `schema` with a constraint: when the binning strategy is `:bin-width`, a (truthy) `:bin-width`
;; value must also be present. Any other strategy passes unconditionally.
(defn- validate-bin-width [schema]
  (letfn [(bin-width-ok? [{:keys [strategy bin-width]}]
            (if (core/= strategy :bin-width)
              bin-width
              true))]
    (s/constrained schema
                   bin-width-ok?
                   "You must specify :bin-width when using the :bin-width strategy.")))
;; Wrap `schema` with a constraint: when the binning strategy is `:num-bins`, a (truthy) `:num-bins`
;; value must also be present. Any other strategy passes unconditionally.
(defn- validate-num-bins [schema]
  (letfn [(num-bins-ok? [{:keys [strategy num-bins]}]
            (if (core/= strategy :num-bins)
              num-bins
              true))]
    (s/constrained schema
                   num-bins-ok?
                   "You must specify :num-bins when using the :num-bins strategy.")))
(def FieldBinningOptions
  "Schema for `:binning` options passed to a `:field` clause."
  ;; the two `validate-*` wrappers enforce that the chosen `:strategy` comes with its required parameter.
  (-> {:strategy                   BinningStrategyName
       (s/optional-key :num-bins)  helpers/IntGreaterThanZero
       (s/optional-key :bin-width) (s/constrained s/Num (complement neg?) "bin width must be >= 0.")
       s/Keyword                   s/Any}
      validate-bin-width
      validate-num-bins))
(defn valid-temporal-unit-for-base-type?
  "Whether `temporal-unit` (e.g. `:day`) is valid for the given `base-type` (e.g. `:type/Date`). If either is `nil` this
  will return truthy. Accepts either map of `field-options` or `base-type` and `temporal-unit` passed separately."
  ([{:keys [base-type temporal-unit] :as _field-options}]
   (valid-temporal-unit-for-base-type? base-type temporal-unit))

  ([base-type temporal-unit]
   ;; pick the allowed unit set for the base type; `nil` when either argument is missing or the
   ;; base type is not temporal at all (in which case anything goes).
   (let [allowed-units (when (core/and temporal-unit base-type)
                         (cond
                           (isa? base-type :type/Date)     date-bucketing-units
                           (isa? base-type :type/Time)     time-bucketing-units
                           (isa? base-type :type/DateTime) datetime-bucketing-units))]
     (if allowed-units
       (contains? allowed-units temporal-unit)
       true))))
;; Wrap `schema` with the temporal-unit/base-type compatibility check defined above.
(defn- validate-temporal-unit [schema]
  ;; TODO - consider breaking this out into separate constraints for the three different types so we can generate more
  ;; specific error messages
  (s/constrained
   schema
   valid-temporal-unit-for-base-type?
   "Invalid :temporal-unit for the specified :base-type."))

;; Binning options belong under the `:binning` key of the options map, never at its top level.
(defn- no-binning-options-at-top-level [schema]
  (s/constrained
   schema
   (complement :strategy)
   "Found :binning keys at the top level of :field options. binning-related options belong under the :binning key."))
(def ^:private FieldOptions
  (-> {(s/optional-key :base-type) (s/maybe helpers/FieldType)

       ;; `:source-field` is used to refer to a Field from a different Table you would like IMPLICITLY JOINED to the
       ;; source table.
       (s/optional-key :source-field) (s/maybe (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))

       ;; `:temporal-unit` is used to specify DATE BUCKETING for a Field that represents a moment in time of some sort.
       ;; There is no requirement that all `:type/Temporal` derived Fields specify a `:temporal-unit`, but for legacy
       ;; reasons bucketing is applied automatically by middleware, so drivers don't need to
       ;; do any special datetime handling for plain `:field` clauses when their Field derives from `:type/DateTime`.
       (s/optional-key :temporal-unit) (s/maybe DateTimeUnit)

       ;; `:join-alias` is used to refer to a Field from a different Table/nested query that you are
       ;; explicitly joining against.
       (s/optional-key :join-alias) (s/maybe helpers/NonBlankString)

       ;; `:binning` describes how to bin the Field's values; see [[FieldBinningOptions]].
       (s/optional-key :binning) (s/maybe FieldBinningOptions)

       s/Keyword s/Any}
      validate-temporal-unit
      no-binning-options-at-top-level))

;; when a `:field` clause identifies the Field by string name rather than integer ID, the options must say
;; what the Field's `:base-type` is.
(defn- require-base-type-for-field-name [schema]
  (s/constrained
   schema
   (fn [[_ id-or-name {:keys [base-type]}]]
     (if (string? id-or-name)
       base-type
       true))
   ":field clauses using a string field name must specify :base-type."))
(def ^{:clause-name :field, :added "0.39.0"} field
  "Schema for a `:field` clause."
  (-> (helpers/clause
       :field
       "id-or-name" (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString)
       "options"    (s/maybe (s/recursive #'FieldOptions)))
      require-base-type-for-field-name))

(def ^{:clause-name :field, :added "0.39.0"} field:id
  "Schema for a `:field` clause, with the added constraint that it must use an integer Field ID."
  (s/constrained
   field
   (fn [[_ id-or-name]]
     (integer? id-or-name))
   "Must be a :field with an integer Field ID."))

(def ^{:clause-name :field, :added "0.39.0"} field:name
  "Schema for a `:field` clause, with the added constraint that it must use an string Field name."
  (s/constrained
   field
   (fn [[_ id-or-name]]
     (string? id-or-name))
   "Must be a :field with a string Field name."))

(def ^:private Field*
  (one-of expression field))

;; TODO -- consider renaming this FieldOrExpression
(def Field
  "Schema for either a `:field` clause (reference to a Field) or an `:expression` clause (reference to an expression)."
  (s/recursive #'Field*))
;; Reference to an aggregation clause by its index in the query's `:aggregation` vector.
;; Currently aggregate Field references can only be used inside order-by clauses. In the future once we support SQL
;; `HAVING` we could allow them in filter clauses too.
;; As of 0.42.0 `:aggregation` references can have an optional options map.
(defclause aggregation
  aggregation-clause-index s/Int
  options                  (optional (s/pred map? "map")))

(def FieldOrAggregationReference
  "Schema for any type of valid Field clause, or for an indexed reference to an aggregation clause."
  (s/if (partial is-clause? :aggregation)
    aggregation
    Field))
(def string-expressions
  "String functions"
  #{:substring :trim :rtrim :ltrim :upper :lower :replace :concat :regex-match-first :coalesce :case})

(declare StringExpression)

;; argument to a string function: a literal string, a nested string expression, a `:value` clause, or a Field.
(def ^:private StringExpressionArg
  (s/conditional
   string?
   s/Str

   (partial is-clause? string-expressions)
   (s/recursive #'StringExpression)

   (partial is-clause? :value)
   value

   :else
   Field))

;; TODO - rename to numeric-expressions
(def arithmetic-expressions
  "Set of valid arithmetic expression clause keywords."
  #{:+ :- :/ :* :coalesce :length :round :ceil :floor :abs :power :sqrt :log :exp :case :datetime-diff})

(def boolean-expressions
  "Set of valid boolean expression clause keywords."
  #{:and :or :not :< :<= :> :>= := :!=})

;; clause keywords that denote aggregations; used for dispatch in the Arg schemas below.
(def ^:private aggregations #{:sum :avg :stddev :var :median :percentile :min :max :cum-count :cum-sum :count-where :sum-where :share :distinct :metric :aggregation-options :count})
;; Fix: the set literal's opening `#{` was lost during extraction (it sat on a line that also carried a
;; comment), leaving bare keywords as extra args to `def` — a compile error. Restored.
(def temporal-extract-functions
  "Functions to extract components of a date, datetime."
  #{:temporal-extract
    ;; the `get-*` forms are sugar for `:temporal-extract`; drivers do not need to implement them directly
    :get-year :get-quarter :get-month :get-week :get-day :get-day-of-week :get-hour :get-minute :get-second})
(def date-arithmetic-functions
  "Functions to do math with date, datetime."
  #{:+ :datetime-add :datetime-subtract})

(def date+time+timezone-functions
  "Date, time, and timezone related functions."
  (set/union temporal-extract-functions date-arithmetic-functions))
;; forward declarations for the mutually-recursive schemas below
(declare ArithmeticExpression)
(declare BooleanExpression)
(declare DatetimeExpression)
(declare Aggregation)

;; argument to a numeric function: literal number, nested numeric/datetime expression, aggregation,
;; `:value` clause, or Field. Branch order matters: literals are checked before clause dispatch.
(def ^:private NumericExpressionArg
  (s/conditional
   number?
   s/Num

   (partial is-clause? arithmetic-expressions)
   (s/recursive #'ArithmeticExpression)

   (partial is-clause? temporal-extract-functions)
   (s/recursive #'DatetimeExpression)

   (partial is-clause? aggregations)
   (s/recursive #'Aggregation)

   (partial is-clause? :value)
   value

   :else
   Field))

;; argument to a datetime function: aggregation, `:value` clause, nested date arithmetic, or a
;; date/datetime literal or Field.
(def ^:private DateTimeExpressionArg
  (s/conditional
   (partial is-clause? aggregations)
   (s/recursive #'Aggregation)

   (partial is-clause? :value)
   value

   (partial is-clause? date-arithmetic-functions)
   (s/recursive #'DatetimeExpression)

   :else
   (s/cond-pre DateOrDatetimeLiteral Field)))

;; argument in a context where any kind of expression is allowed.
(def ^:private ExpressionArg
  (s/conditional
   number?
   s/Num

   boolean?
   s/Bool

   (partial is-clause? boolean-expressions)
   (s/recursive #'BooleanExpression)

   (partial is-clause? arithmetic-expressions)
   (s/recursive #'ArithmeticExpression)

   string?
   s/Str

   (partial is-clause? string-expressions)
   (s/recursive #'StringExpression)

   (partial is-clause? temporal-extract-functions)
   (s/recursive #'DatetimeExpression)

   (partial is-clause? :value)
   value

   :else
   Field))

;; numeric arg, or an `:interval` clause — `:+`/`:-` accept intervals for datetime arithmetic.
(def ^:private NumericExpressionArgOrInterval
  (s/if (partial is-clause? :interval)
    interval
    NumericExpressionArg))
;; ------------------------------------ expression function clauses ------------------------------------

;; first non-nil argument
(defclause ^{:requires-features #{:expressions}} coalesce
  a ExpressionArg, b ExpressionArg, more (rest ExpressionArg))

(defclause ^{:requires-features #{:expressions}} substring
  s StringExpressionArg, start NumericExpressionArg, length (optional NumericExpressionArg))

(defclause ^{:requires-features #{:expressions}} length
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} trim
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} rtrim
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} ltrim
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} upper
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} lower
  s StringExpressionArg)

(defclause ^{:requires-features #{:expressions}} replace
  s StringExpressionArg, match s/Str, replacement s/Str)

(defclause ^{:requires-features #{:expressions}} concat
  a StringExpressionArg, b StringExpressionArg, more (rest StringExpressionArg))

(defclause ^{:requires-features #{:expressions :regex}} regex-match-first
  s StringExpressionArg, pattern s/Str)

;; `:+` and `:-` also accept `:interval` args, which is how datetime arithmetic with intervals is expressed.
(defclause ^{:requires-features #{:expressions}} +
  x NumericExpressionArgOrInterval, y NumericExpressionArgOrInterval, more (rest NumericExpressionArgOrInterval))

(defclause ^{:requires-features #{:expressions}} -
  x NumericExpressionArg, y NumericExpressionArgOrInterval, more (rest NumericExpressionArgOrInterval))

(defclause ^{:requires-features #{:expressions}} /, x NumericExpressionArg, y NumericExpressionArg, more (rest NumericExpressionArg))

(defclause ^{:requires-features #{:expressions}} *, x NumericExpressionArg, y NumericExpressionArg, more (rest NumericExpressionArg))

(defclause ^{:requires-features #{:expressions}} floor
  x NumericExpressionArg)

(defclause ^{:requires-features #{:expressions}} ceil
  x NumericExpressionArg)

(defclause ^{:requires-features #{:expressions}} round
  x NumericExpressionArg)

(defclause ^{:requires-features #{:expressions}} abs
  x NumericExpressionArg)

(defclause ^{:requires-features #{:advanced-math-expressions}} power
  x NumericExpressionArg, y NumericExpressionArg)

(defclause ^{:requires-features #{:advanced-math-expressions}} sqrt
  x NumericExpressionArg)

(defclause ^{:requires-features #{:advanced-math-expressions}} exp
  x NumericExpressionArg)

(defclause ^{:requires-features #{:advanced-math-expressions}} log
  x NumericExpressionArg)
;; TODO: rename to NumericExpression*
(declare ArithmeticExpression*)

;; TODO: rename to NumericExpression
(def ^:private ArithmeticExpression
  "Schema for the definition of an arithmetic expression. All arithmetic expressions evaluate to numeric values."
  (s/recursive #'ArithmeticExpression*))

;; Difference between two datetimes, expressed in `unit`s:
;; Days, weeks, months, and years are only counted if they are whole to the "day".
;; For example, `datetimeDiff("2022-01-30", "2022-02-28", "month")` returns 0 months.
;; `datetimeDiff("2022-01-01T09:00:00", "2022-01-02T08:00:00", "day")` returns 1 day even though it is less than 24 hours.
;; Hours, minutes, and seconds are only counted if they are whole.
;; For example, datetimeDiff("2022-01-01T01:00:30", "2022-01-01T02:00:29", "hour") returns 0 hours.
(defclause ^{:requires-features #{:datetime-diff}} datetime-diff
  datetime-x DateTimeExpressionArg
  datetime-y DateTimeExpressionArg
  unit       DatetimeDiffUnits)
;; Fix: the clause's closing paren was lost during extraction (it sat on a line that also carried a
;; comment), so the reader swallowed the following forms. Restored.
;; Extract a single component (year, month, hour, ...) from a temporal value.
(defclause ^{:requires-features #{:temporal-extract}} temporal-extract
  datetime DateTimeExpressionArg
  unit     TemporalExtractUnits)
;; the `get-*` clauses below are syntactic sugar for `:temporal-extract` with the corresponding unit.
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-year
  date DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-quarter
  date DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-month
  date DateTimeExpressionArg)

;; `mode` selects the week-numbering scheme; see [[ExtractWeekModes]].
(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-week
  date DateTimeExpressionArg
  mode (optional ExtractWeekModes))

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-day
  date DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-day-of-week
  date DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-hour
  datetime DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-minute
  datetime DateTimeExpressionArg)

(defclause ^{:requires-features #{:temporal-extract}} ^:sugar get-second
  datetime DateTimeExpressionArg)
;; units allowed in datetime-add/datetime-subtract (note: no `:default`, unlike RelativeDatetimeUnit).
(def ^:private ArithmeticDateTimeUnit
  (s/named
   (apply s/enum #{:millisecond :second :minute :hour :day :week :month :quarter :year})
   "arithmetic-datetime-unit"))

(defclause ^{:requires-features #{:date-arithmetics}} datetime-add
  datetime DateTimeExpressionArg
  amount   NumericExpressionArg
  unit     ArithmeticDateTimeUnit)

(defclause ^{:requires-features #{:date-arithmetics}} datetime-subtract
  datetime DateTimeExpressionArg
  amount   NumericExpressionArg
  unit     ArithmeticDateTimeUnit)

(def ^:private DatetimeExpression*
  (one-of + temporal-extract datetime-add datetime-subtract
          get-year get-quarter get-month get-week get-day get-day-of-week
          get-hour get-minute get-second))

(def DatetimeExpression
  "Schema for the definition of a date function expression."
  (s/recursive #'DatetimeExpression*))
(declare StringExpression*)

(def ^:private StringExpression
  "Schema for the definition of a string expression."
  (s/recursive #'StringExpression*))

;; ------------------------------------------------ Filter clauses ------------------------------------------------

(declare Filter)

(defclause and
  first-clause  (s/recursive #'Filter)
  second-clause (s/recursive #'Filter)
  other-clauses (rest (s/recursive #'Filter)))

(defclause or
  first-clause  (s/recursive #'Filter)
  second-clause (s/recursive #'Filter)
  other-clauses (rest (s/recursive #'Filter)))

(defclause not, clause (s/recursive #'Filter))

(def ^:private FieldOrRelativeDatetime
  (s/if (partial is-clause? :relative-datetime)
    relative-datetime
    Field))
(def ^:private EqualityComparable
  "Schema for things things that make sense in a `=` or `!=` filter, i.e. things that can be compared for equality."
  ;; `s/maybe`: `nil` is a legal operand for equality comparison.
  (s/maybe
   (s/cond-pre
    s/Bool
    s/Num
    s/Str
    TemporalLiteral
    FieldOrRelativeDatetime
    ExpressionArg
    value)))

(def ^:private OrderComparable
  "Schema for things that make sense in a filter like `>` or `<`, i.e. things that can be sorted."
  ;; unlike EqualityComparable, `nil` and booleans are not orderable.
  (s/if (partial is-clause? :value)
    value
    (s/cond-pre
     s/Num
     s/Str
     TemporalLiteral
     ExpressionArg
     FieldOrRelativeDatetime)))
;; For all of the non-compound Filter clauses below the first arg is an implicit Field ID.
;; `=` and `!=` may be called with more than 2 args; the extras are desugared, so driver
;; implementations only need to handle the 2-arg forms.
;;
;; `=` works like SQL `IN` with more than 2 args
;;
;;    [:= [:field 1 nil] 2 3] --[DESUGAR]--> [:or [:= [:field 1 nil] 2] [:= [:field 1 nil] 3]]
;;
;; `!=` works like SQL `NOT IN` with more than 2 args
;;
;;    [:!= [:field 1 nil] 2 3] --[DESUGAR]--> [:and [:!= [:field 1 nil] 2] [:!= [:field 1 nil] 3]]
(defclause =,  field EqualityComparable, value-or-field EqualityComparable, more-values-or-fields (rest EqualityComparable))
(defclause !=, field EqualityComparable, value-or-field EqualityComparable, more-values-or-fields (rest EqualityComparable))

(defclause <,  field OrderComparable, value-or-field OrderComparable)
(defclause >,  field OrderComparable, value-or-field OrderComparable)
(defclause <=, field OrderComparable, value-or-field OrderComparable)
(defclause >=, field OrderComparable, value-or-field OrderComparable)

(defclause between field OrderComparable, min OrderComparable, max OrderComparable)

;; SUGAR CLAUSE: This is automatically rewritten as a pair of `:between` clauses by the desugaring middleware.
(defclause ^:sugar inside
  lat-field OrderComparable
  lon-field OrderComparable
  lat-max   OrderComparable
  lon-min   OrderComparable
  lat-min   OrderComparable
  lon-max   OrderComparable)

;; null / empty checks — all sugar, desugared to `=`/`!=` comparisons by middleware.
(defclause ^:sugar is-null,   field Field)
(defclause ^:sugar not-null,  field Field)
(defclause ^:sugar is-empty,  field Field)
(defclause ^:sugar not-empty, field Field)
(def ^:private StringFilterOptions
;; string-matching filters; `options` may specify `:case-sensitive` (see StringFilterOptions above).
(defclause starts-with, field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(defclause ends-with,   field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(defclause contains,    field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))

;; SUGAR: desugared to [:not [:contains ...]]
(defclause ^:sugar does-not-contain
  field StringExpressionArg, string-or-field StringExpressionArg, options (optional StringFilterOptions))
(def ^:private TimeIntervalOptions
;; SUGAR: shorthand for a relative-datetime range filter. Examples:
;;
;; Return rows where datetime Field 100's value is in the current month
;;
;;    [:time-interval [:field 100 nil] :current :month]
;;
;; Return rows where datetime Field 100's value is in the current month, including partial results for the
;; current day
;;
;;    [:time-interval [:field 100 nil] :current :month {:include-current true}]
(defclause ^:sugar time-interval
  field   Field
  n       (s/cond-pre
           s/Int
           (s/enum :current :last :next))
  unit    RelativeDatetimeUnit
  options (optional TimeIntervalOptions))
;; A segment is a special `macro` that saves some pre-definied filter clause, e.g. [:segment 1];
;; this gets replaced by a normal Filter clause by macro-expansion middleware.
;;
;; It can also be used for GA, which looks something like `[:segment "gaid::-11"]`. GA segments aren't actually MBQL
;; segments and pass-thru to GA.
(defclause ^:sugar segment, segment-id (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))

(declare BooleanExpression*)

(def ^:private BooleanExpression
  "Schema for the definition of a boolean expression."
  (s/recursive #'BooleanExpression*))

(def ^:private BooleanExpression*
  (one-of and or not < <= > >= = !=))
(def ^:private Filter*
  (s/conditional
   (partial is-clause? arithmetic-expressions) ArithmeticExpression
   (partial is-clause? string-expressions)     StringExpression
   (partial is-clause? boolean-expressions)    BooleanExpression

   :else
   (one-of
    and or not = != < > <= >= between starts-with ends-with contains
    does-not-contain inside is-empty not-empty is-null not-null time-interval segment)))

(def Filter
  "Schema for a valid MBQL `:filter` clause."
  (s/recursive #'Filter*))

;; one `[pred expr]` pair inside a `:case` clause.
(def ^:private CaseClause [(s/one Filter "pred") (s/one ExpressionArg "expr")])

(def ^:private CaseClauses [CaseClause])

;; `:default` — the value to return when no pred matches.
(def ^:private CaseOptions
  {(s/optional-key :default) ExpressionArg})

(defclause ^{:requires-features #{:basic-aggregations}} case
  clauses CaseClauses, options (optional CaseOptions))

;; TODO: rename to NumericExpression?
(def ^:private ArithmeticExpression*
  (one-of + - / * coalesce length floor ceil round abs power sqrt exp log case datetime-diff))

(def ^:private StringExpression*
  (one-of substring trim ltrim rtrim replace lower upper concat regex-match-first coalesce case))
(def FieldOrExpressionDef
  "Schema for anything that is accepted as a top-level expression definition, either an arithmetic expression such as a
  `:+` clause or a `:field` clause."
  (s/conditional
   (partial is-clause? arithmetic-expressions)       ArithmeticExpression
   (partial is-clause? string-expressions)           StringExpression
   (partial is-clause? boolean-expressions)          BooleanExpression
   (partial is-clause? date+time+timezone-functions) DatetimeExpression
   (partial is-clause? :case)                        case
   :else                                             Field))
;; For all of the 'normal' Aggregations below (excluding Metrics) fields are implicit Field IDs

(defclause ^{:requires-features #{:basic-aggregations}} ^:sugar count,     field (optional Field))
(defclause ^{:requires-features #{:basic-aggregations}} ^:sugar cum-count, field (optional Field))

;; the aggregations below may also take an expression as their argument, e.g.
;;
;;    [[:sum [:+ [:field 1 nil] [:field 2 nil]]]]
;;
;; i.e. SUM(field_1 + field_2)
(defclause ^{:requires-features #{:basic-aggregations}} avg,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} cum-sum,  field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} distinct, field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} sum,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} min,      field-or-expression FieldOrExpressionDef)
(defclause ^{:requires-features #{:basic-aggregations}} max,      field-or-expression FieldOrExpressionDef)

(defclause ^{:requires-features #{:basic-aggregations}} sum-where
  field-or-expression FieldOrExpressionDef, pred Filter)

(defclause ^{:requires-features #{:basic-aggregations}} count-where
  pred Filter)

(defclause ^{:requires-features #{:basic-aggregations}} share
  pred Filter)

(defclause ^{:requires-features #{:standard-deviation-aggregations}} stddev
  field-or-expression FieldOrExpressionDef)

;; clause keyword is `:var`; bound here as `ag:var` to avoid clashing with clojure.core/var.
(defclause ^{:requires-features #{:standard-deviation-aggregations}} [ag:var var]
  field-or-expression FieldOrExpressionDef)

(defclause ^{:requires-features #{:percentile-aggregations}} median
  field-or-expression FieldOrExpressionDef)

(defclause ^{:requires-features #{:percentile-aggregations}} percentile
  field-or-expression FieldOrExpressionDef, percentile NumericExpressionArg)

;; METRICS WITH STRING IDS, e.g. `[:metric "ga:sessions"]`, are Google Analytics metrics, not Metabase metrics! They
;; pass straight thru to the GA query processor.
(defclause ^:sugar metric, metric-id (s/cond-pre helpers/IntGreaterThanZero helpers/NonBlankString))
;; aggregations can themselves be arithmetic expressions over aggregations, e.g.
;;
;;    [:+ [:sum [:field 10 nil]] [:sum [:field 20 nil]]]
(def ^:private UnnamedAggregation*
  (s/if (partial is-clause? arithmetic-expressions)
    ArithmeticExpression
    (one-of avg cum-sum distinct stddev sum min max metric share count-where
            sum-where case median percentile ag:var
            cum-count count)))

(def ^:private UnnamedAggregation
  (s/recursive #'UnnamedAggregation*))
;; Fix: the options map's opening `{` was lost during extraction (it sat on a line that also carried a
;; comment), leaving `def` with too many args — a compile error. Restored.
(def AggregationOptions
  "Additional options for any aggregation clause when wrapping it in `:aggregation-options`."
  {;; name to use for this aggregation in the results — presumably the column alias; verify against the QP
   (s/optional-key :name)         helpers/NonBlankString
   ;; user-facing display name for this aggregation instead of the default one
   (s/optional-key :display-name) helpers/NonBlankString
   s/Keyword                      s/Any})
;; wraps an aggregation with extra options, e.g. [:aggregation-options [:sum [:field 1 nil]] {:name "my_sum"}]
(defclause aggregation-options
  aggregation UnnamedAggregation
  options     AggregationOptions)

(def Aggregation
  "Schema for anything that is a valid `:aggregation` clause."
  (s/if (partial is-clause? :aggregation-options)
    aggregation-options
    UnnamedAggregation))

;; ------------------------------------------------- Order-By -------------------------------------------------
;;
;;    {:order-by [[:asc [:field 1 nil]], [:desc [:field 2 nil]]]}
(defclause asc,  field FieldOrAggregationReference)
(defclause desc, field FieldOrAggregationReference)

(def OrderBy
  "Schema for an `order-by` clause subclause."
  (one-of asc desc))
;; Template tags are placeholders in native queries, replaced with some sort of value before
;; the query itself runs. There are four basic types of template tag for native queries:
;;
;; 1. Field filters, which are used like
;;
;;    These reference specific Fields and are replaced with entire conditions, e.g. `some_field > 1000`
;;
;; 2. Raw values, which are used like
;;
;;    SELECT * FROM table WHERE field = {{x}}
;;
;; 3. Native query snippets
;;
;; 4. Source query Card IDs, which are used like
;;
;;    SELECT * FROM ({{#123}}) source
;;
;; Field filters and raw values usually have their value specified by `:parameters` (see [[Parameters]] below).
;;
;; NOTE(review): [[raw-value-template-tag-types]] below includes `:boolean`, which is absent from this enum —
;; verify whether that is intentional.
(def TemplateTagType
  "Schema for valid values of template tag `:type`."
  (s/enum :snippet :card :dimension :number :text :date))
;; Fix: the map's opening `{` was lost during extraction (it sat on the line carrying the TODO comment),
;; leaving `def` with too many args — a compile error. Restored.
(def ^:private TemplateTag:Common
  "Things required by all template tag types."
  {;; TODO -- `:id` is actually 100% required but we have a lot of tests that don't specify it because this constraint
   ;; was not enforced historically
   (s/optional-key :id) helpers/NonBlankString
   :name                helpers/NonBlankString
   :display-name        helpers/NonBlankString
   s/Keyword            s/Any})
;; Example (partial):
;;
;;    {:id           "c2fc7310-44eb-4f21-c3a0-63806ffb7ddd"
;;     :display-name "Snippet: select"
;;     :snippet-id   1}
(def TemplateTag:Snippet
  "Schema for a native query snippet template tag."
  (merge
   TemplateTag:Common
   {:type         (s/eq :snippet)
    :snippet-name helpers/NonBlankString
    :snippet-id   helpers/IntGreaterThanZero

    ;; database to which this Snippet belongs. Doesn't always seem to be specified.
    (s/optional-key :database) helpers/IntGreaterThanZero}))

;; Example (partial):
;;
;;    {:name         "#1635"
;;     :display-name "#1635"
;;     :card-id      1635}
(def TemplateTag:SourceQuery
  "Schema for a source query template tag."
  (merge
   TemplateTag:Common
   {:type    (s/eq :card)
    :card-id helpers/IntGreaterThanZero}))
;; Fix: the map's opening `{` was lost during extraction (it sat on a line that also carried a comment),
;; making the `merge` call malformed. Restored.
(def ^:private TemplateTag:Value:Common
  "Stuff shared between the Field filter and raw value template tag schemas."
  (merge
   TemplateTag:Common
   {;; default value for this parameter
    (s/optional-key :default)  s/Any
    ;; whether the user must supply a value before the query can run
    (s/optional-key :required) s/Bool}))
(declare ParameterType)

;; Example (partial):
;;
;;    {:id        "c20851c7-8a80-0ffa-8a99-ae636f0e9539"
;;     :type      :dimension
;;     :dimension [:field 4 nil]}
(def TemplateTag:FieldFilter
  "Schema for a field filter template tag."
  (merge
   TemplateTag:Value:Common
   {:type        (s/eq :dimension)
    ;; the Field this filter is bound to
    :dimension   field
    ;; which frontend widget to show for this filter
    :widget-type (s/recursive #'ParameterType)}))
(def raw-value-template-tag-types
"Set of valid values of `:type` for raw value template tags."
#{:number :text :date :boolean})
(def TemplateTag:RawValue:Type
"Valid values of `:type` for raw value template tags."
(apply s/enum raw-value-template-tag-types))
(def TemplateTag:RawValue
"Schema for a raw value template tag."
(merge
TemplateTag:Value:Common
` : type ` is used be the FE to determine which type of widget to display for the template tag , and to determine
{:type TemplateTag:RawValue:Type}))
TODO -- if we were using core.spec here I would make this a multimethod - based spec instead and have it dispatch off
(def TemplateTag
"Schema for a template tag as specified in a native query. There are four types of template tags, differentiated by
`:type` (see comments above)."
(s/conditional
#(core/= (:type %) :dimension) TemplateTag:FieldFilter
#(core/= (:type %) :snippet) TemplateTag:Snippet
#(core/= (:type %) :card) TemplateTag:SourceQuery
:else TemplateTag:RawValue))
(def TemplateTagMap
"Schema for the `:template-tags` map passed in as part of a native query."
(-> {helpers/NonBlankString TemplateTag}
(s/constrained (fn [m]
(every? (fn [[tag-name tag-definition]]
(core/= tag-name (:name tag-definition)))
m))
"keys in template tag map must match the :name of their values")))
(def NativeQuery
"Schema for a valid, normalized native [inner] query."
{:query s/Any
(s/optional-key :template-tags) TemplateTagMap
(s/optional-key :collection) (s/maybe helpers/NonBlankString)
other stuff gets added in my different bits of QP middleware to record bits of state or pass info around .
s/Keyword s/Any})
(declare Query MBQLQuery)
(def SourceQuery
"Schema for a valid value for a `:source-query` clause."
(s/if (every-pred map? :native)
(set/rename-keys NativeQuery {:query :native})
(s/recursive #'MBQLQuery)))
(def SourceQueryMetadata
"Schema for the expected keys for a single column in `:source-metadata` (`:source-metadata` is a sequence of these
entries), if it is passed in to the query.
This metadata automatically gets added for all source queries that are referenced via the `card__id` `:source-table`
`:source-query`s."
{:name helpers/NonBlankString
:base_type helpers/FieldType
:display_name helpers/NonBlankString
(s/optional-key :semantic_type) (s/maybe helpers/FieldSemanticOrRelationType)
you 'll need to provide this in order to use BINNING
(s/optional-key :fingerprint) (s/maybe helpers/Map)
s/Any s/Any})
(def source-table-card-id-regex
"Pattern that matches `card__id` strings that can be used as the `:source-table` of MBQL queries."
#"^card__[1-9]\d*$")
(def SourceTable
"Schema for a valid value for the `:source-table` clause of an MBQL query."
(s/cond-pre helpers/IntGreaterThanZero source-table-card-id-regex))
(def join-strategies
"Valid values of the `:strategy` key in a join map."
#{:left-join :right-join :inner-join :full-join})
(def JoinStrategy
"Strategy that should be used to perform the equivalent of a SQL `JOIN` against another table or a nested query.
driver supports `:full-join` before generating a Join clause using that strategy."
(apply s/enum join-strategies))
(declare Fields)
(def Join
"Perform the equivalent of a SQL `JOIN` with another Table or nested `:source-query`. JOINs are either explicitly
specified in the incoming query, or implicitly generated when one uses a `:field` clause with `:source-field`.
In the top-level query, you can reference Fields from the joined table or nested query by including `:source-field`
the `:field` options, e.g.
[:field 1 {:join-alias \"my_join_alias\"}]
[:field \"my_field\" {:base-type :field/Integer, :join-alias \"my_join_alias\"}]"
(->
(s/optional-key :source-table)
SourceTable
(s/optional-key :source-query)
SourceQuery
:condition
Filter
(s/optional-key :strategy)
JoinStrategy
The to include in the results * if * a top - level ` : fields ` clause * is not * specified . This can be either
` : none ` , ` : all ` , or a sequence of Field clauses .
* ` : all ` : will include all of the Fields from the joined table or query
* a sequence of Field clauses : include only the specified . Valid clauses are the same as the top - level
(s/optional-key :fields)
(s/named
(s/cond-pre
(s/enum :all :none)
(s/recursive #'Fields))
"Valid Join `:fields`: `:all`, `:none`, or a sequence of `:field` clauses that have `:join-alias`.")
(s/optional-key :alias)
helpers/NonBlankString
(s/optional-key :fk-field-id)
(s/maybe helpers/IntGreaterThanZero)
Metadata about the source query being used , if pulled in from a Card via the ` : source - table " card__id " ` syntax .
(s/optional-key :source-metadata)
(s/maybe [SourceQueryMetadata])
s/Keyword s/Any}
(s/constrained
(every-pred
(some-fn :source-table :source-query)
(complement (every-pred :source-table :source-query)))
"Joins must have either a `source-table` or `source-query`, but not both.")))
(def Joins
"Schema for a valid sequence of `Join`s. Must be a non-empty sequence, and `:alias`, if specified, must be unique."
(s/constrained
(helpers/non-empty [Join])
#(helpers/empty-or-distinct? (filter some? (map :alias %)))
"All join aliases must be unique."))
(def Fields
"Schema for valid values of the MBQL `:fields` clause."
(s/named
(helpers/distinct (helpers/non-empty [Field]))
"Distinct, non-empty sequence of Field clauses"))
(def MBQLQuery
"Schema for a valid, normalized MBQL [inner] query."
(->
{(s/optional-key :source-query) SourceQuery
(s/optional-key :source-table) SourceTable
(s/optional-key :aggregation) (helpers/non-empty [Aggregation])
(s/optional-key :breakout) (helpers/non-empty [Field])
(s/optional-key :expressions) {helpers/NonBlankString FieldOrExpressionDef}
(s/optional-key :fields) Fields
(s/optional-key :filter) Filter
(s/optional-key :limit) helpers/IntGreaterThanOrEqualToZero
(s/optional-key :order-by) (helpers/distinct (helpers/non-empty [OrderBy]))
page = page , starting with 1 . items = number of items per page .
{ : page 1 , : items 10 } = items 1 - 10
{ : page 2 , : items 10 } = items 11 - 20
(s/optional-key :page) {:page helpers/IntGreaterThanZero
:items helpers/IntGreaterThanZero}
(s/optional-key :joins) Joins
used to let power things like binning when used with Field Literals instead of normal
(s/optional-key :source-metadata) (s/maybe [SourceQueryMetadata])
s/Keyword s/Any}
(s/constrained
(fn [query]
(core/= 1 (core/count (select-keys query [:source-query :source-table]))))
"Query must specify either `:source-table` or `:source-query`, but not both.")
(s/constrained
(fn [{:keys [breakout fields]}]
(empty? (set/intersection (set breakout) (set fields))))
"Fields specified in `:breakout` should not be specified in `:fields`; this is implied.")))
There are three things called ' type ' in play when we talk about parameters and template tags .
Two are used when the parameters are specified / declared , in a [ [ TemplateTag ] ] or in a Dashboard parameter :
1 . Dashboard parameter / template tag ` : type ` -- ` : dimension ` ( for a Field filter parameter ) ,
2 . ` : widget - type ` -- only specified for Field filter parameters ( where type is ` : dimension ` ) . This tells the FE
One type is used in the [ [ Parameter ] ] list ( ` : parameters ` ):
3 . Parameter ` : type ` -- specifies the type of the value being passed in . e.g. ` : text ` or ` : string/!= `
currently still allowed for backwards - compatibility purposes -- currently the FE client will just parrot back the
(def parameter-types
"Map of parameter-type -> info. Info is a map with the following keys:
### `:type`
The general type of this parameter. `:numeric`, `:string`, `:boolean`, or `:date`, if applicable. Some parameter
types like `:id` and `:category` don't have a particular `:type`. This is offered mostly so we can group stuff
together or determine things like whether a given parameter is a date parameter.
### `:operator`
Signifies this is one of the new 'operator' parameter types added in 0.39.0 or so. These parameters can only be used
for [[TemplateTag:FieldFilter]]s or for Dashboard parameters mapped to MBQL queries. The value of this key is the
arity for the parameter, either `:unary`, `:binary`, or `:variadic`. See
the [[metabase.driver.common.parameters.operators]] namespace for more information.
### `:allowed-for`
[[Parameter]]s with this `:type` may be supplied for [[TemplateTag]]s with these `:type`s (or `:widget-type` if
`:type` is `:dimension`) types. Example: it is ok to pass a parameter of type `:date/range` for template tag with
with a widget type `:date`. Why? It's a potential security risk if someone creates a Card with an \"exact-match\"
Field filter like `:date` or `:text` and you pass in a parameter like `string/!=` `NOTHING_WILL_MATCH_THIS`.
Non-exact-match parameters can be abused to enumerate *all* the rows in a table when the parameter was supposed to
lock the results down to a single row or set of rows."
the basic raw - value types . These can be used with [ [ TemplateTag : RawValue ] ] template tags as well as
:number {:type :numeric, :allowed-for #{:number :number/= :id :category :series-category :location/zip_code}}
:text {:type :string, :allowed-for #{:text :string/= :id :category :series-category
:location/city :location/state :location/zip_code :location/country}}
:date {:type :date, :allowed-for #{:date :date/single :date/all-options :id :category :series-category}}
I do n't think ` : boolean ` is actually used on the FE at all .
:boolean {:type :boolean, :allowed-for #{:boolean :id :category :series-category}}
:date/single {:type :date, :allowed-for #{:date :date/single :date/all-options :id :category :series-category}}
for MBQL queries or Field filters in native queries
widget types . They have something to do with telling the frontend to show FieldValues list / search widgets or
the actual type of the parameter based on the Field we 're filtering on . Or something like that . Parameters with
TODO FIXME -- actually , it turns out the the FE client passes parameter type ` : category ` for parameters in
:id {:allowed-for #{:id}}
:category {:allowed-for #{:category #_FIXME :number :text :date :boolean}}
:series-category {:allowed-for #{:series-category #_FIXME :number :text :date :boolean}}
:location/city {:allowed-for #{:location/city}}
:location/state {:allowed-for #{:location/state}}
:location/zip_code {:allowed-for #{:location/zip_code}}
:location/country {:allowed-for #{:location/country}}
:date/range {:type :date, :allowed-for #{:date/range :date/all-options :date/series-date}}
:date/month-year {:type :date, :allowed-for #{:date/month-year :date/all-options :date/series-date}}
:date/quarter-year {:type :date, :allowed-for #{:date/quarter-year :date/all-options :date/series-date}}
:date/relative {:type :date, :allowed-for #{:date/relative :date/all-options :date/series-date}}
:date/all-options {:type :date, :allowed-for #{:date/all-options}}
:date/series-date {:type :date, :allowed-for #{:date/series-date}}
:number/!= {:type :numeric, :operator :variadic, :allowed-for #{:number/!=}}
:number/<= {:type :numeric, :operator :unary, :allowed-for #{:number/<=}}
:number/= {:type :numeric, :operator :variadic, :allowed-for #{:number/= :number :id :category :series-category
:location/zip_code}}
:number/>= {:type :numeric, :operator :unary, :allowed-for #{:number/>=}}
:number/between {:type :numeric, :operator :binary, :allowed-for #{:number/between}}
:string/!= {:type :string, :operator :variadic, :allowed-for #{:string/!=}}
:string/= {:type :string, :operator :variadic, :allowed-for #{:string/= :text :id :category :series-category
:location/city :location/state
:location/zip_code :location/country}}
:string/contains {:type :string, :operator :unary, :allowed-for #{:string/contains}}
:string/does-not-contain {:type :string, :operator :unary, :allowed-for #{:string/does-not-contain}}
:string/ends-with {:type :string, :operator :unary, :allowed-for #{:string/ends-with}}
:string/starts-with {:type :string, :operator :unary, :allowed-for #{:string/starts-with}}})
(defn valid-parameter-type?
"Whether `param-type` is a valid non-abstract parameter type."
[param-type]
(get parameter-types param-type))
(def ParameterType
"Schema for valid values of `:type` for a [[Parameter]]."
(apply s/enum (keys parameter-types)))
{ : target [: dimension [: template - tag " my_tag " ] ] }
{ : target [: dimension [: field 100 nil ] ] }
{ : target [: field 100 nil ] }
I 'm not 100 % clear on which situations we 'll get which version . But I think the following is generally true :
* Things are wrapped in ` : dimension ` when we 're dealing with Field filter template tags
One more thing to note : apparently ` : expression ` ... is allowed below as well . I 'm not sure how this is actually
supposed to work , but we have test # 18747 that attempts to set it . I 'm not convinced this should actually be
(defclause template-tag
tag-name
(s/cond-pre helpers/NonBlankString
{:id helpers/NonBlankString}))
(defclause dimension
target (s/cond-pre Field template-tag))
(defclause variable
target template-tag)
(def ParameterTarget
"Schema for the value of `:target` in a [[Parameter]]."
not 100 % sure about this but ` field ` on its own comes from a Dashboard parameter and when it 's wrapped in
` dimension ` it comes from a Field filter template tag parameter ( do n't quote me on this -- working theory )
(s/cond-pre
Field
(one-of dimension variable)))
(def Parameter
"Schema for the *value* of a parameter (e.g. a Dashboard parameter or a native query template tag) as passed in as
part of the `:parameters` list in a query."
{:type ParameterType
TODO -- these definitely SHOULD NOT be optional but a ton of tests are n't passing them in like they should be .
(s/optional-key :id) helpers/NonBlankString
(s/optional-key :target) ParameterTarget
on the [ [ ParameterType ] ]
(s/optional-key :value) s/Any
(s/optional-key :name) helpers/NonBlankString
(s/optional-key :slug) helpers/NonBlankString
(s/optional-key :default) s/Any
s/Keyword s/Any})
(def ParameterList
"Schema for a list of `:parameters` as passed in to a query."
[Parameter]
#_(->
TODO -- disabled for now since it breaks tests . Also , I 'm not sure whether these should be distinct by
(s/constrained (fn [parameters]
(apply distinct? (map :id parameters)))
"Cannot specify parameter more than once; IDs must be distinct")))
(def ^:private Settings
"Options that tweak the behavior of the query processor."
{(s/optional-key :report-timezone) helpers/NonBlankString
s/Keyword s/Any})
(def ^:private Constraints
"Additional constraints added to a query limiting the maximum number of rows that can be returned. Mostly useful
because native queries don't support the MBQL `:limit` clause. For MBQL queries, if `:limit` is set, it will
override these values."
(s/constrained
(s/optional-key :max-results) helpers/IntGreaterThanOrEqualToZero
(s/optional-key :max-results-bare-rows) helpers/IntGreaterThanOrEqualToZero
s/Keyword s/Any}
(fn [{:keys [max-results max-results-bare-rows]}]
(if-not (core/and max-results max-results-bare-rows)
true
(core/>= max-results max-results-bare-rows)))
"max-results-bare-rows must be less or equal to than max-results"))
(def ^:private MiddlewareOptions
"Additional options that can be used to toggle middleware on or off."
(s/optional-key :skip-results-metadata?)
s/Bool
should we skip converting datetime types to ISO-8601 strings with appropriate timezone when post - processing
(s/optional-key :format-rows?)
s/Bool
disable the MBQL->native middleware . If you do this , the query will not work at all , so there are no cases where
(s/optional-key :disable-mbql->native?)
s/Bool
(s/optional-key :disable-max-results?)
s/Bool
Userland queries are ones ran as a result of an API call , Pulse , or the like . Special handling is done in the
(s/optional-key :userland-query?)
(s/maybe s/Bool)
(s/optional-key :add-default-userland-constraints?)
(s/maybe s/Bool)
(s/optional-key :process-viz-settings?)
(s/maybe s/Bool)
s/Keyword
s/Any})
(def Context
"Schema for `info.context`; used for informational purposes to record how a query was executed."
(s/enum :ad-hoc
:collection
:csv-download
:dashboard
:embedded-dashboard
:embedded-question
:json-download
:map-tiles
:public-dashboard
:public-question
:pulse
:question
:xlsx-download))
TODO - this schema is somewhat misleading because if you use a function like
(def Info
"Schema for query `:info` dictionary, which is used for informational purposes to record information about how a query
was executed in QueryExecution and other places. It is considered bad form for middleware to change its behavior
based on this information, don't do it!"
(s/optional-key :context) (s/maybe Context)
(s/optional-key :executed-by) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :card-id) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :card-name) (s/maybe helpers/NonBlankString)
(s/optional-key :dashboard-id) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :pulse-id) (s/maybe helpers/IntGreaterThanZero)
(s/optional-key :metadata/dataset-metadata) (s/maybe [{s/Any s/Any}])
and have the code that saves QueryExceutions figure out their values when it goes to save them
(s/optional-key :query-hash) (s/maybe #?(:clj (Class/forName "[B")
:cljs s/Any))})
(def ^Integer saved-questions-virtual-database-id
"The ID used to signify that a database is 'virtual' rather than physical.
A fake integer ID is used so as to minimize the number of changes that need to be made on the frontend -- by using
something that would otherwise be a legal ID, *nothing* need change there, and the frontend can query against this
'database' none the wiser. (This integer ID is negative which means it will never conflict with a *real* database
ID.)
This ID acts as a sort of flag. The relevant places in the middleware can check whether the DB we're querying is
this 'virtual' database and take the appropriate actions."
-1337)
To the reader : yes , this seems sort of hacky , but one of the goals of the Nested Query Initiative ™ was to minimize
(def DatabaseID
"Schema for a valid `:database` ID, in the top-level 'outer' query. Either a positive integer (referring to an
actual Database), or the saved questions virtual ID, which is a placeholder used for queries using the
`:source-table \"card__id\"` shorthand for a source query resolved by middleware (since clients might not know the
actual DB for that source query.)"
(s/cond-pre (s/eq saved-questions-virtual-database-id) helpers/IntGreaterThanZero))
(def Query
"Schema for an [outer] query, e.g. the sort of thing you'd pass to the query processor or save in
`Card.dataset_query`."
(->
{:database DatabaseID
:type (s/enum :query :native)
(s/optional-key :native) NativeQuery
(s/optional-key :query) MBQLQuery
(s/optional-key :parameters) ParameterList
(s/optional-key :settings) (s/maybe Settings)
(s/optional-key :constraints) (s/maybe Constraints)
(s/optional-key :middleware) (s/maybe MiddlewareOptions)
(s/optional-key :info) (s/maybe Info)
Other various keys get stuck in the query dictionary at some point or another by various pieces of QP
s/Keyword s/Any}
(s/constrained
(every-pred
(some-fn :native :query)
(complement (every-pred :native :query)))
"Query must specify either `:native` or `:query`, but not both.")
(s/constrained
(fn [{native :native, mbql :query, query-type :type}]
(core/case query-type
:native native
:query mbql))
"Native queries must specify `:native`; MBQL queries must specify `:query`.")
Where this is added was changed in Metabase 0.33.0 -- previously , when ` card__id ` source queries were resolved ,
(s/constrained
(complement :source-metadata)
"`:source-metadata` should be added in the same level as `:source-query` (i.e., the 'inner' MBQL query.)")))
(def ^{:arglists '([query])} validate-query
instead of calling `(s/validate Query query)` or similar."
(s/validator Query))
|
c13cff1b8a5c38698601cb3a47760aee32879c67ac4287cd0fc5ef831380d643 | TyGuS/hoogle_plus | PNEncoder.hs | # LANGUAGE FlexibleInstances #
# LANGUAGE FlexibleContexts #
# LANGUAGE TupleSections #
module PetriNet.PNEncoder(
encoderInit
, encoderSolve
, encoderRefine
, encoderInc
) where
import Data.Maybe
import Data.List
import Data.List.Extra
import Data.Hashable
import Data.HashMap.Strict (HashMap)
import qualified Data.HashMap.Strict as HashMap
import Data.Set (Set)
import qualified Data.Set as Set
import Z3.Monad hiding(Z3Env, newEnv)
import qualified Z3.Base as Z3
import Control.Monad.State
import System.CPUTime
import Text.Printf
import Data.Text (pack, unpack, replace)
import System.IO
import System.Process
import Types.Common
import Types.Encoder
import Types.Abstract
import PetriNet.AbstractType
import PetriNet.Util
import Synquid.Util
import Synquid.Pretty
-- Wire the 'Encoder' state monad into the z3 bindings: the solver and
-- context are read from the 'z3env' field of the encoder state, so all
-- Z3.Monad operations run against the environment stored there.
instance MonadZ3 Encoder where
    getSolver = gets (envSolver . z3env)
    getContext = gets (envContext . z3env)
-- | Build the whole Petri-net encoding inside z3: declare every
-- variable, assert the structural constraints for the given component
-- signatures, and pin down the initial and final markings.
createEncoder :: [AbstractSkeleton] -> AbstractSkeleton -> [FunctionCode] -> Encoder ()
createEncoder inputs ret sigs = do
    typeMap <- gets ty2tr
    let allPlaces = HashMap.keys typeMap
        allTrans  = Set.toList (Set.unions (HashMap.elems typeMap))
    -- one integer variable per (place, timestep), per transition, per step
    createVariables allPlaces allTrans
    -- structural constraints relating markings across timesteps
    createConstraints allPlaces sigs
    -- tokens start in the input places and must end in the return place
    setInitialState inputs allPlaces
    setFinalState ret allPlaces
-- | Constrain the marking at time 0: every place that is not an input
-- holds zero tokens, and — when argument use is enforced — each input
-- place holds as many tokens as it occurs in the input list (at least
-- that many when clone transitions are disabled, since tokens cannot be
-- duplicated later).
setInitialState :: [AbstractSkeleton] -> [AbstractSkeleton] -> Encoder ()
setInitialState inputs places = do
    -- multiplicity of each distinct input type
    let multiplicities = [ (head grp, length grp) | grp <- group (sort inputs) ]
    sequence_ [ assignToken p 0 | p <- places, p `notElem` inputs ]
    enforceArg <- gets useArguments
    when enforceArg $ sequence_ [ assignInput p n | (p, n) <- multiplicities ]
  where
    -- exact token count for a place at time 0
    assignToken place n = do
        v <- gets (findVariable "place2variable" (place, 0) . place2variable)
        eq <- mkEq v =<< mkIntNum n
        modify $ \st -> st { optionalConstraints = eq : optionalConstraints st }
    -- input places: >= when clones are disabled, == otherwise
    assignInput place n = do
        v <- gets (findVariable "place2variable" (place, 0) . place2variable)
        noClone <- gets disableClones
        num <- mkIntNum n
        c <- if noClone then mkGe v num else mkEq v num
        modify $ \st -> st { optionalConstraints = c : optionalConstraints st }
-- | Set the final solver state: at the last timestep the return place
-- holds exactly one token and every other place holds none.
setFinalState :: AbstractSkeleton -> [AbstractSkeleton] -> Encoder ()
setFinalState ret places = do
    -- the return value should have only one token
    includeRet
    -- all other places should be empty at the end
    let nonOutputs = filter (ret /=) places
    mapM_ excludeOther nonOutputs
  where
    -- token count of the return place at time 'loc' equals 1
    includeRet = do
        placeMap <- gets place2variable
        l <- gets loc
        let retVar = findVariable "place2variable" (ret, l) placeMap
        assrt <- mkIntNum 1 >>= mkEq retVar
        modify $ \st -> st { finalConstraints = assrt : finalConstraints st }
    -- token count of any non-return place at time 'loc' equals 0
    excludeOther p = do
        l <- gets loc
        placeMap <- gets place2variable
        let tVar = findVariable "place2variable" (p, l) placeMap
        eq <- mkIntNum 0 >>= mkEq tVar
        modify $ \st -> st { finalConstraints = eq : finalConstraints st }
-- | Current constraint counter paired with the boolean sort; used when
-- minting fresh named assumption literals for incremental solving.
getParam :: Encoder (Int, Z3.Sort)
getParam = (,) <$> gets counter <*> mkBoolSort
-- | Assert the negation of the assumption literal @name ++ counter@,
-- permanently retiring the constraint group guarded by that literal.
cancelConstraints :: String -> Encoder ()
cancelConstraints name = do
    (cnt, boolS) <- getParam
    sym <- mkStringSymbol (name ++ show cnt)
    lit <- mkConst sym boolS
    assert =<< mkNot lit
-- | Push every stored constraint group into the solver, in the order
-- persistent, optional, final, blocking. Used by the non-incremental
-- path, which re-asserts everything after each solver reset.
addAllConstraints :: Encoder ()
addAllConstraints = do
    st <- get
    mapM_ assert (persistConstraints st)
    mapM_ assert (optionalConstraints st)
    mapM_ assert (finalConstraints st)
    mapM_ assert (blockConstraints st)
-- | Solve without assumptions: if the previous round produced a model,
-- promote its blocking clause into 'blockConstraints', re-assert every
-- constraint group from scratch, and check.
--
-- Fix: the @writeFile@ debug line had lost its comment marker; as code it
-- referenced @str@, which is only bound on the (commented-out) line above,
-- so it could not compile. All three debug lines are now comments.
nonincrementalSolve :: Encoder Z3.Result
nonincrementalSolve = do
    prev <- gets prevChecked
    when prev $ do
        toBlock <- gets block
        modify $ \st -> st { blockConstraints = toBlock : blockConstraints st
                           , prevChecked = False }
    addAllConstraints
    -- debugging aids:
    -- str <- solverToString
    -- liftIO $ putStrLn str
    -- liftIO $ writeFile "constraint.z3" str
    check
-- | Solve using z3's assumption mechanism: the blocking clause, the
-- optional (exclusion) constraints and the final-state constraints are
-- each guarded by a fresh boolean literal, so they can later be retired
-- with 'cancelConstraints' without resetting the solver.
--
-- Fix: removed the unused @prev <- gets prevChecked@ binding — the value
-- was never used because 'ifM' re-reads the same state field below.
incrementalSolve :: Encoder Z3.Result
incrementalSolve = do
    modify $ \st -> st { counter = counter st + 1 }
    (cnt, boolS) <- getParam
    -- guard the blocking clause from the previous model (if any);
    -- otherwise the literal implies a tautology
    blockSym <- mkStringSymbol $ "block" ++ show cnt
    blockE <- mkConst blockSym boolS
    blocked <- ifM (gets prevChecked)
                   (gets block >>= mkImplies blockE)
                   (mkTrue >>= mkImplies blockE)
    modify $ \st -> st { blockConstraints = blockE : blockConstraints st
                       , prevChecked = False }
    -- guard the current optional (exclusion) constraints
    exclusions <- gets optionalConstraints
    excludeSym <- mkStringSymbol $ "exclude" ++ show cnt
    excludeE <- mkConst excludeSym boolS
    excluded <- mkAnd exclusions >>= mkImplies excludeE
    -- guard the final-state constraints
    finals <- gets finalConstraints
    finalSym <- mkStringSymbol $ "final" ++ show cnt
    finalE <- mkConst finalSym boolS
    finaled <- mkAnd finals >>= mkImplies finalE
    assert excluded
    assert finaled
    assert blocked
    blocks <- gets blockConstraints
    checkAssumptions (excludeE : finalE : blocks)
-- | Run the solver and decode a path from the model. On SAT, read off
-- which transition fired at each timestep, build a blocking clause so the
-- exact same (marking, firing) assignment is not returned again, and
-- return the fired transition names in timestep order. On UNSAT, either
-- give up (only one candidate return type left) or retry recursively
-- with the next, more general return type. Returns [] on failure.
solveAndGetModel :: Encoder [Id]
solveAndGetModel = do
    l <- gets loc
    incremental <- gets incrementalSolving
    res <- if incremental then incrementalSolve else nonincrementalSolve
    case res of
        Sat -> do
            model <- solverGetModel
            places <- gets (HashMap.keys . ty2tr)
            -- evaluate what transitions are fired
            selected <- mapM (checkLit model) [0..(l-1)]
            -- pin every place's token count at every timestep to its model value
            placed <- mapM (uncurry $ checkPlace model) [(p, t) | p <- places
                                                                , t <- [0..l]]
            blockTrs <- zipWithM blockTr [0..(l-1)] selected
            -- negated conjunction of the full assignment blocks this solution
            blockAss <- mkAnd (placed ++ blockTrs) >>= mkNot
            modify $ \s -> s { block = blockAss }
            unless incremental solverReset
            selectedNames <- getTrNames selected
            -- retire the assumption-guarded groups so the next round can
            -- install fresh ones
            when incremental $ do
                cancelConstraints "exclude"
                cancelConstraints "final"
            return selectedNames
        Unsat -> do
            rets <- gets returnTyps
            unless incremental solverReset
            if length rets == 1
                then do
                    -- liftIO $ print "unsat for increase length"
                    -- no fallback return type left: permanently negate the
                    -- accumulated blocking literals and report failure
                    when incremental $ do
                        cancelConstraints "exclude"
                        cancelConstraints "final"
                        blocks <- gets blockConstraints
                        mapM_ (mkNot >=> assert) blocks
                    return []
                else do
                    -- liftIO $ print "unsat for change goal"
                    -- try a more general return type
                    t2tr <- gets ty2tr
                    when incremental $ cancelConstraints "final"
                    modify $ \st -> st { finalConstraints = []
                                       , returnTyps = tail rets
                                       , prevChecked = False }
                    -- rets !! 1 == head (tail rets): the new current goal
                    setFinalState (rets !! 1) (HashMap.keys t2tr)
                    solveAndGetModel
        Undef -> return []
  where
    -- map the decoded integer ids back to transition names
    getTrNames selected = do
        transMap <- gets id2transition
        let transNames = map (\id -> findVariable "id2transition" (fromIntegral id) transMap) selected
        return transNames
    -- equality literal fixing place p's token count at time t to the model value
    checkPlace model p t = do
        placeMap <- gets place2variable
        let pVar = findVariable "placemap" (p, t) placeMap
        maybeInt <- evalInt model pVar
        case maybeInt of
            Just i -> mkIntNum i >>= mkEq pVar
            Nothing -> error $ "cannot eval the variable" ++ show (p, t)
    -- read which transition id fired at time t in the model
    checkLit model t = do
        tsMap <- gets time2variable
        let tsVar = findVariable "time2variable" t tsMap
        bMay <- evalInt model tsVar
        case bMay of
            Just b -> return b
            Nothing -> error $ "cannot eval the variable" ++ show t
    -- equality literal fixing the firing at time t to transition tr
    blockTr t tr = do
        tsMap <- gets time2variable
        let tsVar = findVariable "time2variable" t tsMap
        mkIntNum tr >>= mkEq tsVar
-- | Create a fresh z3 environment, build the encoding for paths of the
-- given length, and return the ready-to-solve state.
--
--   len     : path length (number of transition firings) to encode
--   hoArgs  : higher-order argument groups that must fire (must-firers)
--   inputs  : abstract input types; they form the initial marking
--   rets    : candidate return types, tried in order (head first)
--   sigs    : abstract component signatures, i.e. the net's transitions
--   t2tr    : map from abstract type to the transitions touching it
--   incr    : solve incrementally via assumption literals when True
--   rele    : when True, argument use is NOT enforced
--             ('useArguments' is set to its negation)
--   noClone : disable clone (token-duplicating) transitions
encoderInit :: Int
            -> HashMap Id [Id]
            -> [AbstractSkeleton]
            -> [AbstractSkeleton]
            -> [FunctionCode]
            -> HashMap AbstractSkeleton (Set Id)
            -> Bool
            -> Bool
            -> Bool
            -> IO EncodeState
encoderInit len hoArgs inputs rets sigs t2tr incr rele noClone = do
    z3Env <- initialZ3Env
    -- NOTE(review): 'false' is bound but not referenced in the record
    -- below — possibly meant to seed the 'block' field; confirm upstream.
    false <- Z3.mkFalse (envContext z3Env)
    let initialState = emptyEncodeState {
          z3env = z3Env
        , loc = len
        , mustFirers = hoArgs
        , ty2tr = t2tr
        , incrementalSolving = incr
        , returnTyps = rets
        , useArguments = not rele
        , disableClones = noClone
        }
    -- encode against the most specific return type first
    execStateT (createEncoder inputs (head rets) sigs) initialState
-- | Run one solving round on the given encoder state, returning the
-- fired transition names (empty on failure) plus the updated state.
encoderSolve :: EncodeState -> IO ([Id], EncodeState)
encoderSolve st = runStateT solveAndGetModel st
-- | Extend an existing encoding by one timestep instead of rebuilding it
-- from scratch: only the new (place, timestep) variables, the new firing
-- variable, and the refreshed optional/blocking/final constraints are
-- added; persistent constraints from earlier steps are kept.
--
-- Fix: removed the unused @transitions <- gets (...)@ binding — the
-- transition list was read from state but never referenced (the new
-- step's constraints are built from @sigs@).
encoderInc :: [FunctionCode] -> [AbstractSkeleton] -> [AbstractSkeleton] -> Encoder ()
encoderInc sigs inputs rets = do
    modify $ \st -> st { loc = loc st + 1
                       , returnTyps = rets
                       , optionalConstraints = []
                       , blockConstraints = []
                       , finalConstraints = []
                       , prevChecked = False }
    places <- gets (HashMap.keys . ty2tr)
    l <- gets loc
    -- add place variables for the new timestep and the new firing variable
    mapM_ (uncurry addPlaceVar) [(a, l) | a <- places]
    addTimestampVar (l - 1)
    -- only the new timestep needs fresh transition constraints
    let allTransitions = [(l - 1, tr) | tr <- sigs]
    -- all places have non-negative number of tokens
    nonnegativeTokens places
    -- disable removed transitions at the new timestamp
    toRemove <- gets disabledTrans
    disableTransitions toRemove (l - 1)
    -- refine the postcondition constraints
    mapM_ (uncurry fireTransitions) allTransitions
    -- re-add the changeable constraints over all timesteps
    transitionRng
    mapM_ (uncurry noTransitionTokens) [(t, p) | p <- places, t <- [0..(l-1)]]
    -- refine the must firers
    mustFireTransitions
    -- set new initial and final state
    setInitialState inputs places
    setFinalState (head rets) places
-- | Re-encode after an abstraction refinement step: record the refined
-- type-to-transition map, add variables and constraints for the new
-- places/transitions described by 'SplitInfo', and permanently disable
-- the transitions that were split away.
--
-- Fix: the stale @let newSigs = filter ... sigs@ line had lost its
-- comment marker; as code it shadowed the @newSigs@ parameter and
-- referenced the out-of-scope name @sigs@, so it could not compile.
encoderRefine :: SplitInfo -> HashMap Id [Id] -> [AbstractSkeleton] -> [AbstractSkeleton] -> [FunctionCode] -> HashMap AbstractSkeleton (Set Id) -> Encoder ()
encoderRefine info musters inputs rets newSigs t2tr = do
    {- update the abstraction level -}
    modify $ \st -> st { ty2tr = t2tr
                       , mustFirers = musters
                       , disabledTrans = disabledTrans st ++ removedTrans info
                       , returnTyps = rets
                       , optionalConstraints = []
                       , finalConstraints = []
                       }
    {- operation on places -}
    l <- gets loc
    let newPlaceIds = newPlaces info
    let newTransIds = newTrans info
    let currPlaces = HashMap.keys t2tr
    -- let newSigs = filter ((`elem` newTransIds) . funName) sigs
    let allTrans = [(t, tr) | t <- [0..(l-1)], tr <- newSigs]
    -- add new place, transition and timestamp variables
    mapM_ (uncurry addPlaceVar) [(a, i) | a <- newPlaceIds, i <- [0..l]]
    addTransitionVar newTransIds
    -- all places have non-negative number of tokens
    nonnegativeTokens newPlaceIds
    -- refine the postcondition constraints
    mapM_ (uncurry fireTransitions) allTrans
    -- disable splitted transitions at every timestep
    mapM_ (disableTransitions (removedTrans info)) [0..(l-1)]
    transitionRng
    mapM_ (uncurry noTransitionTokens) [(t, p) | p <- currPlaces, t <- [0..(l-1)]]
    -- refine the must firers
    mustFireTransitions
    -- set new initial and final state
    setInitialState inputs currPlaces
    setFinalState (head rets) currPlaces
-- | Forbid each of the given transitions from firing at timestep @t@.
-- Recorded as persistent constraints, and asserted immediately when
-- solving incrementally.
disableTransitions :: [Id] -> Int -> Encoder ()
disableTransitions trs t = mapM_ banTransition trs
  where
    banTransition tr = do
        trVar <- gets (findVariable "transition2id" tr . transition2id)
        tsVar <- gets (findVariable "time2variable" t . time2variable)
        neq <- mkNot =<< mkEq tsVar trVar
        modify $ \st -> st { persistConstraints = neq : persistConstraints st }
        incremental <- gets incrementalSolving
        when incremental (assert neq)
-- | Declare the integer z3 variable holding the token count of place
-- @p@ at time @t@; a no-op if that (place, time) key already exists.
--
-- Fix: the z3 constant is now only allocated when the key is new — the
-- original built one unconditionally and discarded it on the
-- already-present path.
addPlaceVar :: AbstractSkeleton -> Int -> Encoder ()
addPlaceVar p t = do
    st <- get
    unless (HashMap.member (p, t) (place2variable st)) $ do
        placeVar <- mkZ3IntVar (variableNb st)
        put st { place2variable = HashMap.insert (p, t) placeVar (place2variable st)
               , variableNb = variableNb st + 1
               }
-- | Assign each transition name a dense integer id plus the matching z3
-- numeral, maintaining the forward ('transition2id') and reverse
-- ('id2transition') maps used for encoding and model decoding.
--
-- Fix: the z3 numeral is now only built when the transition is new —
-- the original called 'mkIntNum' unconditionally and discarded the
-- result on the already-present path.
addTransitionVar :: [Id] -> Encoder ()
addTransitionVar = mapM_ addTransitionVarFor
  where
    addTransitionVarFor tr = do
        st <- get
        unless (HashMap.member tr (transition2id st)) $ do
            let tid = transitionNb st
            trVar <- mkIntNum tid
            put st { transitionNb = tid + 1
                   , transition2id = HashMap.insert tr trVar (transition2id st)
                   , id2transition = HashMap.insert tid tr (id2transition st)
                   }
-- | Declare the integer z3 variable recording which transition fires at
-- timestep @t@; a no-op if it already exists.
--
-- Fix: the z3 constant is now only allocated when the key is new — the
-- original built one unconditionally and discarded it on the
-- already-present path.
addTimestampVar :: Int -> Encoder ()
addTimestampVar t = do
    st <- get
    unless (HashMap.member t (time2variable st)) $ do
        tsVar <- mkZ3IntVar (variableNb st)
        put st { time2variable = HashMap.insert t tsVar (time2variable st)
               , variableNb = variableNb st + 1
               }
-- | Declare every z3 variable the encoding needs: a token-count variable
-- per (place, timestep), an integer id per transition, and a firing
-- variable per timestep.
createVariables :: [AbstractSkeleton] -> [Id] -> Encoder ()
createVariables places transitions = do
    l <- gets loc
    -- place variables cover timesteps 0..l inclusive
    sequence_ [ addPlaceVar p t | p <- places, t <- [0 .. l] ]
    addTransitionVar transitions
    -- firing variables cover the l steps between markings
    mapM_ addTimestampVar [0 .. l - 1]
-- | Assert all structural constraints of the net over the declared
-- variables: non-negative markings, transition pre/post conditions,
-- firing-variable bounds, frame conditions, and must-fire obligations.
createConstraints :: [AbstractSkeleton] -> [FunctionCode] -> Encoder ()
createConstraints places transitions = do
    l <- gets loc
    let steps = [0 .. l - 1]
    nonnegativeTokens places
    sequence_ [ fireTransitions t tr | t <- steps, tr <- transitions ]
    transitionRng
    sequence_ [ noTransitionTokens t p | t <- steps, p <- places ]
    mustFireTransitions
mkZ3IntVar :: Int -> Encoder AST
mkZ3IntVar var = do
varSymbol <- mkIntSymbol var
intS <- mkIntSort
mkConst varSymbol intS
findVariable :: (Eq k, Hashable k, Show k) => String -> k -> HashMap k v -> v
findVariable blame k m = fromMaybe (error $ "cannot find in " ++ blame ++ " variable for " ++ show k)
(HashMap.lookup k m)
nonnegativeTokens :: [AbstractSkeleton] -> Encoder ()
nonnegativeTokens places = do
l <- gets loc
mapM_ (uncurry nonnegAt) [(p, t) | p <- places, t <- [0..l]]
where
nonnegAt p t = do
placeMap <- gets place2variable
let pVar = findVariable "placemap" (p, t) placeMap
zero <- mkIntNum 0
geZero <- mkGe pVar zero
modify $ \st -> st { persistConstraints = geZero : persistConstraints st }
incremental <- gets incrementalSolving
when incremental $ assert geZero
| at each timestamp , only one transition can be fired , we restrict the
-- fired transition id range here
transitionRng :: Encoder ()
transitionRng = do
l <- gets loc
mapM_ fireAt [0..(l-1)]
where
fireAt t = do
tsMap <- gets time2variable
transMax <- gets transitionNb
let tsVar = findVariable "time2variable" t tsMap
start <- mkIntNum 0
geStart <- mkGe tsVar start
end <- mkIntNum transMax
ltEnd <- mkLt tsVar end
modify $ \st -> st { optionalConstraints = ltEnd : geStart : optionalConstraints st }
-- | if this place has no connected transition fired,
it has the same # of tokens
noTransitionTokens :: Int -> AbstractSkeleton -> Encoder ()
noTransitionTokens t p = do
trans <- gets transition2id
t2tr <- gets ty2tr
let transSet = Set.toList $ HashMap.lookupDefault Set.empty p t2tr
let transitions = map (\x -> findVariable "transition2id" x trans) transSet
noFireLvs <- noFireAt transitions t
noFire <- mkOr noFireLvs >>= mkNot
placeMap <- gets place2variable
let curr = findVariable "placemap" (p, t) placeMap
let next = findVariable "placemap" (p, t + 1) placeMap
tokenSame <- mkEq curr next
noChange <- mkImplies noFire tokenSame
modify $ \st -> st { optionalConstraints = noChange : optionalConstraints st }
where
noFireAt transitions t = do
tsMap <- gets time2variable
let tsVar = findVariable "time2variable" t tsMap
mapM (mkEq tsVar) transitions
fireTransitions :: Int -> FunctionCode -> Encoder ()
fireTransitions t (FunctionCode name [] params rets) = do
transMap <- gets transition2id
placeMap <- gets place2variable
tsMap <- gets time2variable
-- accumulate counting for parameters and return types
let pcnt = map (\l -> (head l, length l)) (group (sort params))
let pmap = HashMap.fromList pcnt
let rmap = foldl' (\acc t -> HashMap.insertWith (+) t (-1) acc) pmap rets
let rcnt = HashMap.toList rmap
changes <- mapM (mkChange t) rcnt
let tsVar = findVariable "time2variable" t tsMap
let trVar = findVariable "transition2id" name transMap
fire <- mkEq tsVar trVar
enoughTokens <- mapM getSatisfiedPlace pcnt
postCond <- mkAnd (enoughTokens ++ changes)
tokenChange <- mkImplies fire postCond
modify $ \st -> st { persistConstraints = tokenChange : persistConstraints st }
incremental <- gets incrementalSolving
when incremental $ assert tokenChange
where
mkChange t (p, diff) = do
let d = -diff
placeMap <- gets place2variable
let before = findVariable "placemap" (p, t) placeMap
let after = findVariable "placemap" (p, t + 1) placeMap
diffw <- mkIntNum d
mkAdd [before, diffw] >>= mkEq after
getSatisfiedPlace (p, cnt) = do
w <- mkIntNum cnt
placeMap <- gets place2variable
let pVar = findVariable "placemap" (p, t) placeMap
mkGe pVar w
fireTransitions t fc = error $ "unhandled " ++ show fc
mustFireTransitions :: Encoder ()
mustFireTransitions = do
must <- gets mustFirers
mapM_ fireTransitionFor (HashMap.toList must)
where
nameInMust must name = foldr ((||) . flip isInfixOf name) False must
fireTransition tid = do
l <- gets loc
tsMap <- gets time2variable
let tsVars = map (\t -> findVariable "time2variable" t tsMap) [0..(l-1)]
mapM (mkEq tid) tsVars
fireTransitionFor (_, tids) = do
transitions <- gets transition2id
let mustTrans = HashMap.filterWithKey (\k _ -> nameInMust tids k) transitions
fires <- mapM fireTransition mustTrans
toFire <- mkOr (concat fires)
modify $ \st -> st { optionalConstraints = toFire : optionalConstraints st }
| null | https://raw.githubusercontent.com/TyGuS/hoogle_plus/d02a1466d98f872e78ddb2fb612cb67d4bd0ca18/src/PetriNet/PNEncoder.hs | haskell | | create a new encoder in z3
create all the type variables for encoding
add all the constraints for the solver
set initial and final state for solver
| set the initial state for the solver, where we have tokens only in void or inputs
and maybe several tokens in the "void" place
str <- solverToString
liftIO $ putStrLn str
evaluate what transitions are fired
liftIO $ print "unsat for increase length"
liftIO $ print "unsat for change goal"
try a more general return type
optimize the optional constraints here:
we only need to change the must firers and noTransitionTokens and final states
add new place, transition and timestamp variables
all places have non-negative number of tokens
disable transitions at the new timestamp
refine the postcondition constraints
save the current state and add changeable constraints
refine the must firers
set new initial and final state
update the abstraction level
operation on places
add new place, transition and timestamp variables
all places have non-negative number of tokens
refine the postcondition constraints
disable splitted transitions
refine the must firers
set new initial and final state
| add variables for each place
| add transition mapping from (tr, lv) to integer id
an integer variable for each transition
| map each place and transition to a variable in z3
add place variables
add transition mapping
add timestamp variables
prepare constraint parameters
liftIO $ print places
fired transition id range here
| if this place has no connected transition fired,
accumulate counting for parameters and return types | # LANGUAGE FlexibleInstances #
# LANGUAGE FlexibleContexts #
# LANGUAGE TupleSections #
module PetriNet.PNEncoder(
encoderInit
, encoderSolve
, encoderRefine
, encoderInc
) where
import Data.Maybe
import Data.List
import Data.List.Extra
import Data.Hashable
import Data.HashMap.Strict (HashMap)
import qualified Data.HashMap.Strict as HashMap
import Data.Set (Set)
import qualified Data.Set as Set
import Z3.Monad hiding(Z3Env, newEnv)
import qualified Z3.Base as Z3
import Control.Monad.State
import System.CPUTime
import Text.Printf
import Data.Text (pack, unpack, replace)
import System.IO
import System.Process
import Types.Common
import Types.Encoder
import Types.Abstract
import PetriNet.AbstractType
import PetriNet.Util
import Synquid.Util
import Synquid.Pretty
instance MonadZ3 Encoder where
getSolver = gets (envSolver . z3env)
getContext = gets (envContext . z3env)
createEncoder :: [AbstractSkeleton] -> AbstractSkeleton -> [FunctionCode] -> Encoder ()
createEncoder inputs ret sigs = do
places <- gets (HashMap.keys . ty2tr)
transIds <- gets (Set.toList . Set.unions . HashMap.elems . ty2tr)
createVariables places transIds
createConstraints places sigs
setInitialState inputs places
setFinalState ret places
the tokens in the other places should be zero
setInitialState :: [AbstractSkeleton] -> [AbstractSkeleton] -> Encoder ()
setInitialState inputs places = do
let nonInputs = filter (`notElem` inputs) places
let inputCounts = map (\t -> (head t, length t)) (group (sort inputs))
let nonInputCounts = map (, 0) nonInputs
mapM_ (uncurry assignToken) nonInputCounts
enforceArg <- gets useArguments
when enforceArg $ mapM_ (uncurry assignInput) inputCounts
where
assignToken p v = do
placeMap <- gets place2variable
let tVar = findVariable "place2variable" (p, 0) placeMap
eq <- mkIntNum v >>= mkEq tVar
modify $ \st -> st { optionalConstraints = eq : optionalConstraints st }
assignInput p v = do
placeMap <- gets place2variable
let tVar = findVariable "place2variable" (p, 0) placeMap
noClone <- gets disableClones
ge <- mkIntNum v >>= if noClone then mkGe tVar else mkEq tVar
modify $ \st -> st { optionalConstraints = ge : optionalConstraints st }
| set the final solver state , we allow only one token in the return type
setFinalState :: AbstractSkeleton -> [AbstractSkeleton] -> Encoder ()
setFinalState ret places = do
the return value should have only one token
includeRet
other places excluding void and ret should have nothing
let nonOutputs = filter (ret /=) places
mapM_ excludeOther nonOutputs
where
includeRet = do
placeMap <- gets place2variable
l <- gets loc
let retVar = findVariable "place2variable" (ret, l) placeMap
assrt <- mkIntNum 1 >>= mkEq retVar
modify $ \st -> st { finalConstraints = assrt : finalConstraints st }
excludeOther p = do
l <- gets loc
placeMap <- gets place2variable
let tVar = findVariable "place2variable" (p, l) placeMap
eq <- mkIntNum 0 >>= mkEq tVar
modify $ \st -> st { finalConstraints = eq : finalConstraints st }
getParam :: Encoder (Int, Z3.Sort)
getParam = do
cnt <- gets counter
boolS <- mkBoolSort
return (cnt, boolS)
cancelConstraints :: String -> Encoder ()
cancelConstraints name = do
(cnt, boolS) <- getParam
cancelSym <- mkStringSymbol $ name ++ show cnt
cancelExp <- mkConst cancelSym boolS >>= mkNot
assert cancelExp
addAllConstraints :: Encoder ()
addAllConstraints = do
pcons <- gets persistConstraints
ocons <- gets optionalConstraints
fcons <- gets finalConstraints
bcons <- gets blockConstraints
mapM_ assert pcons
mapM_ assert ocons
mapM_ assert fcons
mapM_ assert bcons
nonincrementalSolve :: Encoder Z3.Result
nonincrementalSolve = do
prev <- gets prevChecked
when prev $ do
toBlock <- gets block
modify $ \st -> st { blockConstraints = toBlock : blockConstraints st
, prevChecked = False }
addAllConstraints
liftIO $ writeFile " constraint.z3 " str
check
incrementalSolve :: Encoder Z3.Result
incrementalSolve = do
modify $ \st -> st { counter = counter st + 1 }
prev <- gets prevChecked
(cnt, boolS) <- getParam
blockSym <- mkStringSymbol $ "block" ++ show cnt
blockE <- mkConst blockSym boolS
blocked <- ifM (gets prevChecked)
(gets block >>= mkImplies blockE)
(mkTrue >>= mkImplies blockE)
modify $ \st -> st { blockConstraints = blockE : blockConstraints st
, prevChecked = False }
exclusions <- gets optionalConstraints
excludeSym <- mkStringSymbol $ "exclude" ++ show cnt
excludeE <- mkConst excludeSym boolS
excluded <- mkAnd exclusions >>= mkImplies excludeE
finals <- gets finalConstraints
finalSym <- mkStringSymbol $ "final" ++ show cnt
finalE <- mkConst finalSym boolS
finaled <- mkAnd finals >>= mkImplies finalE
assert excluded
assert finaled
assert blocked
blocks <- gets blockConstraints
checkAssumptions (excludeE : finalE : blocks)
solveAndGetModel :: Encoder [Id]
solveAndGetModel = do
l <- gets loc
incremental <- gets incrementalSolving
res <- if incremental then incrementalSolve else nonincrementalSolve
case res of
Sat -> do
model <- solverGetModel
places <- gets (HashMap.keys . ty2tr)
selected <- mapM (checkLit model) [0..(l-1)]
placed <- mapM (uncurry $ checkPlace model) [(p, t) | p <- places
, t <- [0..l]]
blockTrs <- zipWithM blockTr [0..(l-1)] selected
blockAss <- mkAnd (placed ++ blockTrs) >>= mkNot
modify $ \s -> s { block = blockAss }
unless incremental solverReset
selectedNames <- getTrNames selected
when incremental $ do
cancelConstraints "exclude"
cancelConstraints "final"
return selectedNames
Unsat -> do
rets <- gets returnTyps
unless incremental solverReset
if length rets == 1
then do
when incremental $ do
cancelConstraints "exclude"
cancelConstraints "final"
blocks <- gets blockConstraints
mapM_ (mkNot >=> assert) blocks
return []
else do
t2tr <- gets ty2tr
when incremental $ cancelConstraints "final"
modify $ \st -> st { finalConstraints = []
, returnTyps = tail rets
, prevChecked = False }
setFinalState (rets !! 1) (HashMap.keys t2tr)
solveAndGetModel
Undef -> return []
where
getTrNames selected = do
transMap <- gets id2transition
let transNames = map (\id -> findVariable "id2transition" (fromIntegral id) transMap) selected
return transNames
checkPlace model p t = do
placeMap <- gets place2variable
let pVar = findVariable "placemap" (p, t) placeMap
maybeInt <- evalInt model pVar
case maybeInt of
Just i -> mkIntNum i >>= mkEq pVar
Nothing -> error $ "cannot eval the variable" ++ show (p, t)
checkLit model t = do
tsMap <- gets time2variable
let tsVar = findVariable "time2variable" t tsMap
bMay <- evalInt model tsVar
case bMay of
Just b -> return b
Nothing -> error $ "cannot eval the variable" ++ show t
blockTr t tr = do
tsMap <- gets time2variable
let tsVar = findVariable "time2variable" t tsMap
mkIntNum tr >>= mkEq tsVar
encoderInit :: Int
-> HashMap Id [Id]
-> [AbstractSkeleton]
-> [AbstractSkeleton]
-> [FunctionCode]
-> HashMap AbstractSkeleton (Set Id)
-> Bool
-> Bool
-> Bool
-> IO EncodeState
encoderInit len hoArgs inputs rets sigs t2tr incr rele noClone = do
z3Env <- initialZ3Env
false <- Z3.mkFalse (envContext z3Env)
let initialState = emptyEncodeState {
z3env = z3Env
, loc = len
, mustFirers = hoArgs
, ty2tr = t2tr
, incrementalSolving = incr
, returnTyps = rets
, useArguments = not rele
, disableClones = noClone
}
execStateT (createEncoder inputs (head rets) sigs) initialState
encoderSolve :: EncodeState -> IO ([Id], EncodeState)
encoderSolve = runStateT solveAndGetModel
encoderInc :: [FunctionCode] -> [AbstractSkeleton] -> [AbstractSkeleton] -> Encoder ()
encoderInc sigs inputs rets = do
modify $ \st -> st { loc = loc st + 1
, returnTyps = rets
, optionalConstraints = []
, blockConstraints = []
, finalConstraints = []
, prevChecked = False }
places <- gets (HashMap.keys . ty2tr)
transitions <- gets (Set.toList . Set.unions . HashMap.elems . ty2tr)
l <- gets loc
mapM_ (uncurry addPlaceVar) [(a, l) | a <- places]
addTimestampVar (l - 1)
let allTransitions = [(l - 1, tr) | tr <- sigs ]
nonnegativeTokens places
toRemove <- gets disabledTrans
disableTransitions toRemove (l-1)
mapM_ (uncurry fireTransitions) allTransitions
transitionRng
mapM_ (uncurry noTransitionTokens) [(t, p) | p <- places, t <- [0..(l-1)]]
mustFireTransitions
setInitialState inputs places
setFinalState (head rets) places
encoderRefine :: SplitInfo -> HashMap Id [Id] -> [AbstractSkeleton] -> [AbstractSkeleton] -> [FunctionCode] -> HashMap AbstractSkeleton (Set Id) -> Encoder ()
encoderRefine info musters inputs rets newSigs t2tr = do
modify $ \st -> st { ty2tr = t2tr
, mustFirers = musters
, disabledTrans = disabledTrans st ++ removedTrans info
, returnTyps = rets
, optionalConstraints = []
, finalConstraints = []
}
l <- gets loc
let newPlaceIds = newPlaces info
let newTransIds = newTrans info
let currPlaces = HashMap.keys t2tr
let newSigs = filter ( ( ` elem ` newTransIds ) . funName ) sigs
let allTrans = [(t, tr) | t <- [0..(l-1)], tr <- newSigs ]
mapM_ (uncurry addPlaceVar) [(a, i) | a <- newPlaceIds, i <- [0..l]]
addTransitionVar newTransIds
nonnegativeTokens newPlaceIds
mapM_ (uncurry fireTransitions) allTrans
mapM_ (disableTransitions (removedTrans info)) [0..(l-1)]
transitionRng
mapM_ (uncurry noTransitionTokens) [(t, p) | p <- currPlaces, t <- [0..(l-1)]]
mustFireTransitions
setInitialState inputs currPlaces
setFinalState (head rets) currPlaces
disableTransitions :: [Id] -> Int -> Encoder ()
disableTransitions trs t = mapM_ disableTrAt trs
where
disableTrAt tr = do
transMap <- gets transition2id
tsMap <- gets time2variable
let trVar = findVariable "transition2id" tr transMap
let tsVar = findVariable "time2variable" t tsMap
eq <- mkEq tsVar trVar >>= mkNot
modify $ \st -> st { persistConstraints = eq : persistConstraints st }
incremental <- gets incrementalSolving
when incremental $ assert eq
addPlaceVar :: AbstractSkeleton -> Int -> Encoder ()
addPlaceVar p t = do
st <- get
placeVar <- mkZ3IntVar $ variableNb st
let p2v = HashMap.insert (p, t) placeVar $ place2variable st
unless (HashMap.member (p, t) (place2variable st))
(put $ st { place2variable = p2v
, variableNb = variableNb st + 1
})
addTransitionVar :: [Id] -> Encoder ()
addTransitionVar = mapM_ addTransitionVarFor
where
addTransitionVarFor tr = do
st <- get
let tid = transitionNb st
trVar <- mkIntNum tid
unless (HashMap.member tr (transition2id st))
(put $ st { transitionNb = 1 + transitionNb st
, transition2id = HashMap.insert tr trVar $ transition2id st
, id2transition = HashMap.insert tid tr $ id2transition st
})
addTimestampVar :: Int -> Encoder ()
addTimestampVar t = do
st <- get
tsVar <- mkZ3IntVar $ variableNb st
unless (HashMap.member t (time2variable st))
(put $ st { time2variable = HashMap.insert t tsVar $ time2variable st
, variableNb = variableNb st + 1
})
createVariables :: [AbstractSkeleton] -> [Id] -> Encoder ()
createVariables places transitions = do
l <- gets loc
mapM_ (uncurry addPlaceVar) [(a, i) | a <- places, i <- [0..l]]
addTransitionVar transitions
mapM_ addTimestampVar [0..(l-1)]
createConstraints :: [AbstractSkeleton] -> [FunctionCode] -> Encoder ()
createConstraints places transitions = do
l <- gets loc
let allTrans = [(t, tr) | t <- [0..(l-1)], tr <- transitions]
let allPlaces = [(t, p) | t <- [0..(l-1)], p <- places]
nonnegativeTokens places
mapM_ (uncurry fireTransitions) allTrans
transitionRng
mapM_ (uncurry noTransitionTokens) allPlaces
mustFireTransitions
mkZ3IntVar :: Int -> Encoder AST
mkZ3IntVar var = do
varSymbol <- mkIntSymbol var
intS <- mkIntSort
mkConst varSymbol intS
findVariable :: (Eq k, Hashable k, Show k) => String -> k -> HashMap k v -> v
findVariable blame k m = fromMaybe (error $ "cannot find in " ++ blame ++ " variable for " ++ show k)
(HashMap.lookup k m)
nonnegativeTokens :: [AbstractSkeleton] -> Encoder ()
nonnegativeTokens places = do
l <- gets loc
mapM_ (uncurry nonnegAt) [(p, t) | p <- places, t <- [0..l]]
where
nonnegAt p t = do
placeMap <- gets place2variable
let pVar = findVariable "placemap" (p, t) placeMap
zero <- mkIntNum 0
geZero <- mkGe pVar zero
modify $ \st -> st { persistConstraints = geZero : persistConstraints st }
incremental <- gets incrementalSolving
when incremental $ assert geZero
| at each timestamp , only one transition can be fired , we restrict the
transitionRng :: Encoder ()
transitionRng = do
l <- gets loc
mapM_ fireAt [0..(l-1)]
where
fireAt t = do
tsMap <- gets time2variable
transMax <- gets transitionNb
let tsVar = findVariable "time2variable" t tsMap
start <- mkIntNum 0
geStart <- mkGe tsVar start
end <- mkIntNum transMax
ltEnd <- mkLt tsVar end
modify $ \st -> st { optionalConstraints = ltEnd : geStart : optionalConstraints st }
it has the same # of tokens
noTransitionTokens :: Int -> AbstractSkeleton -> Encoder ()
noTransitionTokens t p = do
trans <- gets transition2id
t2tr <- gets ty2tr
let transSet = Set.toList $ HashMap.lookupDefault Set.empty p t2tr
let transitions = map (\x -> findVariable "transition2id" x trans) transSet
noFireLvs <- noFireAt transitions t
noFire <- mkOr noFireLvs >>= mkNot
placeMap <- gets place2variable
let curr = findVariable "placemap" (p, t) placeMap
let next = findVariable "placemap" (p, t + 1) placeMap
tokenSame <- mkEq curr next
noChange <- mkImplies noFire tokenSame
modify $ \st -> st { optionalConstraints = noChange : optionalConstraints st }
where
noFireAt transitions t = do
tsMap <- gets time2variable
let tsVar = findVariable "time2variable" t tsMap
mapM (mkEq tsVar) transitions
fireTransitions :: Int -> FunctionCode -> Encoder ()
fireTransitions t (FunctionCode name [] params rets) = do
transMap <- gets transition2id
placeMap <- gets place2variable
tsMap <- gets time2variable
let pcnt = map (\l -> (head l, length l)) (group (sort params))
let pmap = HashMap.fromList pcnt
let rmap = foldl' (\acc t -> HashMap.insertWith (+) t (-1) acc) pmap rets
let rcnt = HashMap.toList rmap
changes <- mapM (mkChange t) rcnt
let tsVar = findVariable "time2variable" t tsMap
let trVar = findVariable "transition2id" name transMap
fire <- mkEq tsVar trVar
enoughTokens <- mapM getSatisfiedPlace pcnt
postCond <- mkAnd (enoughTokens ++ changes)
tokenChange <- mkImplies fire postCond
modify $ \st -> st { persistConstraints = tokenChange : persistConstraints st }
incremental <- gets incrementalSolving
when incremental $ assert tokenChange
where
mkChange t (p, diff) = do
let d = -diff
placeMap <- gets place2variable
let before = findVariable "placemap" (p, t) placeMap
let after = findVariable "placemap" (p, t + 1) placeMap
diffw <- mkIntNum d
mkAdd [before, diffw] >>= mkEq after
getSatisfiedPlace (p, cnt) = do
w <- mkIntNum cnt
placeMap <- gets place2variable
let pVar = findVariable "placemap" (p, t) placeMap
mkGe pVar w
fireTransitions t fc = error $ "unhandled " ++ show fc
mustFireTransitions :: Encoder ()
mustFireTransitions = do
must <- gets mustFirers
mapM_ fireTransitionFor (HashMap.toList must)
where
nameInMust must name = foldr ((||) . flip isInfixOf name) False must
fireTransition tid = do
l <- gets loc
tsMap <- gets time2variable
let tsVars = map (\t -> findVariable "time2variable" t tsMap) [0..(l-1)]
mapM (mkEq tid) tsVars
fireTransitionFor (_, tids) = do
transitions <- gets transition2id
let mustTrans = HashMap.filterWithKey (\k _ -> nameInMust tids k) transitions
fires <- mapM fireTransition mustTrans
toFire <- mkOr (concat fires)
modify $ \st -> st { optionalConstraints = toFire : optionalConstraints st }
|
5f7aace38f61349b3ea1b8aabd04d8aefa18937ea3a4c948029a0df43e9f6440 | primeteach/specomatic-db | core.clj | (ns specomatic-db.core
"The main namespace for consumers of specomatic-db. Contains functions for initialisation, retrieving and persisting entities."
(:require
[clojure.spec.alpha :as s]
[clojure.tools.logging :as log]
[nedap.speced.def :as sd]
[next.jdbc :as jdbc]
[seql.query :as sq]
[specomatic-db.access-control :as ac]
[specomatic-db.core.impl :as impl]
[specomatic-db.db.conversion :as cnv]
[specomatic-db.db.firebird.conversion]
[specomatic-db.db.firebird.migration]
[specomatic-db.db.firebird.mutation]
[specomatic-db.db.generic :as db-generic]
[specomatic-db.db.migration :as migration]
[specomatic-db.db.mutation :as mutation]
[specomatic-db.db.postgres.migration]
[specomatic-db.db.postgres.mutation]
[specomatic-db.db.sql :as sql]
[specomatic-db.field-def :as sdf]
[specomatic-db.seql :as seql]
[specomatic-db.spec :as sp]
[specomatic.core :as sc]
[specomatic.field-def :as sf]
[specomatic.util :as su]))
(sd/defn init!
"Given the environment `env`, does all necessary initialization.
To skip automatic database schema migration, pass `{:skip-migration? true}` as a second argument.
Currently validates the schema, initializes transaction infrastructure, ensures access control views exist and registers coercion functions."
([^::sp/env env]
(init! env {}))
([^::sp/env env {:keys [:skip-migration?]}]
(let [{:keys [config jdbc]} env
{:keys [schema]} config]
(log/info "Validating the schema...")
(when-not (s/valid? ::sp/schema
schema)
(throw (let [expl (s/explain-str ::sp/schema
schema)]
(ex-info (str "Invalid schema: " expl)
{:schema schema
:explain expl}))))
(log/info "Ensuring transaction infrastructure exists...")
(migration/ensure-transaction-infrastructure! jdbc)
(log/info "Clearing system transaction ids...")
(migration/clear-transaction-system-txid! jdbc)
(when-not skip-migration?
(migration/update-schema! jdbc (:schema config)))
(log/info "Initializing access control views...")
(ac/ensure-views-exist! jdbc config)
(log/info "Initializing transformation rules...")
(seql/set-transform! schema)
(log/info "Initialization complete."))))
(sd/defn default-fields
"Given `schema` and entity type `etype`, returns a seql vector of default fields."
^::sq/seql-query [^::sp/schema schema ^::sp/etype etype]
(vec
(for [[field field-def] (sc/field-defs schema etype)]
(if (sf/relational? field-def)
(let [target (sf/target field-def)]
{field (into [(sc/id-field schema target)]
(map (partial su/qualify target) (sc/display-name-fields schema target)))})
field))))
(sd/defn entity-history
"Retrieves the full history of an entity."
^::sp/query-result [^::sp/env env ^::sp/etype etype id ^::sq/seql-query fields]
(impl/entity-history env etype id fields))
(sd/defn query
"Given the environment `env`, retrieves the seql `fields` from the `etype` entities matching the HoneySQL `conditions`.
Optionally, the root entity may contain verbs like :verb/read, :verb/update as fields in addition to seql fields.
These contain a boolean indicating whether or not the user (given by [:user :id] in `env`) is allowed to do what the verb describes with the specific entity.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^::sp/query-result
([^::sp/env env ^::sp/etype etype]
(query env etype nil nil))
([^::sp/env env ^::sp/etype etype ^::sp/nilable-query fields]
(query env etype fields nil))
([^::sp/env env ^::sp/etype etype ^::sp/nilable-query fields ^::sp/conditions conditions]
(let [schema (get-in env [:config :schema])
my-fields (or fields
(default-fields schema etype))]
(ac/check-query-arguments env my-fields conditions)
(impl/execute-query env
etype
my-fields
(ac/concatenate-extra-conditions env my-fields conditions)))))
(sd/defn by-id
"Retrieves an entity by id. Returns nil if not found."
(^::sd/nilable ^map? [^::sp/env env ^::sp/etype etype id ^::sq/seql-query fields]
(-> (query env etype fields [[:= (sc/id-field (get-in env [:config :schema]) etype) id]])
first))
(^::sd/nilable ^map? [^::sp/env env ^::sp/etype etype id]
(by-id env etype id (default-fields (get-in env [:config :schema]) etype))))
(defmulti save-related!
"Saves the changeset `value` for related entities contained in a relational `field` of a entity to the database.
`opts` is a map of outer-etype and outer-id.
If `:specomatic.core/delete` is true in the related entity, it is deleted.
Otherwise, if the id field of the related entity is not nil, it is updated, if the id field is not present or nil, it is created.
In the case of a reference collection of a has-many-through type, these mutations are applied to the join table, not the actual related entity."
(fn [_env _field field-def _value _opts] [(sf/kind field-def) (sf/reference-type field-def)]))
(defn- extract-reference-id
[my-ref id-field]
(if (map? my-ref)
(id-field my-ref)
my-ref))
(defn- extract-reference-ids
[schema etype entity]
(merge entity
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:when (and (sf/relational? field-def)
(sdf/owns-relation? field-def)
(nil? (sdf/join-table field-def))
(field entity))]
[field
(let [target-id-field (sc/id-field schema (sf/target field-def))]
(-> entity
field
(extract-reference-id target-id-field)))]))))
(defn- create*
[{:keys [config jdbc user]
:as env}
etype
entity]
(let [{:keys [schema]} config
my-entity (extract-reference-ids schema etype entity)
result (mutation/insert! jdbc schema etype my-entity)
my-entity-id-value (:id result)
transaction-id (:tx/id result)
entity-id (sc/id-field schema etype)
ret (merge entity
{entity-id my-entity-id-value
:tx/id transaction-id}
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:let [v (field my-entity)]
:when (and v (sdf/save-related? field-def))]
[field
(save-related! env
field
field-def
v
{:outer-etype etype
:outer-id my-entity-id-value})])))]
(when-not (or
(:root? user)
(ac/allowed-all? user :verb/create etype)
(:verb/create (by-id env etype my-entity-id-value [entity-id :verb/create])))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
ret))
(sd/defn create!
"Given the environment `env`, creates the `entity` of type `etype` in the database.
Returns the given `entity` containing the new id and transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [{:keys [jdbc user]} env]
(when-not (s/valid? etype
entity)
(throw (let [expl (s/explain-str etype
entity)]
(ex-info (str "Invalid entity: " expl)
{:etype etype
:entity entity
:explain expl}))))
(when-not (or (:root? user)
(ac/allowed-some? user :verb/create etype))
;; We are not allowed to create any of the `etype` entities
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(jdbc/with-transaction
[trans jdbc]
(create* (assoc env :jdbc trans) etype entity))))
(defn- update*
[{:keys [jdbc user]
:as env} etype entity id]
(when-not (or (:root? user)
(ac/allowed-some? user :verb/update etype))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(let [schema (get-in env [:config :schema])
my-entity (extract-reference-ids schema etype entity)
entity-id (sc/id-field schema etype)
cond-snippet (ac/conditions-snippet schema
[etype]
(get-in env [:user :permissions])
:verb/update
{:user-id (:id user)
:user-etype (get-in env [:config :user-etype])})
result (mutation/update! jdbc
schema
etype
my-entity
cond-snippet)]
(if-let [tx-id (:tx/id result)]
;; Happy path
(merge entity
{entity-id id
:tx/id tx-id}
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:let [v (field my-entity)]
:when (and v (sdf/save-related? field-def))]
[field
(save-related! env
field
field-def
v
{:outer-etype etype
:outer-id id})])))
;; The statement didn't return anything, let's check why
(when (and cond-snippet (by-id env etype id [entity-id]))
;; We are only allowed to update some of the `etype` entities
;; AND we are allowed to read the entity, so it must be due to permissions
(throw (ex-info "Permission denied"
{:etype etype
:entity entity}))))))
(sd/defn update!
"Given the environment `env`, updates the `entity` of type `etype` in the database.
Returns the given `entity` containing the transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [{:keys [jdbc user]} env
entity-id (sc/id-field (get-in env [:config :schema]) etype)]
(when-not (s/valid? (s/keys)
entity)
(throw (let [expl (s/explain-str etype
entity)]
(ex-info (str "Invalid changeset: " expl)
{:etype etype
:entity entity
:explain (s/explain-str (s/keys)
entity)}))))
(when-not (or (:root? user)
(ac/allowed-some? user :verb/update etype))
;; We are not allowed to update any of the `etype` entities
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(jdbc/with-transaction
[trans jdbc]
(update* (assoc env :jdbc trans) etype entity (entity-id entity)))))
(sd/defn save!
"Given the environment `env`, saves the `entity` of type `etype` into the database.
Tries to create the entity if its id field is nonexistent or nil.
Tries to update the entity if it has an id.
Returns the given `entity` containing the new id (if created) and transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [entity-id (sc/id-field (get-in env [:config :schema]) etype)
entity-id-value (entity-id entity)
new? (nil? entity-id-value)]
(if new?
(create! env etype entity)
(update! env etype entity))))
(defn- delete*
[{:keys [config jdbc user]
:as env} etype id]
(let [{:keys [schema user-etype]} config
cond-snippet (ac/conditions-snippet schema
[etype]
(:permissions user)
:verb/delete
{:user-id (:id user)
:user-etype user-etype})
result (mutation/delete! jdbc
schema
etype
id
cond-snippet)]
(if (:tx/id result)
;; Happy path
(dissoc result :tx/ts)
;; The statement didn't return anything, let's check why
(when (and cond-snippet (by-id env etype id [(sc/id-field (get-in env [:config :schema]) etype)]))
;; We are only allowed to delete some of the `etype` entities
;; AND we are allowed to read the entity, so it must be due to permissions
;; We are allowed to read the entity, so it must be due to permissions
(throw (ex-info "Permission denied"
{:etype etype
:id id}))))))
(sd/defn delete!
"Given the environment `env`, delete the `entity` of type `etype`.
Returns a map of id, :tx/id, nil if not found (might be due to permissions).
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
[^::sp/env env ^::sp/etype etype id]
(let [{:keys [jdbc user]} env]
(when-not (or (:root? user)
(ac/allowed-some? user :verb/delete etype))
;; We are not allowed to delete any of the `etype` entities
(throw (ex-info "Permission denied"
{:etype etype
:id id})))
(jdbc/with-transaction
[trans jdbc]
(delete* (assoc env :jdbc trans) etype id))))
(defmethod save-related! :default
[_env _field _field-def _value _opts]
nil)
(defmethod save-related! [::sf/reference :has-one]
[env _field field-def value {:keys [outer-id]}]
(let [target (sf/target field-def)
target-id (sc/id-field (get-in env [:config :schema]) target)
via (sdf/db-via field-def)
target-id-value (target-id value)]
(if (::delete value)
(when target-id-value
(delete* env target target-id-value))
(if target-id-value
(merge value (update* env target value target-id-value))
(merge value (create* env target (assoc value via outer-id)))))))
(defmethod save-related! [::sf/reference-coll :has-many]
[env _field field-def value {:keys [outer-id]}]
(let [target (sf/target field-def)
target-id (sc/id-field (get-in env [:config :schema]) target)
via (sdf/db-via field-def)]
(->>
(for [entity value
:let [target-id-value (target-id entity)]]
(if (::delete entity)
(when target-id-value
(delete* env target target-id-value))
(if target-id-value
(merge entity (update* env target entity target-id-value))
(merge entity (create* env target (assoc entity via outer-id))))))
(filterv some?))))
(defmethod save-related! [::sf/reference-coll :has-many-through]
[{:keys [jdbc]} _field field-def value {:keys [outer-id]}]
(let [join-table (sdf/join-table field-def)
[_ etype-fk target-fk target-id] (sdf/db-via field-def)]
(->>
(for [my-ref value
:let [target-id-value (if (map? my-ref)
(target-id my-ref)
my-ref)]
:when target-id-value]
(let [table (cnv/etype->table-name jdbc join-table nil)
entity-fk-column (cnv/field->column-name jdbc etype-fk nil)
target-fk-column (cnv/field->column-name jdbc target-fk nil)]
(if (::delete my-ref)
(do (db-generic/delete-reference-coll-element!
jdbc
{:table table
:entity-id outer-id
:entity-idfield entity-fk-column
:target-id target-id-value
:target-idfield target-fk-column})
nil)
(do (sql/upsert-reference-coll-element! jdbc
{:table table
:entity-id outer-id
:entity-idfield entity-fk-column
:target-id target-id-value
:target-idfield target-fk-column})
my-ref))))
(filterv some?))))
| null | https://raw.githubusercontent.com/primeteach/specomatic-db/52dd0b08a9e928be6aea7a59f90fe35e4f1b59e8/src/specomatic_db/core.clj | clojure | We are not allowed to create any of the `etype` entities
Happy path
The statement didn't return anything, let's check why
We are only allowed to update some of the `etype` entities
AND we are allowed to read the entity, so it must be due to permissions
We are not allowed to update any of the `etype` entities
Happy path
The statement didn't return anything, let's check why
We are only allowed to delete some of the `etype` entities
AND we are allowed to read the entity, so it must be due to permissions
We are allowed to read the entity, so it must be due to permissions
We are not allowed to delete any of the `etype` entities | (ns specomatic-db.core
"The main namespace for consumers of specomatic-db. Contains functions for initialisation, retrieving and persisting entities."
(:require
[clojure.spec.alpha :as s]
[clojure.tools.logging :as log]
[nedap.speced.def :as sd]
[next.jdbc :as jdbc]
[seql.query :as sq]
[specomatic-db.access-control :as ac]
[specomatic-db.core.impl :as impl]
[specomatic-db.db.conversion :as cnv]
[specomatic-db.db.firebird.conversion]
[specomatic-db.db.firebird.migration]
[specomatic-db.db.firebird.mutation]
[specomatic-db.db.generic :as db-generic]
[specomatic-db.db.migration :as migration]
[specomatic-db.db.mutation :as mutation]
[specomatic-db.db.postgres.migration]
[specomatic-db.db.postgres.mutation]
[specomatic-db.db.sql :as sql]
[specomatic-db.field-def :as sdf]
[specomatic-db.seql :as seql]
[specomatic-db.spec :as sp]
[specomatic.core :as sc]
[specomatic.field-def :as sf]
[specomatic.util :as su]))
(sd/defn init!
"Given the environment `env`, does all necessary initialization.
To skip automatic database schema migration, pass `{:skip-migration? true}` as a second argument.
Currently validates the schema, initializes transaction infrastructure, ensures access control views exist and registers coercion functions."
([^::sp/env env]
(init! env {}))
([^::sp/env env {:keys [:skip-migration?]}]
(let [{:keys [config jdbc]} env
{:keys [schema]} config]
(log/info "Validating the schema...")
(when-not (s/valid? ::sp/schema
schema)
(throw (let [expl (s/explain-str ::sp/schema
schema)]
(ex-info (str "Invalid schema: " expl)
{:schema schema
:explain expl}))))
(log/info "Ensuring transaction infrastructure exists...")
(migration/ensure-transaction-infrastructure! jdbc)
(log/info "Clearing system transaction ids...")
(migration/clear-transaction-system-txid! jdbc)
(when-not skip-migration?
(migration/update-schema! jdbc (:schema config)))
(log/info "Initializing access control views...")
(ac/ensure-views-exist! jdbc config)
(log/info "Initializing transformation rules...")
(seql/set-transform! schema)
(log/info "Initialization complete."))))
(sd/defn default-fields
"Given `schema` and entity type `etype`, returns a seql vector of default fields."
^::sq/seql-query [^::sp/schema schema ^::sp/etype etype]
(vec
(for [[field field-def] (sc/field-defs schema etype)]
(if (sf/relational? field-def)
(let [target (sf/target field-def)]
{field (into [(sc/id-field schema target)]
(map (partial su/qualify target) (sc/display-name-fields schema target)))})
field))))
(sd/defn entity-history
"Retrieves the full history of an entity."
^::sp/query-result [^::sp/env env ^::sp/etype etype id ^::sq/seql-query fields]
(impl/entity-history env etype id fields))
(sd/defn query
"Given the environment `env`, retrieves the seql `fields` from the `etype` entities matching the HoneySQL `conditions`.
Optionally, the root entity may contain verbs like :verb/read, :verb/update as fields in addition to seql fields.
These contain a boolean indicating whether or not the user (given by [:user :id] in `env`) is allowed to do what the verb describes with the specific entity.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^::sp/query-result
([^::sp/env env ^::sp/etype etype]
(query env etype nil nil))
([^::sp/env env ^::sp/etype etype ^::sp/nilable-query fields]
(query env etype fields nil))
([^::sp/env env ^::sp/etype etype ^::sp/nilable-query fields ^::sp/conditions conditions]
(let [schema (get-in env [:config :schema])
my-fields (or fields
(default-fields schema etype))]
(ac/check-query-arguments env my-fields conditions)
(impl/execute-query env
etype
my-fields
(ac/concatenate-extra-conditions env my-fields conditions)))))
(sd/defn by-id
"Retrieves an entity by id. Returns nil if not found."
(^::sd/nilable ^map? [^::sp/env env ^::sp/etype etype id ^::sq/seql-query fields]
(-> (query env etype fields [[:= (sc/id-field (get-in env [:config :schema]) etype) id]])
first))
(^::sd/nilable ^map? [^::sp/env env ^::sp/etype etype id]
(by-id env etype id (default-fields (get-in env [:config :schema]) etype))))
(defmulti save-related!
"Saves the changeset `value` for related entities contained in a relational `field` of a entity to the database.
`opts` is a map of outer-etype and outer-id.
If `:specomatic.core/delete` is true in the related entity, it is deleted.
Otherwise, if the id field of the related entity is not nil, it is updated, if the id field is not present or nil, it is created.
In the case of a reference collection of a has-many-through type, these mutations are applied to the join table, not the actual related entity."
(fn [_env _field field-def _value _opts] [(sf/kind field-def) (sf/reference-type field-def)]))
(defn- extract-reference-id
[my-ref id-field]
(if (map? my-ref)
(id-field my-ref)
my-ref))
(defn- extract-reference-ids
[schema etype entity]
(merge entity
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:when (and (sf/relational? field-def)
(sdf/owns-relation? field-def)
(nil? (sdf/join-table field-def))
(field entity))]
[field
(let [target-id-field (sc/id-field schema (sf/target field-def))]
(-> entity
field
(extract-reference-id target-id-field)))]))))
(defn- create*
[{:keys [config jdbc user]
:as env}
etype
entity]
(let [{:keys [schema]} config
my-entity (extract-reference-ids schema etype entity)
result (mutation/insert! jdbc schema etype my-entity)
my-entity-id-value (:id result)
transaction-id (:tx/id result)
entity-id (sc/id-field schema etype)
ret (merge entity
{entity-id my-entity-id-value
:tx/id transaction-id}
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:let [v (field my-entity)]
:when (and v (sdf/save-related? field-def))]
[field
(save-related! env
field
field-def
v
{:outer-etype etype
:outer-id my-entity-id-value})])))]
(when-not (or
(:root? user)
(ac/allowed-all? user :verb/create etype)
(:verb/create (by-id env etype my-entity-id-value [entity-id :verb/create])))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
ret))
(sd/defn create!
"Given the environment `env`, creates the `entity` of type `etype` in the database.
Returns the given `entity` containing the new id and transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [{:keys [jdbc user]} env]
(when-not (s/valid? etype
entity)
(throw (let [expl (s/explain-str etype
entity)]
(ex-info (str "Invalid entity: " expl)
{:etype etype
:entity entity
:explain expl}))))
(when-not (or (:root? user)
(ac/allowed-some? user :verb/create etype))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(jdbc/with-transaction
[trans jdbc]
(create* (assoc env :jdbc trans) etype entity))))
(defn- update*
[{:keys [jdbc user]
:as env} etype entity id]
(when-not (or (:root? user)
(ac/allowed-some? user :verb/update etype))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(let [schema (get-in env [:config :schema])
my-entity (extract-reference-ids schema etype entity)
entity-id (sc/id-field schema etype)
cond-snippet (ac/conditions-snippet schema
[etype]
(get-in env [:user :permissions])
:verb/update
{:user-id (:id user)
:user-etype (get-in env [:config :user-etype])})
result (mutation/update! jdbc
schema
etype
my-entity
cond-snippet)]
(if-let [tx-id (:tx/id result)]
(merge entity
{entity-id id
:tx/id tx-id}
(into {}
(for [[field field-def] (sc/field-defs schema etype)
:let [v (field my-entity)]
:when (and v (sdf/save-related? field-def))]
[field
(save-related! env
field
field-def
v
{:outer-etype etype
:outer-id id})])))
(when (and cond-snippet (by-id env etype id [entity-id]))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity}))))))
(sd/defn update!
"Given the environment `env`, updates the `entity` of type `etype` in the database.
Returns the given `entity` containing the transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [{:keys [jdbc user]} env
entity-id (sc/id-field (get-in env [:config :schema]) etype)]
(when-not (s/valid? (s/keys)
entity)
(throw (let [expl (s/explain-str etype
entity)]
(ex-info (str "Invalid changeset: " expl)
{:etype etype
:entity entity
:explain (s/explain-str (s/keys)
entity)}))))
(when-not (or (:root? user)
(ac/allowed-some? user :verb/update etype))
(throw (ex-info "Permission denied"
{:etype etype
:entity entity})))
(jdbc/with-transaction
[trans jdbc]
(update* (assoc env :jdbc trans) etype entity (entity-id entity)))))
(sd/defn save!
"Given the environment `env`, saves the `entity` of type `etype` into the database.
Tries to create the entity if its id field is nonexistent or nil.
Tries to update the entity if it has an id.
Returns the given `entity` containing the new id (if created) and transaction id.
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
^map? [^::sp/env env ^::sp/etype etype ^map? entity]
(let [entity-id (sc/id-field (get-in env [:config :schema]) etype)
entity-id-value (entity-id entity)
new? (nil? entity-id-value)]
(if new?
(create! env etype entity)
(update! env etype entity))))
(defn- delete*
[{:keys [config jdbc user]
:as env} etype id]
(let [{:keys [schema user-etype]} config
cond-snippet (ac/conditions-snippet schema
[etype]
(:permissions user)
:verb/delete
{:user-id (:id user)
:user-etype user-etype})
result (mutation/delete! jdbc
schema
etype
id
cond-snippet)]
(if (:tx/id result)
(dissoc result :tx/ts)
(when (and cond-snippet (by-id env etype id [(sc/id-field (get-in env [:config :schema]) etype)]))
(throw (ex-info "Permission denied"
{:etype etype
:id id}))))))
(sd/defn delete!
"Given the environment `env`, delete the `entity` of type `etype`.
Returns a map of id, :tx/id, nil if not found (might be due to permissions).
Shape of `env`:
{:jdbc database specification suitable for use with next.jdbc
:config specomatic config
:user {:id user id
:permissions sequence of permissions
:root? if user is root}}"
[^::sp/env env ^::sp/etype etype id]
(let [{:keys [jdbc user]} env]
(when-not (or (:root? user)
(ac/allowed-some? user :verb/delete etype))
(throw (ex-info "Permission denied"
{:etype etype
:id id})))
(jdbc/with-transaction
[trans jdbc]
(delete* (assoc env :jdbc trans) etype id))))
(defmethod save-related! :default
[_env _field _field-def _value _opts]
nil)
(defmethod save-related! [::sf/reference :has-one]
[env _field field-def value {:keys [outer-id]}]
(let [target (sf/target field-def)
target-id (sc/id-field (get-in env [:config :schema]) target)
via (sdf/db-via field-def)
target-id-value (target-id value)]
(if (::delete value)
(when target-id-value
(delete* env target target-id-value))
(if target-id-value
(merge value (update* env target value target-id-value))
(merge value (create* env target (assoc value via outer-id)))))))
(defmethod save-related! [::sf/reference-coll :has-many]
[env _field field-def value {:keys [outer-id]}]
(let [target (sf/target field-def)
target-id (sc/id-field (get-in env [:config :schema]) target)
via (sdf/db-via field-def)]
(->>
(for [entity value
:let [target-id-value (target-id entity)]]
(if (::delete entity)
(when target-id-value
(delete* env target target-id-value))
(if target-id-value
(merge entity (update* env target entity target-id-value))
(merge entity (create* env target (assoc entity via outer-id))))))
(filterv some?))))
(defmethod save-related! [::sf/reference-coll :has-many-through]
[{:keys [jdbc]} _field field-def value {:keys [outer-id]}]
(let [join-table (sdf/join-table field-def)
[_ etype-fk target-fk target-id] (sdf/db-via field-def)]
(->>
(for [my-ref value
:let [target-id-value (if (map? my-ref)
(target-id my-ref)
my-ref)]
:when target-id-value]
(let [table (cnv/etype->table-name jdbc join-table nil)
entity-fk-column (cnv/field->column-name jdbc etype-fk nil)
target-fk-column (cnv/field->column-name jdbc target-fk nil)]
(if (::delete my-ref)
(do (db-generic/delete-reference-coll-element!
jdbc
{:table table
:entity-id outer-id
:entity-idfield entity-fk-column
:target-id target-id-value
:target-idfield target-fk-column})
nil)
(do (sql/upsert-reference-coll-element! jdbc
{:table table
:entity-id outer-id
:entity-idfield entity-fk-column
:target-id target-id-value
:target-idfield target-fk-column})
my-ref))))
(filterv some?))))
|
121bc4991eff72c5816322852ba5893e1db8fe0868e6c2d6b8315dd5fedf9808 | melange-re/melange | test_regex.ml | let v = [%bs.re "/b/ig"]
let r = [%bs.re "/Bucklescript是一个程序语言/"]
) ; ;
Js.log(r ) ; ;
Js.log(r);;*)
let c = v;; | null | https://raw.githubusercontent.com/melange-re/melange/246e6df78fe3b6cc124cb48e5a37fdffd99379ed/jscomp/test/test_regex.ml | ocaml | let v = [%bs.re "/b/ig"]
let r = [%bs.re "/Bucklescript是一个程序语言/"]
) ; ;
Js.log(r ) ; ;
Js.log(r);;*)
let c = v;; | |
07ad7dd9c37c0eb334b9c6731189382477035a596903d8950020f8fc84b1b3a2 | binsec/haunted | kset.ml | (**************************************************************************)
This file is part of BINSEC .
(* *)
Copyright ( C ) 2016 - 2019
CEA ( Commissariat à l'énergie atomique et aux énergies
(* alternatives) *)
(* *)
(* you can redistribute it and/or modify it under the terms of the GNU *)
Lesser General Public License as published by the Free Software
Foundation , version 2.1 .
(* *)
(* It is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU Lesser General Public License for more details. *)
(* *)
See the GNU Lesser General Public License version 2.1
for more details ( enclosed in the file licenses / LGPLv2.1 ) .
(* *)
(**************************************************************************)
open Errors
open Ai_options
exception Elements_of_top
module K_set = Set.Make (
struct
type t = Region_bitvector.t
let compare param1 param2 =
match param1, param2 with
|`Value (r1, b1), `Value (r2, b2) ->
let size1 = Bitvector.size_of b1 in
let b1 = Bitvector.value_of b1 in
let size2 = Bitvector.size_of b2 in
let b2 = Bitvector.value_of b2 in
let modulo b n =
let max = Bigint.power_int_positive_int 2 n in
Bigint.mod_big_int b max
in
let bb1 = modulo b1 size1 in
let bb2 = modulo b2 size2 in
let c = Bigint.compare_big_int bb1 bb2 in
if c = 0 && (Region_bitvector.region_equal r1 r2) then 0
else if c < 0 then -1
else 1
| `Symb s1, `Symb s2 -> Region_bitvector.SubSymb.compare s1 s2
| `SymbSmt s1, `SymbSmt s2 ->
(* FIXME: Why not use Pervasives here ? *)
if Smt_bitvectors.is_equal_smtBvExpr s1 s2 then 0 else 1
| `Undef sz1, `Undef sz2 -> compare sz1 sz2
| `Value _, _ -> 1
| _, `Value _ -> -1
| `Symb _, `SymbSmt _ -> 1
| `SymbSmt _, `Symb _ -> -1
| `Undef _, _ -> -1
| _, `Undef _ -> 1
end
)
type proper_kset = (int * K_set.t)
Lbound of Region_bitvector.t | Hbound of Region_bitvector.t
type t =
| Proper_kset of proper_kset
| Top of tag
let universe = Top NoTag
let of_bounds _ = Top NoTag
let elements kset =
match kset with
| Proper_kset (_k, s) -> K_set.elements s
| Top _ -> raise Elements_of_top
let pp ppf kset =
let open Format in
match kset with
| Top _ -> fprintf ppf "T"
| Proper_kset (_, set) ->
fprintf ppf "{@[<hov 0>%a@]}"
(fun ppf rbvs ->
K_set.iter (fun rbv -> fprintf ppf "%a;@ " Region_bitvector.pp rbv) rbvs
) set
let to_string kset =
Format.(fprintf str_formatter "%a" pp kset; flush_str_formatter ())
let empty = Proper_kset (Ai_options.KSetSize.get (), K_set.empty)
let _is_empty kset =
match kset with
Proper_kset (_, s) -> K_set.is_empty s
| _ -> false
let _insert elem kset =
match kset with
| Proper_kset (k, set) -> Proper_kset (k, K_set.add elem set)
| Top tag -> Top tag
let is_empty = function
| Proper_kset (_, s) -> K_set.is_empty s
| Top _ -> false
let _is_homogeneous ks =
let s =
match ks with
| Proper_kset (_, s) -> s
| Top _ -> failwith "Kset.ml : is_homogeneous of Top?"
in
let param1 = K_set.choose s in
let is_same_region param2 =
match param1, param2 with
| `Value (r1, _), `Value (r2, _) ->
Region_bitvector.region_equal r1 r2
| _, _ -> false
in
let cond = K_set.for_all is_same_region s in
if cond = true then Some (Region_bitvector.region_of param1)
else None
let create =
let limit = Ai_options.KSetSize.get () in
fun s ->
if K_set.cardinal s > limit then Top NoTag
else Proper_kset (limit, s)
let singleton i =
Proper_kset (Ai_options.KSetSize.get (), K_set.singleton i)
let _nondet = Top NoTag
let _mem l ks =
match ks with
| Proper_kset (_, s) -> K_set.mem l s
| _ -> false
let contains ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_, s1) , Proper_kset (_, s2) -> K_set.subset s2 s1
| Proper_kset _, Top _ -> false
| Top _, Proper_kset _ -> true
| Top _, Top _ -> true
let equal ks1 ks2 =
(contains ks1 ks2) && (contains ks2 ks1)
let concat ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_k1, s1), Proper_kset (_k2, s2) ->
let s =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
try
K_set.add (Region_bitvector.append elem1 elem2) acc2
with
| Errors.Bad_concat _ ->
Logger.warning "KSet: Bad concat";
acc2
) s2 acc1
) s1 K_set.empty
in
Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let max ks =
match ks with
Proper_kset (_, s) -> K_set.max_elt s
| _ -> failwith "kset.ml: max of non proper kset"
let _min ks =
match ks with
Proper_kset (_, s) -> K_set.min_elt s
| _ -> failwith "kset.ml: max of non proper kset"
let join ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
if K_set.cardinal s1 > Ai_options.KSetSize.get () ||
K_set.cardinal s2 > Ai_options.KSetSize.get () then
Top NoTag
else
let s = K_set.union s1 s2 in
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let widen ks1 ks2 _thresholds =
match ks1, ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
if K_set.cardinal s1 > Ai_options.KSetSize.get () ||
K_set.cardinal s2 > Ai_options.KSetSize.get ()
then Top NoTag
else
let s = K_set.union s1 s2 in
if (K_set.cardinal s) > Ai_options.KSetSize.get () then Top NoTag
else if ((K_set.cardinal s) > (K_set.cardinal s1))
then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let meet ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
let s = K_set.inter s1 s2 in
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| Proper_kset (_, s), Top _
| Top _, Proper_kset (_, s) ->
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| Top _, Top _ -> Top NoTag
let neg ks =
match ks with
| Proper_kset (k, s) ->
let s' = K_set.fold
(fun elem acc ->
K_set.add (Region_bitvector.neg elem) acc)
s K_set.empty in
Proper_kset (k, s')
| Top t -> Top t
let lognot ks =
match ks with
| Proper_kset (k, s) ->
let s' = K_set.fold
(fun elem acc ->
K_set.add (Region_bitvector.lognot elem) acc)
s K_set.empty in
Proper_kset (k, s')
| Top t -> Top t
let addc ks c =
match ks with
| Proper_kset (_ , s) ->
let f elem acc = K_set.add (Region_bitvector.add elem c) acc in
let s' = K_set.fold f s K_set.empty in
create s'
| Top t -> Top t
let apply f ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_, s1), Proper_kset (_, s2) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
K_set.add (f elem1 elem2) acc2)
s2 acc1)
s1 K_set.empty in
create s'
| _ -> Top NoTag (* TODO: check region *)
let add = apply Region_bitvector.add
let _subc ks c = addc ks (Region_bitvector.neg c)
let _csub c ks = addc (neg ks) c
let sub ks1 ks2 = add ks1 (neg ks2)
let mul = apply Region_bitvector.mul
let power = apply Region_bitvector.pow
let udiv = apply Region_bitvector.udiv
let sdiv ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_, s1), Proper_kset (_, s2) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
K_set.add (Region_bitvector.sdiv elem1 elem2) acc2)
s2 acc1)
s1 K_set.empty in
create s'
| _, Proper_kset (_, s2) ->
let s = K_set.filter Region_bitvector.is_zero s2 in
if K_set.cardinal s > 0 then raise Errors.Div_by_zero
else Top NoTag
| _ -> Top NoTag (* TODO: check region *)
let restrict ks of1 of2 =
match ks with
| Proper_kset (_, s) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.restrict elem1 of1 of2) acc1)
s K_set.empty in
create s'
| _ -> Top NoTag (* TODO: check region *)
let umod = apply Region_bitvector.umod
let smod = apply Region_bitvector.smod
let logor = apply Region_bitvector.logor
let logxor = apply Region_bitvector.logxor
let logand = apply Region_bitvector.logand
let lshift = apply Region_bitvector.lshift
let rshiftU = apply Region_bitvector.rshiftU
let rshiftS = apply Region_bitvector.rshiftS
let rotate_left = apply Region_bitvector.rotate_left
let rotate_right = apply Region_bitvector.rotate_right
let extension ks1 l =
match ks1 with
| Proper_kset (_, s1) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.extension elem1 l) acc1)
s1 K_set.empty in
create s'
| _ -> Top NoTag (* TODO: check region *)
let signed_extension ks1 l =
match ks1 with
| Proper_kset (_, s1) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.signed_extension elem1 l) acc1)
s1 K_set.empty in
create s'
| _ -> Top NoTag (* TODO: check region *)
let eq = apply Region_bitvector.eq
let diff = apply Region_bitvector.diff
let leqU = apply Region_bitvector.leqU
let leqS = apply Region_bitvector.leqS
let ltU = apply Region_bitvector.ltU
let ltS = apply Region_bitvector.ltS
let geqU = apply Region_bitvector.geqU
let geqS = apply Region_bitvector.geqS
let gtU = apply Region_bitvector.gtU
let gtS = apply Region_bitvector.gtS
let filter (f : K_set.elt -> bool) ks =
match ks with
| Proper_kset (k, s) -> Proper_kset (k, K_set.filter f s)
| Top tag -> Top tag
let is_true rbv =
match rbv with
| `Value (`Constant, bv) when Bitvector.is_one bv -> true
| _ -> false
let exists f ks =
match ks with
| Proper_kset (_k, s) -> K_set.exists f s
| Top _ -> true
let filter_exists f ks1 ks2 =
filter (fun elt1 -> exists (fun elt2 -> is_true (f elt1 elt2)) ks2) ks1,
filter (fun elt1 -> exists (fun elt2 -> is_true (f elt1 elt2)) ks1) ks2
let guard op ks1 ks2 =
let ks_1, ks_2 =
(match op with
| Dba.Binary_op.Eq ->
let ks = meet ks1 ks2 in ks, ks
| Dba.Binary_op.Diff -> filter_exists Region_bitvector.diff ks1 ks2
| Dba.Binary_op.LeqU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LtU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GeqU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GtU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LeqS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LtS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GeqS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GtS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltS elt1 elt2) in
is_true c) ks1) ks2)
| _ -> ks1, ks2)
in
ks_1, ks_2
let _fold f a b =
match a with
| Proper_kset (_k, s) -> K_set.fold f s b
| Top _ -> failwith "kset.ml : fold of top kset"
let _iter f a =
match a with
Proper_kset (_, s) -> K_set.iter f s
| Top _ -> failwith "Kset.ml : iter of top"
let is_true kset assumes glbs =
begin match kset with
| Proper_kset (_k, set) ->
let b' =
let elem = K_set.choose set in
begin match elem with
| `Value (`Constant, b) when Bitvector.size_of b = 1 ->
Bitvector.value_of b
| `SymbSmt smb ->
let e = Region_bitvector.get_expr smb 1 assumes glbs in
begin match e with
| Dba.Expr.Cst (`Constant, b)
when Bitvector.size_of b = 1 -> Bitvector.value_of b
| _ -> raise (Bad_region "Evaluating non cst condition1")
end
| _ -> raise (Bad_region "Evaluating non cst condition2")
end
in
let cond_homogeneous =
let f rbv =
begin match rbv with
| `Value (`Constant, b) when Bitvector.size_of b = 1 ->
Bigint.eq_big_int (Bitvector.value_of b) b'
| `SymbSmt smb ->
let e = Region_bitvector.get_expr smb 1 assumes glbs in
begin match e with
| Dba.Expr.Cst (`Constant, b) when Bitvector.size_of b = 1 ->
Bigint.eq_big_int (Bitvector.value_of b) b'
| _ -> raise (Bad_region "Evaluating non cst condition3")
end
| _ -> raise (Bad_region "Evaluating non cst condition4")
end
in
K_set.for_all f set
in
let open Basic_types.Ternary in
if cond_homogeneous
then of_bool (Bigint.eq_big_int b' Bigint.unit_big_int)
else Unknown
| Top _ -> Basic_types.Ternary.Unknown
end
let to_smt (kset: t) (var: Formula.bv_term) : Formula.bl_term list =
match kset with
Top _p -> []
| Proper_kset (_k, set) ->
let expr =
K_set.fold (fun rbv acc ->
match rbv with
| `Value (_r, bv) ->
Formula.(mk_bl_or (mk_bv_equal (mk_bv_cst bv) var) acc)
| _ -> acc
) set Formula.mk_bl_false
in
[expr]
let smt_refine kset env_smt var =
match kset with
Top p -> Top p
| Proper_kset (k, set) ->
let set = K_set.filter (fun rbv ->
match rbv with
| `Value (_r, bv) ->
let cond = Formula_pp.print_bv_term (Formula.mk_bv_cst bv) in
let conds = Format.asprintf "(assert (= %s %s))@\n" var cond in
Normalize_instructions.is_sat env_smt conds
| _ -> true
) set
in
Proper_kset (k, set)
| null | https://raw.githubusercontent.com/binsec/haunted/7ffc5f4072950fe138f53fe953ace98fff181c73/src/static/ai/domains/kset.ml | ocaml | ************************************************************************
alternatives)
you can redistribute it and/or modify it under the terms of the GNU
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
************************************************************************
FIXME: Why not use Pervasives here ?
TODO: check region
TODO: check region
TODO: check region
TODO: check region
TODO: check region | This file is part of BINSEC .
Copyright ( C ) 2016 - 2019
CEA ( Commissariat à l'énergie atomique et aux énergies
Lesser General Public License as published by the Free Software
Foundation , version 2.1 .
See the GNU Lesser General Public License version 2.1
for more details ( enclosed in the file licenses / LGPLv2.1 ) .
open Errors
open Ai_options
exception Elements_of_top
module K_set = Set.Make (
struct
type t = Region_bitvector.t
let compare param1 param2 =
match param1, param2 with
|`Value (r1, b1), `Value (r2, b2) ->
let size1 = Bitvector.size_of b1 in
let b1 = Bitvector.value_of b1 in
let size2 = Bitvector.size_of b2 in
let b2 = Bitvector.value_of b2 in
let modulo b n =
let max = Bigint.power_int_positive_int 2 n in
Bigint.mod_big_int b max
in
let bb1 = modulo b1 size1 in
let bb2 = modulo b2 size2 in
let c = Bigint.compare_big_int bb1 bb2 in
if c = 0 && (Region_bitvector.region_equal r1 r2) then 0
else if c < 0 then -1
else 1
| `Symb s1, `Symb s2 -> Region_bitvector.SubSymb.compare s1 s2
| `SymbSmt s1, `SymbSmt s2 ->
if Smt_bitvectors.is_equal_smtBvExpr s1 s2 then 0 else 1
| `Undef sz1, `Undef sz2 -> compare sz1 sz2
| `Value _, _ -> 1
| _, `Value _ -> -1
| `Symb _, `SymbSmt _ -> 1
| `SymbSmt _, `Symb _ -> -1
| `Undef _, _ -> -1
| _, `Undef _ -> 1
end
)
type proper_kset = (int * K_set.t)
Lbound of Region_bitvector.t | Hbound of Region_bitvector.t
type t =
| Proper_kset of proper_kset
| Top of tag
let universe = Top NoTag
let of_bounds _ = Top NoTag
let elements kset =
match kset with
| Proper_kset (_k, s) -> K_set.elements s
| Top _ -> raise Elements_of_top
let pp ppf kset =
let open Format in
match kset with
| Top _ -> fprintf ppf "T"
| Proper_kset (_, set) ->
fprintf ppf "{@[<hov 0>%a@]}"
(fun ppf rbvs ->
K_set.iter (fun rbv -> fprintf ppf "%a;@ " Region_bitvector.pp rbv) rbvs
) set
let to_string kset =
Format.(fprintf str_formatter "%a" pp kset; flush_str_formatter ())
let empty = Proper_kset (Ai_options.KSetSize.get (), K_set.empty)
let _is_empty kset =
match kset with
Proper_kset (_, s) -> K_set.is_empty s
| _ -> false
let _insert elem kset =
match kset with
| Proper_kset (k, set) -> Proper_kset (k, K_set.add elem set)
| Top tag -> Top tag
let is_empty = function
| Proper_kset (_, s) -> K_set.is_empty s
| Top _ -> false
let _is_homogeneous ks =
let s =
match ks with
| Proper_kset (_, s) -> s
| Top _ -> failwith "Kset.ml : is_homogeneous of Top?"
in
let param1 = K_set.choose s in
let is_same_region param2 =
match param1, param2 with
| `Value (r1, _), `Value (r2, _) ->
Region_bitvector.region_equal r1 r2
| _, _ -> false
in
let cond = K_set.for_all is_same_region s in
if cond = true then Some (Region_bitvector.region_of param1)
else None
let create =
let limit = Ai_options.KSetSize.get () in
fun s ->
if K_set.cardinal s > limit then Top NoTag
else Proper_kset (limit, s)
let singleton i =
Proper_kset (Ai_options.KSetSize.get (), K_set.singleton i)
let _nondet = Top NoTag
let _mem l ks =
match ks with
| Proper_kset (_, s) -> K_set.mem l s
| _ -> false
let contains ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_, s1) , Proper_kset (_, s2) -> K_set.subset s2 s1
| Proper_kset _, Top _ -> false
| Top _, Proper_kset _ -> true
| Top _, Top _ -> true
let equal ks1 ks2 =
(contains ks1 ks2) && (contains ks2 ks1)
let concat ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_k1, s1), Proper_kset (_k2, s2) ->
let s =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
try
K_set.add (Region_bitvector.append elem1 elem2) acc2
with
| Errors.Bad_concat _ ->
Logger.warning "KSet: Bad concat";
acc2
) s2 acc1
) s1 K_set.empty
in
Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let max ks =
match ks with
Proper_kset (_, s) -> K_set.max_elt s
| _ -> failwith "kset.ml: max of non proper kset"
let _min ks =
match ks with
Proper_kset (_, s) -> K_set.min_elt s
| _ -> failwith "kset.ml: max of non proper kset"
let join ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
if K_set.cardinal s1 > Ai_options.KSetSize.get () ||
K_set.cardinal s2 > Ai_options.KSetSize.get () then
Top NoTag
else
let s = K_set.union s1 s2 in
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let widen ks1 ks2 _thresholds =
match ks1, ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
if K_set.cardinal s1 > Ai_options.KSetSize.get () ||
K_set.cardinal s2 > Ai_options.KSetSize.get ()
then Top NoTag
else
let s = K_set.union s1 s2 in
if (K_set.cardinal s) > Ai_options.KSetSize.get () then Top NoTag
else if ((K_set.cardinal s) > (K_set.cardinal s1))
then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| _, _ -> Top NoTag
let meet ks1 ks2 =
match ks1,ks2 with
| Proper_kset (_,s1) , Proper_kset (_,s2) ->
let s = K_set.inter s1 s2 in
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| Proper_kset (_, s), Top _
| Top _, Proper_kset (_, s) ->
if K_set.cardinal s > Ai_options.KSetSize.get () then Top NoTag
else Proper_kset (Ai_options.KSetSize.get (), s)
| Top _, Top _ -> Top NoTag
let neg ks =
match ks with
| Proper_kset (k, s) ->
let s' = K_set.fold
(fun elem acc ->
K_set.add (Region_bitvector.neg elem) acc)
s K_set.empty in
Proper_kset (k, s')
| Top t -> Top t
let lognot ks =
match ks with
| Proper_kset (k, s) ->
let s' = K_set.fold
(fun elem acc ->
K_set.add (Region_bitvector.lognot elem) acc)
s K_set.empty in
Proper_kset (k, s')
| Top t -> Top t
let addc ks c =
match ks with
| Proper_kset (_ , s) ->
let f elem acc = K_set.add (Region_bitvector.add elem c) acc in
let s' = K_set.fold f s K_set.empty in
create s'
| Top t -> Top t
let apply f ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_, s1), Proper_kset (_, s2) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
K_set.add (f elem1 elem2) acc2)
s2 acc1)
s1 K_set.empty in
create s'
let add = apply Region_bitvector.add
let _subc ks c = addc ks (Region_bitvector.neg c)
let _csub c ks = addc (neg ks) c
let sub ks1 ks2 = add ks1 (neg ks2)
let mul = apply Region_bitvector.mul
let power = apply Region_bitvector.pow
let udiv = apply Region_bitvector.udiv
let sdiv ks1 ks2 =
match ks1, ks2 with
| Proper_kset (_, s1), Proper_kset (_, s2) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.fold (fun elem2 acc2 ->
K_set.add (Region_bitvector.sdiv elem1 elem2) acc2)
s2 acc1)
s1 K_set.empty in
create s'
| _, Proper_kset (_, s2) ->
let s = K_set.filter Region_bitvector.is_zero s2 in
if K_set.cardinal s > 0 then raise Errors.Div_by_zero
else Top NoTag
let restrict ks of1 of2 =
match ks with
| Proper_kset (_, s) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.restrict elem1 of1 of2) acc1)
s K_set.empty in
create s'
let umod = apply Region_bitvector.umod
let smod = apply Region_bitvector.smod
let logor = apply Region_bitvector.logor
let logxor = apply Region_bitvector.logxor
let logand = apply Region_bitvector.logand
let lshift = apply Region_bitvector.lshift
let rshiftU = apply Region_bitvector.rshiftU
let rshiftS = apply Region_bitvector.rshiftS
let rotate_left = apply Region_bitvector.rotate_left
let rotate_right = apply Region_bitvector.rotate_right
let extension ks1 l =
match ks1 with
| Proper_kset (_, s1) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.extension elem1 l) acc1)
s1 K_set.empty in
create s'
let signed_extension ks1 l =
match ks1 with
| Proper_kset (_, s1) ->
let s' =
K_set.fold (fun elem1 acc1 ->
K_set.add (Region_bitvector.signed_extension elem1 l) acc1)
s1 K_set.empty in
create s'
let eq = apply Region_bitvector.eq
let diff = apply Region_bitvector.diff
let leqU = apply Region_bitvector.leqU
let leqS = apply Region_bitvector.leqS
let ltU = apply Region_bitvector.ltU
let ltS = apply Region_bitvector.ltS
let geqU = apply Region_bitvector.geqU
let geqS = apply Region_bitvector.geqS
let gtU = apply Region_bitvector.gtU
let gtS = apply Region_bitvector.gtS
let filter (f : K_set.elt -> bool) ks =
match ks with
| Proper_kset (k, s) -> Proper_kset (k, K_set.filter f s)
| Top tag -> Top tag
let is_true rbv =
match rbv with
| `Value (`Constant, bv) when Bitvector.is_one bv -> true
| _ -> false
let exists f ks =
match ks with
| Proper_kset (_k, s) -> K_set.exists f s
| Top _ -> true
let filter_exists f ks1 ks2 =
filter (fun elt1 -> exists (fun elt2 -> is_true (f elt1 elt2)) ks2) ks1,
filter (fun elt1 -> exists (fun elt2 -> is_true (f elt1 elt2)) ks1) ks2
let guard op ks1 ks2 =
let ks_1, ks_2 =
(match op with
| Dba.Binary_op.Eq ->
let ks = meet ks1 ks2 in ks, ks
| Dba.Binary_op.Diff -> filter_exists Region_bitvector.diff ks1 ks2
| Dba.Binary_op.LeqU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LtU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GeqU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GtU ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtU elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltU elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LeqS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.LtS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GeqS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.geqS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.leqS elt1 elt2) in
is_true c) ks1) ks2)
| Dba.Binary_op.GtS ->
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.gtS elt1 elt2) in
is_true c) ks2) ks1),
(filter (fun elt1 ->
exists (fun elt2 ->
let c = (Region_bitvector.ltS elt1 elt2) in
is_true c) ks1) ks2)
| _ -> ks1, ks2)
in
ks_1, ks_2
let _fold f a b =
match a with
| Proper_kset (_k, s) -> K_set.fold f s b
| Top _ -> failwith "kset.ml : fold of top kset"
let _iter f a =
match a with
Proper_kset (_, s) -> K_set.iter f s
| Top _ -> failwith "Kset.ml : iter of top"
let is_true kset assumes glbs =
begin match kset with
| Proper_kset (_k, set) ->
let b' =
let elem = K_set.choose set in
begin match elem with
| `Value (`Constant, b) when Bitvector.size_of b = 1 ->
Bitvector.value_of b
| `SymbSmt smb ->
let e = Region_bitvector.get_expr smb 1 assumes glbs in
begin match e with
| Dba.Expr.Cst (`Constant, b)
when Bitvector.size_of b = 1 -> Bitvector.value_of b
| _ -> raise (Bad_region "Evaluating non cst condition1")
end
| _ -> raise (Bad_region "Evaluating non cst condition2")
end
in
let cond_homogeneous =
let f rbv =
begin match rbv with
| `Value (`Constant, b) when Bitvector.size_of b = 1 ->
Bigint.eq_big_int (Bitvector.value_of b) b'
| `SymbSmt smb ->
let e = Region_bitvector.get_expr smb 1 assumes glbs in
begin match e with
| Dba.Expr.Cst (`Constant, b) when Bitvector.size_of b = 1 ->
Bigint.eq_big_int (Bitvector.value_of b) b'
| _ -> raise (Bad_region "Evaluating non cst condition3")
end
| _ -> raise (Bad_region "Evaluating non cst condition4")
end
in
K_set.for_all f set
in
let open Basic_types.Ternary in
if cond_homogeneous
then of_bool (Bigint.eq_big_int b' Bigint.unit_big_int)
else Unknown
| Top _ -> Basic_types.Ternary.Unknown
end
let to_smt (kset: t) (var: Formula.bv_term) : Formula.bl_term list =
match kset with
Top _p -> []
| Proper_kset (_k, set) ->
let expr =
K_set.fold (fun rbv acc ->
match rbv with
| `Value (_r, bv) ->
Formula.(mk_bl_or (mk_bv_equal (mk_bv_cst bv) var) acc)
| _ -> acc
) set Formula.mk_bl_false
in
[expr]
let smt_refine kset env_smt var =
match kset with
Top p -> Top p
| Proper_kset (k, set) ->
let set = K_set.filter (fun rbv ->
match rbv with
| `Value (_r, bv) ->
let cond = Formula_pp.print_bv_term (Formula.mk_bv_cst bv) in
let conds = Format.asprintf "(assert (= %s %s))@\n" var cond in
Normalize_instructions.is_sat env_smt conds
| _ -> true
) set
in
Proper_kset (k, set)
|
077c42aa503baf6b69a83f3e3623f2c0c659badd0eda2695ea354118b047ebec | TerrorJack/ghc-alter | T5943.hs | # LANGUAGE GeneralizedNewtypeDeriving #
import Control.Monad
import Control.Monad.Fix
import Data.IORef
import Prelude hiding (until)
data Phase a = Ready a | Updated a a
delay :: IO Int -- ^ the signal to delay
-> IO (IO (), IO (), IO Int) -- ^ the delayed signal
delay s = do
ref <- newIORef (Ready 0)
let
upd = do v <- readIORef ref
case v of
Ready x -> do putStrLn "upd: Ready"; x' <- s; putStrLn (show x'); writeIORef ref (Updated x' x)
_ -> return ()
fin = do v <- readIORef ref
case v of
Updated x _ -> do putStrLn "fin: Updated"; writeIORef ref $! Ready x
_ -> error "Signal not updated!"
sig = do v <- readIORef ref
case v of
Ready x -> do putStrLn "sig: Ready"; return x
Updated _ x -> do putStrLn "sig: Updated"; return x
return (upd,fin,sig)
main = do
(upd,fin,_) <- mfix $ \ ~(_,_,sig) -> delay (fmap (1+) sig)
upd
fin
upd
| null | https://raw.githubusercontent.com/TerrorJack/ghc-alter/db736f34095eef416b7e077f9b26fc03aa78c311/ghc-alter/boot-lib/base/tests/T5943.hs | haskell | ^ the signal to delay
^ the delayed signal | # LANGUAGE GeneralizedNewtypeDeriving #
import Control.Monad
import Control.Monad.Fix
import Data.IORef
import Prelude hiding (until)
data Phase a = Ready a | Updated a a
delay s = do
ref <- newIORef (Ready 0)
let
upd = do v <- readIORef ref
case v of
Ready x -> do putStrLn "upd: Ready"; x' <- s; putStrLn (show x'); writeIORef ref (Updated x' x)
_ -> return ()
fin = do v <- readIORef ref
case v of
Updated x _ -> do putStrLn "fin: Updated"; writeIORef ref $! Ready x
_ -> error "Signal not updated!"
sig = do v <- readIORef ref
case v of
Ready x -> do putStrLn "sig: Ready"; return x
Updated _ x -> do putStrLn "sig: Updated"; return x
return (upd,fin,sig)
main = do
(upd,fin,_) <- mfix $ \ ~(_,_,sig) -> delay (fmap (1+) sig)
upd
fin
upd
|
c23fd7c72f97e5c9842364dbe001d74d98d04aad40149a117dac2e8fb1738aba | OCamlPro/ez_pgocaml | ezPG.mli | (**************************************************************************)
(* *)
Copyright 2018 - 2021 OCamlPro
(* *)
(* All rights reserved. This file is distributed under the terms of the *)
GNU Lesser General Public License version 2.1 , with the special
(* exception on linking described in the file LICENSE. *)
(* *)
(**************************************************************************)
val connect :
?host:string ->
?port:int ->
?user:string ->
?password:string ->
?unix_domain_socket_dir:string ->
string -> 'a PGOCaml.t
val close : 'a PGOCaml.t -> unit
val exec :
?verbose:bool -> (* print commands, true by default *)
'a PGOCaml.t -> (* database handler *)
?callback: (* function called with results, None = error *)
(string list list option -> unit) ->
string -> (* Query *)
unit
val execs : (* same as exec, but with a list of queries *)
?verbose:bool ->
'a PGOCaml.t ->
string list ->
unit
val upgrade_database :
?verbose:bool -> (* print commands, false by default *)
?downgrades: (int * string list) list ->
?allow_downgrade: bool ->
upgrades: (* migration scripts *)
(int * ('a PGOCaml.t -> int -> unit)) list ->
?target:int -> (* target version *)
?witness:string -> (* a file modified if the db is modified *)
'a PGOCaml.t -> (* database handler *)
unit
val touch_witness : ?witness:string -> int -> unit
(* ~searchpath can be used to register meta tables in a different
domain (for example, "db") *)
val init : ?verbose:bool -> ?witness:string ->
?searchpath:string -> 'a PGOCaml.t -> unit
(* Useful functions to create the initial database *)
val createdb :
?verbose:bool ->
?host:string ->
?port:int ->
?unix_domain_socket_dir:string ->
string -> unit
val dropdb :
?verbose:bool ->
?host:string ->
?port:int ->
?unix_domain_socket_dir:string ->
string -> unit
val begin_tr : 'a PGOCaml.t -> unit
val end_tr : 'a PGOCaml.t -> unit
val abort_tr : 'a PGOCaml.t -> unit
val in_tr : 'a PGOCaml.t -> ('a PGOCaml.t -> unit) -> unit
val upgrade :
?verbose:bool -> version:int ->
?downgrade:string list ->
dbh:'c PGOCaml.t -> string list -> unit
val printf :
?verbose:bool ->
?callback:(string list list option -> unit) ->
'a PGOCaml.t -> ('b, unit, string, unit) format4 -> 'b
val may_upgrade_old_info : ?verbose:bool -> 'a PGOCaml.t -> unit
(* Add columns row_created_ and row_modified_ to a table,
automatically updated in INSERT and UPDATE by a trigger.*)
module Mtimes : sig
val upgrade_init : string list
val downgrade_init : string list
val upgrade_table : string -> string list
val downgrade_table : string -> string list
end
| null | https://raw.githubusercontent.com/OCamlPro/ez_pgocaml/e84e6835e6048a27fcdfdc58403ca3a8ce16d744/src/ezPG.mli | ocaml | ************************************************************************
All rights reserved. This file is distributed under the terms of the
exception on linking described in the file LICENSE.
************************************************************************
print commands, true by default
database handler
function called with results, None = error
Query
same as exec, but with a list of queries
print commands, false by default
migration scripts
target version
a file modified if the db is modified
database handler
~searchpath can be used to register meta tables in a different
domain (for example, "db")
Useful functions to create the initial database
Add columns row_created_ and row_modified_ to a table,
automatically updated in INSERT and UPDATE by a trigger. | Copyright 2018 - 2021 OCamlPro
GNU Lesser General Public License version 2.1 , with the special
val connect :
?host:string ->
?port:int ->
?user:string ->
?password:string ->
?unix_domain_socket_dir:string ->
string -> 'a PGOCaml.t
val close : 'a PGOCaml.t -> unit
val exec :
(string list list option -> unit) ->
unit
?verbose:bool ->
'a PGOCaml.t ->
string list ->
unit
val upgrade_database :
?downgrades: (int * string list) list ->
?allow_downgrade: bool ->
(int * ('a PGOCaml.t -> int -> unit)) list ->
unit
val touch_witness : ?witness:string -> int -> unit
val init : ?verbose:bool -> ?witness:string ->
?searchpath:string -> 'a PGOCaml.t -> unit
val createdb :
?verbose:bool ->
?host:string ->
?port:int ->
?unix_domain_socket_dir:string ->
string -> unit
val dropdb :
?verbose:bool ->
?host:string ->
?port:int ->
?unix_domain_socket_dir:string ->
string -> unit
val begin_tr : 'a PGOCaml.t -> unit
val end_tr : 'a PGOCaml.t -> unit
val abort_tr : 'a PGOCaml.t -> unit
val in_tr : 'a PGOCaml.t -> ('a PGOCaml.t -> unit) -> unit
val upgrade :
?verbose:bool -> version:int ->
?downgrade:string list ->
dbh:'c PGOCaml.t -> string list -> unit
val printf :
?verbose:bool ->
?callback:(string list list option -> unit) ->
'a PGOCaml.t -> ('b, unit, string, unit) format4 -> 'b
val may_upgrade_old_info : ?verbose:bool -> 'a PGOCaml.t -> unit
module Mtimes : sig
val upgrade_init : string list
val downgrade_init : string list
val upgrade_table : string -> string list
val downgrade_table : string -> string list
end
|
e085cccd5c9c3cd16dc3697bfdab774f2f715751c96df52f283028f9d46b844c | mejgun/haskell-tdlib | OptimizeStorage.hs | {-# LANGUAGE OverloadedStrings #-}
-- |
module TD.Query.OptimizeStorage where
import qualified Data.Aeson as A
import qualified Data.Aeson.Types as T
import qualified TD.Data.FileType as FileType
import qualified Utils as U
-- |
Optimizes storage usage , i.e. deletes some files and returns new storage usage statistics . Secret thumbnails ca n't be deleted
data OptimizeStorage = OptimizeStorage
| Same as in getStorageStatistics . Affects only returned statistics
chat_limit :: Maybe Int,
-- | Pass true if statistics about the files that were deleted must be returned instead of the whole storage usage statistics. Affects only returned statistics
return_deleted_file_statistics :: Maybe Bool,
-- | If non-empty, files from the given chats are excluded. Use 0 as chat identifier to exclude all files not belonging to any chat (e.g., profile photos)
exclude_chat_ids :: Maybe [Int],
-- | If non-empty, only files from the given chats are considered. Use 0 as chat identifier to delete files not belonging to any chat (e.g., profile photos)
chat_ids :: Maybe [Int],
-- | If non-empty, only files with the given types are considered. By default, all types except thumbnails, profile photos, stickers and wallpapers are deleted
file_types :: Maybe [FileType.FileType],
| The amount of time after the creation of a file during which it ca n't be deleted , in seconds . Pass -1 to use the default value
immunity_delay :: Maybe Int,
-- | Limit on the total number of files after deletion. Pass -1 to use the default limit
count :: Maybe Int,
-- | Limit on the time that has passed since the last time a file was accessed (or creation time for some filesystems). Pass -1 to use the default limit
ttl :: Maybe Int,
-- | Limit on the total size of files after deletion, in bytes. Pass -1 to use the default limit
size :: Maybe Int
}
deriving (Eq)
instance Show OptimizeStorage where
show
OptimizeStorage
{ chat_limit = chat_limit_,
return_deleted_file_statistics = return_deleted_file_statistics_,
exclude_chat_ids = exclude_chat_ids_,
chat_ids = chat_ids_,
file_types = file_types_,
immunity_delay = immunity_delay_,
count = count_,
ttl = ttl_,
size = size_
} =
"OptimizeStorage"
++ U.cc
[ U.p "chat_limit" chat_limit_,
U.p "return_deleted_file_statistics" return_deleted_file_statistics_,
U.p "exclude_chat_ids" exclude_chat_ids_,
U.p "chat_ids" chat_ids_,
U.p "file_types" file_types_,
U.p "immunity_delay" immunity_delay_,
U.p "count" count_,
U.p "ttl" ttl_,
U.p "size" size_
]
instance T.ToJSON OptimizeStorage where
toJSON
OptimizeStorage
{ chat_limit = chat_limit_,
return_deleted_file_statistics = return_deleted_file_statistics_,
exclude_chat_ids = exclude_chat_ids_,
chat_ids = chat_ids_,
file_types = file_types_,
immunity_delay = immunity_delay_,
count = count_,
ttl = ttl_,
size = size_
} =
A.object
[ "@type" A..= T.String "optimizeStorage",
"chat_limit" A..= chat_limit_,
"return_deleted_file_statistics" A..= return_deleted_file_statistics_,
"exclude_chat_ids" A..= exclude_chat_ids_,
"chat_ids" A..= chat_ids_,
"file_types" A..= file_types_,
"immunity_delay" A..= immunity_delay_,
"count" A..= count_,
"ttl" A..= ttl_,
"size" A..= size_
]
| null | https://raw.githubusercontent.com/mejgun/haskell-tdlib/81516bd04c25c7371d4a9a5c972499791111c407/src/TD/Query/OptimizeStorage.hs | haskell | # LANGUAGE OverloadedStrings #
|
|
| Pass true if statistics about the files that were deleted must be returned instead of the whole storage usage statistics. Affects only returned statistics
| If non-empty, files from the given chats are excluded. Use 0 as chat identifier to exclude all files not belonging to any chat (e.g., profile photos)
| If non-empty, only files from the given chats are considered. Use 0 as chat identifier to delete files not belonging to any chat (e.g., profile photos)
| If non-empty, only files with the given types are considered. By default, all types except thumbnails, profile photos, stickers and wallpapers are deleted
| Limit on the total number of files after deletion. Pass -1 to use the default limit
| Limit on the time that has passed since the last time a file was accessed (or creation time for some filesystems). Pass -1 to use the default limit
| Limit on the total size of files after deletion, in bytes. Pass -1 to use the default limit |
module TD.Query.OptimizeStorage where
import qualified Data.Aeson as A
import qualified Data.Aeson.Types as T
import qualified TD.Data.FileType as FileType
import qualified Utils as U
Optimizes storage usage , i.e. deletes some files and returns new storage usage statistics . Secret thumbnails ca n't be deleted
data OptimizeStorage = OptimizeStorage
| Same as in getStorageStatistics . Affects only returned statistics
chat_limit :: Maybe Int,
return_deleted_file_statistics :: Maybe Bool,
exclude_chat_ids :: Maybe [Int],
chat_ids :: Maybe [Int],
file_types :: Maybe [FileType.FileType],
| The amount of time after the creation of a file during which it ca n't be deleted , in seconds . Pass -1 to use the default value
immunity_delay :: Maybe Int,
count :: Maybe Int,
ttl :: Maybe Int,
size :: Maybe Int
}
deriving (Eq)
instance Show OptimizeStorage where
show
OptimizeStorage
{ chat_limit = chat_limit_,
return_deleted_file_statistics = return_deleted_file_statistics_,
exclude_chat_ids = exclude_chat_ids_,
chat_ids = chat_ids_,
file_types = file_types_,
immunity_delay = immunity_delay_,
count = count_,
ttl = ttl_,
size = size_
} =
"OptimizeStorage"
++ U.cc
[ U.p "chat_limit" chat_limit_,
U.p "return_deleted_file_statistics" return_deleted_file_statistics_,
U.p "exclude_chat_ids" exclude_chat_ids_,
U.p "chat_ids" chat_ids_,
U.p "file_types" file_types_,
U.p "immunity_delay" immunity_delay_,
U.p "count" count_,
U.p "ttl" ttl_,
U.p "size" size_
]
instance T.ToJSON OptimizeStorage where
toJSON
OptimizeStorage
{ chat_limit = chat_limit_,
return_deleted_file_statistics = return_deleted_file_statistics_,
exclude_chat_ids = exclude_chat_ids_,
chat_ids = chat_ids_,
file_types = file_types_,
immunity_delay = immunity_delay_,
count = count_,
ttl = ttl_,
size = size_
} =
A.object
[ "@type" A..= T.String "optimizeStorage",
"chat_limit" A..= chat_limit_,
"return_deleted_file_statistics" A..= return_deleted_file_statistics_,
"exclude_chat_ids" A..= exclude_chat_ids_,
"chat_ids" A..= chat_ids_,
"file_types" A..= file_types_,
"immunity_delay" A..= immunity_delay_,
"count" A..= count_,
"ttl" A..= ttl_,
"size" A..= size_
]
|
6164a47c1231645edce83c24b163f473588ede4261644cc50a14107bb9a8bc1b | naoiwata/sicp | q-1.40.scm | ;;
;; @author naoiwata
SICP Chapter1
;; q-1.40
;;
(add-load-path "." :relative)
(load "p42.scm")
(define (cubic a b c)
(lambda (x)
(+
(cube x)
(* a (square x))
(* b x)
c)))
(newton (cubic a b c) 1.0)
; END | null | https://raw.githubusercontent.com/naoiwata/sicp/7314136c5892de402015acfe4b9148a3558b1211/chapter1/q-1.40.scm | scheme |
@author naoiwata
q-1.40
END | SICP Chapter1
(add-load-path "." :relative)
(load "p42.scm")
(define (cubic a b c)
(lambda (x)
(+
(cube x)
(* a (square x))
(* b x)
c)))
(newton (cubic a b c) 1.0)
|
ab6ff10b544d09c27b6d0c0738b1cf88e8987e70130958f72ab51ca27f676445 | openbadgefactory/salava | field.cljs | (ns salava.core.ui.field
(:require [cljs-uuid-utils.core :refer [make-random-uuid uuid-string]]
[salava.core.helper :refer [dump]]))
(defn random-key []
(-> (make-random-uuid)
(uuid-string)))
(defn add-field
([fields-atom new-field] (add-field fields-atom new-field (count @fields-atom)))
([fields-atom new-field index]
(let [[before-blocks after-blocks] (split-at index @fields-atom)]
(reset! fields-atom (vec (concat before-blocks [(assoc new-field :key (random-key))] after-blocks))))))
(defn add-field-atomic
([fields-atom new-field] (add-field-atomic fields-atom new-field (count @fields-atom)))
([fields-atom new-field index]
(let [[before-blocks after-blocks] (split-at index @fields-atom)]
(reset! fields-atom (vec (concat before-blocks [(assoc @new-field :key (random-key))] after-blocks))))))
(defn remove-field [fields-atom index]
(let [fields @fields-atom
start (subvec fields 0 index)
end (subvec fields (inc index) (count fields))]
(reset! fields-atom (vec (concat start end)))))
(defn move-field [direction fields-atom old-position]
(let [new-position (cond
(= :down direction) (if-not (= old-position (- (count @fields-atom) 1))
(inc old-position))
(= :up direction) (if-not (= old-position 0)
(dec old-position)))]
(if new-position
(swap! fields-atom assoc old-position (nth @fields-atom new-position)
new-position (nth @fields-atom old-position)))))
(defn move-field-drop [fields-atom old-position new-position]
(let [fields @fields-atom
new-position (if-not (nil? new-position) new-position (count fields))
direction (if (< new-position old-position) :up :down)
start (subvec fields 0 new-position)
end (subvec fields new-position (count fields))]
(reset! fields-atom (vec (concat (case direction
:up (vec (concat start (conj [] (nth @fields-atom old-position))))
:down (let [new-end (vec (concat (conj [] (nth @fields-atom old-position)) end))]
(remove (fn [b] (some #(identical? b %) (vec (concat (conj [] (nth @fields-atom old-position)) end)))) (vec (concat start new-end)))))
(case direction
:up (let [new-start (vec (concat start (conj [] (nth @fields-atom old-position))))]
(remove (fn [b] (some #(identical? b %) new-start)) (vec (concat new-start end))))
:down (vec (concat (conj [] (nth @fields-atom old-position)) end))))))))
| null | https://raw.githubusercontent.com/openbadgefactory/salava/97f05992406e4dcbe3c4bff75c04378d19606b61/src/cljs/salava/core/ui/field.cljs | clojure | (ns salava.core.ui.field
(:require [cljs-uuid-utils.core :refer [make-random-uuid uuid-string]]
[salava.core.helper :refer [dump]]))
(defn random-key []
(-> (make-random-uuid)
(uuid-string)))
(defn add-field
([fields-atom new-field] (add-field fields-atom new-field (count @fields-atom)))
([fields-atom new-field index]
(let [[before-blocks after-blocks] (split-at index @fields-atom)]
(reset! fields-atom (vec (concat before-blocks [(assoc new-field :key (random-key))] after-blocks))))))
(defn add-field-atomic
([fields-atom new-field] (add-field-atomic fields-atom new-field (count @fields-atom)))
([fields-atom new-field index]
(let [[before-blocks after-blocks] (split-at index @fields-atom)]
(reset! fields-atom (vec (concat before-blocks [(assoc @new-field :key (random-key))] after-blocks))))))
(defn remove-field [fields-atom index]
(let [fields @fields-atom
start (subvec fields 0 index)
end (subvec fields (inc index) (count fields))]
(reset! fields-atom (vec (concat start end)))))
(defn move-field [direction fields-atom old-position]
(let [new-position (cond
(= :down direction) (if-not (= old-position (- (count @fields-atom) 1))
(inc old-position))
(= :up direction) (if-not (= old-position 0)
(dec old-position)))]
(if new-position
(swap! fields-atom assoc old-position (nth @fields-atom new-position)
new-position (nth @fields-atom old-position)))))
(defn move-field-drop [fields-atom old-position new-position]
(let [fields @fields-atom
new-position (if-not (nil? new-position) new-position (count fields))
direction (if (< new-position old-position) :up :down)
start (subvec fields 0 new-position)
end (subvec fields new-position (count fields))]
(reset! fields-atom (vec (concat (case direction
:up (vec (concat start (conj [] (nth @fields-atom old-position))))
:down (let [new-end (vec (concat (conj [] (nth @fields-atom old-position)) end))]
(remove (fn [b] (some #(identical? b %) (vec (concat (conj [] (nth @fields-atom old-position)) end)))) (vec (concat start new-end)))))
(case direction
:up (let [new-start (vec (concat start (conj [] (nth @fields-atom old-position))))]
(remove (fn [b] (some #(identical? b %) new-start)) (vec (concat new-start end))))
:down (vec (concat (conj [] (nth @fields-atom old-position)) end))))))))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.