planet-mitchell-0.1.0: Planet Mitchell
# Text
data Text #
A space efficient, packed, unboxed Unicode text type.
Instances
Text has instances defined across the re-exported modules, including:
- Hashable (Data.Hashable.Class): hashWithSalt :: Int -> Text -> Int, hash :: Text -> Int
- ToJSON / FromJSON and KeyValue (Data.Aeson.Types): toJSON :: Text -> Value, toJSONList :: [Text] -> Value, toEncodingList :: [Text] -> Encoding, (.=) :: ToJSON v => Text -> v -> Pair
- FoldCase (Data.CaseInsensitive.Internal): foldCaseList :: [Text] -> [Text]
- Ixed (Control.Lens.At), AsEmpty (Control.Lens.Empty), and strictness isomorphisms (Control.Lens.Internal.Iso)
- Prisms from Data.Aeson.Lens: _Null :: Prism' Text (), _JSON :: (FromJSON a, ToJSON a) => Prism' Text a
- Stream (Text.Megaparsec.Stream), with associated types Token Text and Tokens Text and methods takeN_ :: Int -> Text -> Maybe (Tokens Text, Text), takeWhile_ :: (Token Text -> Bool) -> Text -> (Tokens Text, Text)
- Pretty (Data.Text.Prettyprint.Doc.Internal): pretty :: Text -> Doc ann, prettyList :: [Text] -> Doc ann. Automatically converts all newlines to line.
  >>> pretty ("hello\nworld" :: Text)
  hello
  world
  Note that line can be undone by group:
  >>> group (pretty ("hello\nworld" :: Text))
  hello world
  Manually use hardline if you definitely want newlines.
- Serialise (Codec.Serialise.Class, since serialise-0.2.0.0): encodeList :: [Text] -> Encoding, decodeList :: Decoder s [Text]
- (a ~ Char, b ~ Char) => Each Text Text a b (Control.Lens.Each): each :: Traversal Text Text Char Char
- Cons and Snoc (Control.Lens.Cons): _Cons :: Prism Text Text (Char, Text) (Char, Text), _Snoc :: Prism Text Text (Text, Char) (Text, Char)
- FromPairs Value (DList Pair) and v ~ Value => KeyValuePair v (DList Pair) (Data.Aeson.Types.ToJSON): pair :: String -> v -> DList Pair
Associated type instances: type Item Text = Char (Data.Text); type Index Text = Int and type IxValue Text = Char (Control.Lens.At); type Tokens Text = Text and type Token Text = Char (Text.Megaparsec.Stream).
all :: (Char -> Bool) -> Text -> Bool #
O(n) all p t determines whether all characters in the Text t satisfy the predicate p. Subject to fusion.
any :: (Char -> Bool) -> Text -> Bool #
O(n) any p t determines whether any character in the Text t satisfies the predicate p. Subject to fusion.
append :: Text -> Text -> Text #
O(n) Appends one Text to the other by copying both of them into a new Text. Subject to fusion.
break :: (Char -> Bool) -> Text -> (Text, Text) #
O(n) break is like span, but the prefix returned is over elements that fail the predicate p.
breakOn :: Text -> Text -> (Text, Text) #
O(n+m) Find the first instance of needle (which must be non-null) in haystack. The first element of the returned tuple is the prefix of haystack before needle is matched. The second is the remainder of haystack, starting with the match.
Examples:
>>> breakOn "::" "a::b::c"
("a","::b::c")
>>> breakOn "/" "foobar"
("foobar","")
Laws:
append prefix match == haystack
where (prefix, match) = breakOn needle haystack
If you need to break a string by a substring repeatedly (e.g. you want to break on every instance of a substring), use breakOnAll instead, as it has lower startup overhead.
breakOnAll :: Text -> Text -> [(Text, Text)] #
Arguments: the needle to search for, then the haystack in which to search.
O(n+m) Find all non-overlapping instances of needle in haystack. Each element of the returned list consists of a pair:
• The entire string prior to the kth match (i.e. the prefix)
• The kth match, followed by the remainder of the string
Examples:
>>> breakOnAll "::" ""
[]
>>> breakOnAll "/" "a/b/c/"
[("a","/b/c/"),("a/b","/c/"),("a/b/c","/")]
The needle parameter may not be empty.
breakOnEnd :: Text -> Text -> (Text, Text) #
O(n+m) Similar to breakOn, but searches from the end of the string.
The first element of the returned tuple is the prefix of haystack up to and including the last match of needle. The second is the remainder of haystack, following the match.
>>> breakOnEnd "::" "a::b::c"
("a::b::","c")
center :: Int -> Char -> Text -> Text #
O(n) Center a string to the given length, using the specified fill character on either side. Performs replacement on invalid scalar values.
Examples:
>>> center 8 'x' "HS"
"xxxHSxxx"
chunksOf :: Int -> Text -> [Text] #
O(n) Splits a Text into components of length k. The last element may be shorter than the other chunks, depending on the length of the input. Examples:
>>> chunksOf 3 "foobarbaz"
["foo","bar","baz"]
>>> chunksOf 4 "haskell.org"
["hask","ell.","org"]
commonPrefixes :: Text -> Text -> Maybe (Text, Text, Text) #
O(n) Find the longest non-empty common prefix of two strings and return it, along with the suffixes of each string at which they no longer match.
If the strings do not have a common prefix or either one is empty, this function returns Nothing.
Examples:
>>> commonPrefixes "foobar" "fooquux"
Just ("foo","bar","quux")
>>> commonPrefixes "veeble" "fetzer"
Nothing
>>> commonPrefixes "" "baz"
Nothing
compareLength :: Text -> Int -> Ordering #
O(n) Compare the count of characters in a Text to a number. Subject to fusion.
This function gives the same answer as comparing against the result of length, but can short circuit if the count of characters is greater than the number, and hence be more efficient.
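An illustrative example (not from the original documentation):
>>> compareLength "hello" 3
GT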
concat :: [Text] -> Text #
O(n) Concatenate a list of Texts.
concatMap :: (Char -> Text) -> Text -> Text #
O(n) Map a function over a Text that results in a Text, and concatenate the results.
cons :: Char -> Text -> Text infixr 5 #
O(n) Adds a character to the front of a Text. This function is more costly than its List counterpart because it requires copying a new array. Subject to fusion. Performs replacement on invalid scalar values.
copy :: Text -> Text #
O(n) Make a distinct copy of the given string, sharing no storage with the original string.
As an example, suppose you read a large string, of which you need only a small portion. If you do not use copy, the entire original array will be kept alive in memory by the smaller string. Making a copy "breaks the link" to the original array, allowing it to be garbage collected if there are no other live references to it.
decodeUtf8' :: ByteString -> Either UnicodeException Text #
Decode a ByteString containing UTF-8 encoded text.
If the input contains any invalid UTF-8 data, the relevant exception will be returned, otherwise the decoded text.
double :: Reader Double #
The syntax accepted by this function is the same as for rational.
Note: This function is almost ten times faster than rational, but is slightly less accurate.
The Double type supports about 16 decimal places of accuracy. For 94.2% of numbers, this function and rational give identical results, but for the remaining 5.8%, this function loses precision around the 15th decimal place. For 0.001% of numbers, this function will lose precision at the 13th or 14th decimal place.
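An illustrative example (not from the original documentation):
>>> double "3.14abc"
Right (3.14,"abc")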
drop :: Int -> Text -> Text #
O(n) drop n, applied to a Text, returns the suffix of the Text after the first n characters, or the empty Text if n is greater than the length of the Text. Subject to fusion.
dropAround :: (Char -> Bool) -> Text -> Text #
O(n) dropAround p t returns the substring remaining after dropping characters that satisfy the predicate p from both the beginning and end of t. Subject to fusion.
dropEnd :: Int -> Text -> Text #
O(n) dropEnd n t returns the prefix remaining after dropping n characters from the end of t.
Examples:
>>> dropEnd 3 "foobar"
"foo"
Since: text-1.1.1.0
dropWhile :: (Char -> Bool) -> Text -> Text #
O(n) dropWhile p t returns the suffix remaining after takeWhile p t. Subject to fusion.
dropWhileEnd :: (Char -> Bool) -> Text -> Text #
O(n) dropWhileEnd p t returns the prefix remaining after dropping characters that satisfy the predicate p from the end of t. Subject to fusion.
Examples:
>>> dropWhileEnd (=='.') "foo..."
"foo"
empty :: Text #
O(1) The empty Text.
encodeUtf16BE :: Text -> ByteString #
Encode text using big endian UTF-16 encoding.
encodeUtf16LE :: Text -> ByteString #
Encode text using little endian UTF-16 encoding.
encodeUtf32BE :: Text -> ByteString #
Encode text using big endian UTF-32 encoding.
encodeUtf32LE :: Text -> ByteString #
Encode text using little endian UTF-32 encoding.
encodeUtf8 :: Text -> ByteString #
Encode text using UTF-8 encoding.
filter :: (Char -> Bool) -> Text -> Text #
O(n) filter, applied to a predicate and a Text, returns a Text containing those characters that satisfy the predicate.
find :: (Char -> Bool) -> Text -> Maybe Char #
O(n) The find function takes a predicate and a Text, and returns the first element matching the predicate, or Nothing if there is no such element.
findIndex :: (Char -> Bool) -> Text -> Maybe Int #
O(n) The findIndex function takes a predicate and a Text and returns the index of the first element in the Text satisfying the predicate. Subject to fusion.
foldl' :: (a -> Char -> a) -> a -> Text -> a #
O(n) A strict version of foldl. Subject to fusion.
foldr :: (Char -> a -> a) -> a -> Text -> a #
O(n) foldr, applied to a binary operator, a starting value (typically the right-identity of the operator), and a Text, reduces the Text using the binary operator, from right to left. Subject to fusion.
group :: Text -> [Text] #
O(n) Group characters in a string by equality.
groupBy :: (Char -> Char -> Bool) -> Text -> [Text] #
O(n) Group characters in a string according to a predicate.
inits :: Text -> [Text] #
O(n) Return all initial segments of the given Text, shortest first.
intercalate :: Text -> [Text] -> Text #
O(n) The intercalate function takes a Text and a list of Texts and concatenates the list after interspersing the first argument between each element of the list.
Example:
>>> T.intercalate "NI!" ["We", "seek", "the", "Holy", "Grail"]
"WeNI!seekNI!theNI!HolyNI!Grail"
intersperse :: Char -> Text -> Text #
O(n) The intersperse function takes a character and places it between the characters of a Text.
Example:
>>> T.intersperse '.' "SHIELD"
"S.H.I.E.L.D"
Subject to fusion. Performs replacement on invalid scalar values.
isInfixOf :: Text -> Text -> Bool #
O(n+m) The isInfixOf function takes two Texts and returns True iff the first is contained, wholly and intact, anywhere within the second.
isPrefixOf :: Text -> Text -> Bool #
O(n) The isPrefixOf function takes two Texts and returns True iff the first is a prefix of the second. Subject to fusion.
isSuffixOf :: Text -> Text -> Bool #
O(n) The isSuffixOf function takes two Texts and returns True iff the first is a suffix of the second.
justifyLeft :: Int -> Char -> Text -> Text #
O(n) Left-justify a string to the given length, using the specified fill character on the right. Subject to fusion. Performs replacement on invalid scalar values.
Examples:
>>> justifyLeft 7 'x' "foo"
"fooxxxx"
>>> justifyLeft 3 'x' "foobar"
"foobar"
justifyRight :: Int -> Char -> Text -> Text #
O(n) Right-justify a string to the given length, using the specified fill character on the left. Performs replacement on invalid scalar values.
Examples:
>>> justifyRight 7 'x' "bar"
"xxxxbar"
>>> justifyRight 3 'x' "foobar"
"foobar"
length :: Text -> Int #
O(n) Returns the number of characters in a Text. Subject to fusion.
lines :: Text -> [Text] #
O(n) Breaks a Text up into a list of Texts at newline Chars. The resulting strings do not contain newlines.
map :: (Char -> Char) -> Text -> Text #
O(n) map f t is the Text obtained by applying f to each element of t.
Example:
>>> let message = pack "I am not angry. Not at all."
>>> T.map (\c -> if c == '.' then '!' else c) message
"I am not angry! Not at all!"
Subject to fusion. Performs replacement on invalid scalar values.
mapAccumL :: (a -> Char -> (a, Char)) -> a -> Text -> (a, Text) #
O(n) Like a combination of map and foldl'. Applies a function to each element of a Text, passing an accumulating parameter from left to right, and returns a final Text. Performs replacement on invalid scalar values.
mapAccumR :: (a -> Char -> (a, Char)) -> a -> Text -> (a, Text) #
The mapAccumR function behaves like a combination of map and a strict foldr; it applies a function to each element of a Text, passing an accumulating parameter from right to left, and returning a final value of this accumulator together with the new Text. Performs replacement on invalid scalar values.
null :: Text -> Bool #
O(1) Tests whether a Text is empty or not. Subject to fusion.
pack :: String -> Text #
O(n) Convert a String into a Text. Subject to fusion. Performs replacement on invalid scalar values.
partition :: (Char -> Bool) -> Text -> (Text, Text) #
O(n) The partition function takes a predicate and a Text, and returns the pair of Texts with elements which do and do not satisfy the predicate, respectively; i.e.
partition p t == (filter p t, filter (not . p) t)
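An illustrative example (not from the original documentation):
>>> partition (=='a') "banana"
("aaa","bnn")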
rational :: Fractional a => Reader a #
This function accepts an optional leading sign character, followed by at least one decimal digit. The syntax is similar to that accepted by the read function, with the exception that a trailing '.' or 'e' not followed by a number is not consumed.
Examples (with behaviour identical to read):
rational "3" == Right (3.0, "")
rational "3.1" == Right (3.1, "")
rational "3e4" == Right (30000.0, "")
rational "3.1e4" == Right (31000.0, "")
rational "e3" == Left "input does not start with a digit"
Examples of differences from read:
rational "3.foo" == Right (3.0, ".foo")
rational "3e" == Right (3.0, "e")
replace :: Text -> Text -> Text -> Text #
Arguments: the needle to search for (if this string is empty, an error will occur), the replacement to replace the needle with, and the haystack in which to search.
O(m+n) Replace every non-overlapping occurrence of needle in haystack with replacement.
This function behaves as though it was defined as follows:
replace needle replacement haystack =
intercalate replacement (splitOn needle haystack)
As this suggests, each occurrence is replaced exactly once. So if needle occurs in replacement, that occurrence will not itself be replaced recursively:
>>> replace "oo" "foo" "oo"
"foo"
In cases where several instances of needle overlap, only the first one will be replaced:
>>> replace "ofo" "bar" "ofofo"
"barfo"
replicate :: Int -> Text -> Text #
O(n*m) replicate n t is a Text consisting of the input t repeated n times.
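An illustrative example (not from the original documentation):
>>> replicate 3 "ab"
"ababab"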
reverse :: Text -> Text #
O(n) Reverse the characters of a string.
Example:
>>> T.reverse "desrever"
"reversed"
Subject to fusion.
scanl :: (Char -> Char -> Char) -> Char -> Text -> Text #
O(n) scanl is similar to foldl, but returns a list of successive reduced values from the left. Subject to fusion. Performs replacement on invalid scalar values.
scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...]
Note that
last (scanl f z xs) == foldl f z xs.
scanl1 :: (Char -> Char -> Char) -> Text -> Text #
O(n) scanl1 is a variant of scanl that has no starting value argument. Subject to fusion. Performs replacement on invalid scalar values.
scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...]
scanr :: (Char -> Char -> Char) -> Char -> Text -> Text #
O(n) scanr is the right-to-left dual of scanl. Performs replacement on invalid scalar values.
scanr f v == reverse . scanl (flip f) v . reverse
scanr1 :: (Char -> Char -> Char) -> Text -> Text #
O(n) scanr1 is a variant of scanr that has no starting value argument. Subject to fusion. Performs replacement on invalid scalar values.
singleton :: Char -> Text #
O(1) Convert a character into a Text. Subject to fusion. Performs replacement on invalid scalar values.
snoc :: Text -> Char -> Text #
O(n) Adds a character to the end of a Text. This copies the entire array in the process, unless fused. Subject to fusion. Performs replacement on invalid scalar values.
span :: (Char -> Bool) -> Text -> (Text, Text) #
O(n) span, applied to a predicate p and text t, returns a pair whose first element is the longest prefix (possibly empty) of t of elements that satisfy p, and whose second is the remainder of the text.
split :: (Char -> Bool) -> Text -> [Text] #
O(n) Splits a Text into components delimited by separators, where the predicate returns True for a separator element. The resulting components do not contain the separators. Two adjacent separators result in an empty component in the output, e.g.:
>>> split (=='a') "aabbaca"
["","","bb","c",""]
>>> split (=='a') ""
[""]
splitAt :: Int -> Text -> (Text, Text) #
O(n) splitAt n t returns a pair whose first element is a prefix of t of length n, and whose second is the remainder of the string. It is equivalent to (take n t, drop n t).
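An illustrative example (not from the original documentation):
>>> splitAt 3 "foobar"
("foo","bar")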
strip :: Text -> Text #
O(n) Remove leading and trailing white space from a string. Equivalent to:
dropAround isSpace
stripEnd :: Text -> Text #
O(n) Remove trailing white space from a string. Equivalent to:
dropWhileEnd isSpace
stripPrefix :: Text -> Text -> Maybe Text #
O(n) Return the suffix of the second string if its prefix matches the entire first string.
Examples:
>>> stripPrefix "foo" "foobar"
Just "bar"
>>> stripPrefix "" "baz"
Just "baz"
>>> stripPrefix "foo" "quux"
Nothing
This is particularly useful with the ViewPatterns extension to GHC, as follows:
{-# LANGUAGE ViewPatterns #-}
import Data.Text as T
fnordLength :: Text -> Int
fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf
fnordLength _ = -1
stripStart :: Text -> Text #
O(n) Remove leading white space from a string. Equivalent to:
dropWhile isSpace
stripSuffix :: Text -> Text -> Maybe Text #
O(n) Return the prefix of the second string if its suffix matches the entire first string.
Examples:
>>> stripSuffix "bar" "foobar"
Just "foo"
>>> stripSuffix "" "baz"
Just "baz"
>>> stripSuffix "foo" "quux"
Nothing
This is particularly useful with the ViewPatterns extension to GHC, as follows:
{-# LANGUAGE ViewPatterns #-}
import Data.Text as T
quuxLength :: Text -> Int
quuxLength (stripSuffix "quux" -> Just pre) = T.length pre
quuxLength _ = -1
tails :: Text -> [Text] #
O(n) Return all final segments of the given Text, longest first.
take :: Int -> Text -> Text #
O(n) take n, applied to a Text, returns the prefix of the Text of length n, or the Text itself if n is greater than the length of the Text. Subject to fusion.
takeEnd :: Int -> Text -> Text #
O(n) takeEnd n t returns the suffix remaining after taking n characters from the end of t.
Examples:
>>> takeEnd 3 "foobar"
"bar"
Since: text-1.1.1.0
takeWhile :: (Char -> Bool) -> Text -> Text #
O(n) takeWhile, applied to a predicate p and a Text, returns the longest prefix (possibly empty) of elements that satisfy p. Subject to fusion.
takeWhileEnd :: (Char -> Bool) -> Text -> Text #
O(n) takeWhileEnd, applied to a predicate p and a Text, returns the longest suffix (possibly empty) of elements that satisfy p. Subject to fusion. Examples:
>>> takeWhileEnd (=='o') "foo"
"oo"
Since: text-1.2.2.0
toCaseFold :: Text -> Text #
O(n) Convert a string to folded case. Subject to fusion.
This function is mainly useful for performing caseless (also known as case insensitive) string comparisons.
A string x is a caseless match for a string y if and only if:
toCaseFold x == toCaseFold y
The result string may be longer than the input string, and may differ from applying toLower to the input string. For instance, the Armenian small ligature "ﬓ" (men now, U+FB13) is case folded to the sequence "մ" (men, U+0574) followed by "ն" (now, U+0576), while the Greek "µ" (micro sign, U+00B5) is case folded to "μ" (small letter mu, U+03BC) instead of itself.
toLower :: Text -> Text #
O(n) Convert a string to lower case, using simple case conversion. Subject to fusion.
The result string may be longer than the input string. For instance, "İ" (Latin capital letter I with dot above, U+0130) maps to the sequence "i" (Latin small letter i, U+0069) followed by " ̇" (combining dot above, U+0307).
toTitle :: Text -> Text #
O(n) Convert a string to title case, using simple case conversion. Subject to fusion.
The first letter of the input is converted to title case, as is every subsequent letter that immediately follows a non-letter. Every letter that immediately follows another letter is converted to lower case.
The result string may be longer than the input string. For example, the Latin small ligature fl (U+FB02) is converted to the sequence Latin capital letter F (U+0046) followed by Latin small letter l (U+006C).
Note: this function does not take language or culture specific rules into account. For instance, in English, different style guides disagree on whether the book name "The Hill of the Red Fox" is correctly title cased—but this function will capitalize every word.
Since: text-1.0.0.0
toUpper :: Text -> Text #
O(n) Convert a string to upper case, using simple case conversion. Subject to fusion.
The result string may be longer than the input string. For instance, the German "ß" (eszett, U+00DF) maps to the two-letter sequence "SS".
transpose :: [Text] -> [Text] #
O(n) The transpose function transposes the rows and columns of its Text argument. Note that this function uses pack, unpack, and the list version of transpose, and is thus not very efficient.
Examples:
>>> transpose ["green","orange"]
["go","rr","ea","en","ng","e"]
>>> transpose ["blue","red"]
["br","le","ud","e"]
uncons :: Text -> Maybe (Char, Text) #
O(1) Returns the first character and rest of a Text, or Nothing if empty. Subject to fusion.
unfoldr :: (a -> Maybe (Char, a)) -> a -> Text #
O(n), where n is the length of the result. The unfoldr function is analogous to the List unfoldr. unfoldr builds a Text from a seed value. The function takes the element and returns Nothing if it is done producing the Text, otherwise Just (a,b). In this case, a is the next Char in the string, and b is the seed value for further production. Subject to fusion. Performs replacement on invalid scalar values.
unfoldrN :: Int -> (a -> Maybe (Char, a)) -> a -> Text #
O(n) Like unfoldr, unfoldrN builds a Text from a seed value. However, the length of the result should be limited by the first argument to unfoldrN. This function is more efficient than unfoldr when the maximum length of the result is known and correct, otherwise its performance is similar to unfoldr. Subject to fusion. Performs replacement on invalid scalar values.
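An illustrative example (not from the original documentation):
>>> unfoldrN 3 (\c -> Just (c, succ c)) 'a'
"abc"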
unlines :: [Text] -> Text #
O(n) Joins lines, after appending a terminating newline to each.
unpack :: Text -> String #
O(n) Convert a Text into a String. Subject to fusion.
unpackCString# :: Addr# -> Text #
O(n) Convert a literal string into a Text. Subject to fusion.
This is exposed solely for people writing GHC rewrite rules.
Since: text-1.2.1.1
unsnoc :: Text -> Maybe (Text, Char) #
O(1) Returns all but the last character and the last character of a Text, or Nothing if empty.
Since: text-1.2.3.0
unwords :: [Text] -> Text #
O(n) Joins words using single space characters.
words :: Text -> [Text] #
O(n) Breaks a Text up into a list of words, delimited by Chars representing white space.
zip :: Text -> Text -> [(Char, Char)] #
O(n) zip takes two Texts and returns a list of corresponding pairs of characters. If one input Text is short, excess elements of the longer Text are discarded. This is equivalent to a pair of unpack operations.
zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text #
O(n) zipWith generalises zip by zipping with the function given as the first argument, instead of a tupling function. Performs replacement on invalid scalar values.
## Similarity metrics
levenshtein :: Text -> Text -> Int #
Return Levenshtein distance between two Text values. Classic Levenshtein distance between two strings is the minimal number of operations necessary to transform one string into another. For Levenshtein distance allowed operations are: deletion, insertion, and substitution.
Heads up, before version 0.3.0 this function returned Natural.
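An illustrative example (not from the package documentation):
>>> levenshtein "kitten" "sitting"
3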
levenshteinNorm :: Text -> Text -> Ratio Int #
Return normalized Levenshtein distance between two Text values. The result is a non-negative rational number (represented as Ratio Int), where 0 signifies no similarity between the strings, while 1 means exact match.
Heads up, before version 0.3.0 this function returned Ratio Natural.
damerauLevenshtein :: Text -> Text -> Int #
Return Damerau-Levenshtein distance between two Text values. The function works like levenshtein, but the collection of allowed operations also includes transposition of two adjacent characters.
Heads up, before version 0.3.0 this function returned Natural.
damerauLevenshteinNorm :: Text -> Text -> Ratio Int #
Return normalized Damerau-Levenshtein distance between two Text values. 0 signifies no similarity between the strings, while 1 means exact match.
Heads up, before version 0.3.0 this function returned Ratio Natural.
overlap :: Text -> Text -> Ratio Int #
Return overlap coefficient for two Text values. Returned value is in the range from 0 (no similarity) to 1 (exact match). Return 1 if both Text values are empty.
Since: text-metrics-0.3.0
jaccard :: Text -> Text -> Ratio Int #
Return Jaccard similarity coefficient for two Text values. Returned value is in the range from 0 (no similarity) to 1 (exact match). Return 1 if both Text values are empty.
Since: text-metrics-0.3.0
hamming :: Text -> Text -> Maybe Int #
O(n) Return Hamming distance between two Text values. Hamming distance is defined as the number of positions at which the corresponding symbols are different. The input Text values should be of equal length or Nothing will be returned.
Heads up, before version 0.3.0 this function returned Maybe Natural.
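An illustrative example (not from the package documentation):
>>> hamming "karolin" "kathrin"
Just 3
>>> hamming "karolin" "kath"
Nothing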
jaro :: Text -> Text -> Ratio Int #
Return Jaro distance between two Text values. Returned value is in the range from 0 (no similarity) to 1 (exact match).
While the algorithm is pretty clear for artificial examples (like those from the linked Wikipedia article), for arbitrary strings it may be hard to decide which of the two strings should be considered the one with the "reference" order of characters (the order of matching characters is an essential part of the definition of the algorithm). This makes us consider the first string the "reference" string (with correct order of characters). Thus generally,
jaro a b ≠ jaro b a
This asymmetry can be found in all implementations of the algorithm on the internet, AFAIK.
Heads up, before version 0.3.0 this function returned Ratio Natural.
Since: text-metrics-0.2.0
jaroWinkler :: Text -> Text -> Ratio Int #
Return Jaro-Winkler distance between two Text values. Returned value is in the range from 0 (no similarity) to 1 (exact match).
Heads up, before version 0.3.0 this function returned Ratio Natural.
Since: text-metrics-0.2.0
## Number conversion
type Reader a = IReader Text a #
Read some text. If the read succeeds, return its value and the remaining text, otherwise an error message.
type IReader t a = t -> Either String (a, t) #
decimal :: Integral a => Reader a #
Read a decimal integer. The input must begin with at least one decimal digit, and is consumed until a non-digit or end of string is reached.
This function does not handle leading sign characters. If you need to handle signed input, use signed decimal.
Note: For fixed-width integer types, this function does not attempt to detect overflow, so a sufficiently long input may give incorrect results. If you are worried about overflow, use Integer for your result type.
hexadecimal :: Integral a => Reader a #
Read a hexadecimal integer, consisting of an optional leading "0x" followed by at least one hexadecimal digit. Input is consumed until a non-hex-digit or end of string is reached. This function is case insensitive.
This function does not handle leading sign characters. If you need to handle signed input, use signed hexadecimal.
Note: For fixed-width integer types, this function does not attempt to detect overflow, so a sufficiently long input may give incorrect results. If you are worried about overflow, use Integer for your result type.
signed :: Num a => Reader a -> Reader a #
Read an optional leading sign character ('-' or '+') and apply it to the result of applying the given reader.
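Illustrative examples (not from the original documentation):
>>> decimal "123abc" :: Either String (Int, Text)
Right (123,"abc")
>>> hexadecimal "0xFF rest" :: Either String (Int, Text)
Right (255," rest")
>>> signed decimal "-42" :: Either String (Int, Text)
Right (-42,"")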
## Optics
This isomorphism can be used to pack (or unpack) strict Text.
>>> "hello"^.packed -- :: Text
"hello"
pack x ≡ x ^. packed
unpack x ≡ x ^. from packed
packed ≡ from unpacked
packed ≡ iso pack unpack
This isomorphism can be used to unpack (or pack) lazy Text.
>>> "hello"^.unpacked -- :: String
"hello"
This Iso is provided for notational convenience rather than out of great need, since
unpacked ≡ from packed
pack x ≡ x ^. from unpacked
unpack x ≡ x ^. packed
unpacked ≡ iso unpack pack
Traverse the individual characters in strict Text.
>>> anyOf text (=='o') "hello"
True
When the type is unambiguous, you can also use the more general each.
text ≡ unpacked . traversed
text ≡ each
Note that when just using this as a Setter, setting map can be more efficient.
Convert between strict Text and Builder.
fromText x ≡ x ^. builder
toStrict (toLazyText x) ≡ x ^. from builder
---
# How to evaluate the second argument of Cases only once
I'd like to find a fast but readable way to achieve the following:
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000],
a_ /; a > AbsoluteTime["2002"]]
The correct result can be achieved much faster using With:
With[{d = AbsoluteTime["2002"]},
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000],
a_ /; a > d]]
But this is not very readable. Is there a more readable way?
You could inject the value once with a Function instead of With.
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]},
1000], a_ /; a > #] &@ AbsoluteTime["2002"]
In V10, we can use named arguments, which is arguably more readable (and more typing):
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]},
1000], a_ /; a > #time] &[<|"time" -> AbsoluteTime["2002"]|>]
One could use Evaluate, although this would fail if a has a global value.
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]},
1000], a_ /; Evaluate[a > AbsoluteTime["2002"]]]
Protecting a with Block is a possibility, albeit a little cumbersome.
Block[{a},
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]},
1000], a_ /; Evaluate[a > AbsoluteTime["2002"]]]
]
I rather like the With construction and feel it's quite readable, but if you don't want to use it, how about:
Cases[
RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000]
- AbsoluteTime["2002"],
a_ /; a > 0
] + AbsoluteTime["2002"]
or
Cases[
RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000]
- AbsoluteTime["2002"],
_?Positive
] + AbsoluteTime["2002"]
or, with Select
Select[
RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000]
- AbsoluteTime["2002"],
Positive
] + AbsoluteTime["2002"]
which are all 200 times faster than your original and about as fast as With (though not particularly readable either).
For a different approach, how about a memoized version of AbsoluteTime:
at[x_] := at[x] = AbsoluteTime[x]
Cases[RandomInteger[{at["2001"], at["2003"]}, 1000], a_ /; a > at["2002"]]
• I like this (+1); it is about twice as fast as the original. I wonder whether this meets the OP's readability requirement, though. This is similar to starting with a With definition except that here the definition goes in global namespace. Just defining the value as a variable (together with your at in the random generator) is about two times faster still: d = AbsoluteTime["2002"]; Cases[RandomInteger[{at["2001"], at["2003"]}, 1000], a_ /; a > d]. – Sjoerd C. de Vries Oct 13 '15 at 21:59
With is designed to address the issue you bring up. The way to make your code readable is to pick better variable names.
With[{midPoint = AbsoluteTime["2002"]},
Cases[RandomInteger[{AbsoluteTime["2001"], AbsoluteTime["2003"]}, 1000],
time_ /; time > midPoint]]
---
# Using the Fisher linear discriminant for separating non-linearly separable data
After learning the Fisher linear discriminant in class and its ability to project data onto one dimension so that it can be separated by a threshold, I wanted to evaluate it on non-linearly separable data and observe its performance visually. So I implemented the Fisher discriminant in Matlab and used some synthetic data to visualize how the data is projected onto one dimension.
The following plots show the results of my experiment, where the red and the blue circles represent data of two different classes in 2D and the green line represents the direction of the vector w onto which the data is projected.
The third plot shows the case where the data is not linearly separable. We can observe that the Fisher linear discriminant does not perform that badly in this case: it always tries to find the direction of w such that there is not much overlap in the projected space. I think we could use the Fisher linear discriminant in real-world scenarios where the data is not linearly separable but the class overlap is small. However, if the overlap is considerable, we would need more complex methods such as SVMs.
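For reference, the projection direction can be computed in a few lines of Matlab. This is a minimal sketch of my own (not the author's actual code), assuming X1 and X2 hold the samples of each class as rows:
% Fisher direction for two classes stored row-wise in X1 and X2.
m1 = mean(X1)';  m2 = mean(X2)';   % class means as column vectors
Sw = cov(X1) + cov(X2);            % within-class scatter (up to scaling)
w  = Sw \ (m1 - m2);               % Fisher direction: inv(Sw) * (m1 - m2)
w  = w / norm(w);                  % normalize for plotting
y1 = X1 * w;  y2 = X2 * w;         % 1-D projections of the two classes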
--ilaguna 23:30, 15 April 2010 (EDT)
---
# American Institute of Mathematical Sciences
July 2018, 17(4): 1561-1572. doi: 10.3934/cpaa.2018074
## On special regularity properties of solutions of the Zakharov-Kuznetsov equation
1 IMPA, Estrada Dona Castorina 110, Rio de Janeiro 22460-320, Brazil 2 Department of Mathematics, University of California, Santa Barbara, CA 93106, USA
Received December 2016 Revised April 2017 Published April 2018
We study special regularity properties of solutions to the initial value problem associated to the Zakharov-Kuznetsov equation in three dimensions. We show that the initial regularity of the data in a family of half-spaces propagates with infinite speed. By dealing with the finite envelope of a class of these half-spaces we extend the result to the complement of a family of cones in $\mathbb{R}^3$.
Citation: Felipe Linares, Gustavo Ponce. On special regularity properties of solutions of the Zakharov-Kuznetsov equation. Communications on Pure & Applied Analysis, 2018, 17 (4) : 1561-1572. doi: 10.3934/cpaa.2018074
---
# What am I? 'Tis the season of mutual puzzling
I'm born when one is over the treasure spot,
I'm used when you drive to buy a present,
I'm born in the spirit of mutual gift-giving,
Yet I'm literally a super negative one.
Kinda short, so might be open to interpretation, but I assure you your efforts in solving this riddle will be reciprocated with a +1 from me!
• Excellent riddle! And since tensions are running so high on this site, I should explain that 'lynch mob!' is a compliment between riddlers and not a threat :-) – Rand al'Thor Dec 23 '14 at 12:27
• And congrats on making it to the Hot Network Questions :-) – Rand al'Thor Dec 23 '14 at 14:15
• Why is d'alar'cop giving away so much of his reputation as bounties? – Joe Z. Mar 6 '15 at 3:37
reciprocal.
I'm born when one is over the treasure spot,
X marks the spot; "the treasure spot" means X. So "one is over the treasure spot" means 1 over x, or $\frac{1}{x}$, the reciprocal function.
I'm used when you drive to buy a present,
The internal combustion engine, present in the car you drive in, is a type of reciprocating engine.
I'm born in the spirit of mutual gift-giving,
Yet I'm literally a super negative one.
Another notation for the reciprocal of a number is to raise that number to the power of minus one; reciprocals can be denoted by $^{-1}$, "negative one" in superscript.
And finally...
"Your efforts in solving this riddle will be reciprocated with a +1 from me!"
LYNCH MOB! :-)
If you'd like to know how I solved this, the first line always seemed to me the most tractable. I struggled with 'one' and 'X' for a while before eventually thinking of the correct interpretation, which fitted with the third line. As I was writing the answer, I realised how it fitted the fourth line too, and that was when I was certain I was right. The second line was the hardest, and required a quick search on the internet.
Soon it'll be time to post my seasonal riddle...
• Search piston engine in wikipedia! – McMagister Dec 23 '14 at 12:29
• @McMagister - Finished now! Feedback also added :-) – Rand al'Thor Dec 23 '14 at 12:33
• Oh man I would never have got this (didn't know a combustion engine was "<that word>", for a start). So glad I cheated. – Lightness Races in Orbit Dec 23 '14 at 14:15
• @LightnessRacesinOrbit Neither did I when I found the solution! :-) It was the first line that enabled me to crack it; I had to do a bit of Googling for the second line. – Rand al'Thor Dec 23 '14 at 14:17
---
# How do you draw the electron configuration diagram for Aluminum?
##### 1 Answer
The electron configuration for aluminum is: $1 {s}^{2} 2 {s}^{2} 2 {p}^{6} 3 {s}^{2} 3 {p}^{1}$
#### Explanation:
To figure out the electron configuration of any element, use the diagonal diagram together with the Aufbau principle: fill subshells in order of increasing energy, 1s, 2s, 2p, 3s, 3p, 4s, 3d, and so on. Aluminum has 13 electrons, and the configuration above accounts for all of them: 2 + 2 + 6 + 2 + 1 = 13.
[Video: how to write electron configurations for H, Li, C and Sc.]
Hope this helps!
---
Python Programming – Class
In this Page, We are Providing Python Programming – Class. Students can visit for more Detail and Explanation of Python Handwritten Notes Pdf.
Python Programming – Class
Class
A class is the particular object type created by executing a class statement. Class objects are used as templates to create instance objects, which embody the attributes ("data attributes" and "methods") specific to a data type. The grammar of a class definition is given below:
classdef    ::= "class" classname [inheritance] ":" suite
inheritance ::= "(" [expression_list] ")"
classname   ::= identifier
The above grammar might seem alien at first; it will become clearer as this chapter progresses. The simplest form of class definition looks like:
class ClassName:
<statement-1>
.
.
.
<statement-N>
The following example gives a glimpse of how a class is defined.
>>> class Output:
...     def Display(self):
...         print 'This is a class example.'
...
>>> x = Output()
>>> x.Display()
This is a class example.
Like function definitions (def statements), a class definition (Output in the above example) must be executed before it has any effect. In practice, the statements inside a class definition will usually be function (or more specifically "method") definitions (Display() in the above example), but other statements are allowed. The function definitions inside a class normally have a peculiar form of argument list, dictated by the calling conventions for methods (discussed later).
The creation of a class definition also creates a new namespace, which is used as the local scope; thus all assignments to local variables go into this new namespace.
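A small illustration of this (my own example, not from the text): assignments in the class body become attributes in the class's namespace, while assignments through self create attributes in the instance's own namespace.
>>> class Counter:
...     total = 0                          # lives in the class namespace
...     def increment(self):
...         self.total = self.total + 1    # creates an instance attribute
...
>>> c = Counter()
>>> c.increment()
>>> c.total
1
>>> Counter.total
0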
---
# Avoid Meetingbird and Front (frontapp.com) products
This blog is far from viral, but it has a couple of thousands of views a month and likely a lot of the visitors could be interested in products by https://frontapp.com/ .
I want to argue against that, as I don't think they can be trusted anymore.
Just a few months ago, when I wanted to optimize my scheduling process, I looked for a free Calendly alternative. I was so happy to find out about https://www.meetingbird.com/ , as it had everything I needed, and look at their pricing - FREE FOREVER:
Well, if you read their announcement, where they kindly inform you that Meetingbird is going to be shut down in about 6 weeks (that's very short notice for software - seriously, you didn't know sooner?! I don't even want to know what their planning looks like), you find out this bit:
It's also important to note that Front's pricing plans start at $9/user/month. All Front plans include Front Scheduling. BANG! Free Forever just changed to $9/month.
No, I don't think that the fact that it was free gives you the freedom to change your promises any time you wish. You could argue that, technically, it was Meetingbird making this promise and not Front. That's true, but I do think that if you are acquiring a service, you are acquiring it together with the obligations that service/company made. You don't have to respect them, but then don't be surprised that you're disappointing your new customers. I would understand this decision in some circumstances, like "We would go broke if we hadn't done this", but I don't see that being the case here. And better communication would help a lot, instead of just "Oh, and by the way, nothing to really discuss: $9/month". It's hard for me to believe that no one in the team noticed this "change".
I contacted their support, but I never got an answer.
Even if this post doesn't turn a single person against supporting them, I wanted to do something - I believe this behaviour should be punished by the public, and they shouldn't get away with it.
---
# How to show non-convexity of the geometrical motivated SVM Optimization Problem
Geometrically, the SVM tries to classify each data point correctly while maximizing the margin $\gamma$. In the linearly separable case this can be formulated as \begin{align} \max_{\gamma, b \in \mathbb{R}, w \in \mathbb{R}^d} \gamma \quad \text{s.t.} \quad \gamma - y_i \dfrac{w^T x_i + b}{\|w\|} \leq 0 \quad \forall i = 1,\dots,n \label{eq:hardsvm} \end{align} Is there a simple and mathematically exact way to show that the inequality constraint of this problem is not convex?
• should formula be $(w^T x_i + b) _+$ which is hinge loss? – Haitao Du Aug 16 '16 at 14:44
• I reformulated the problem because there was a mistake in the previous one. This is the binary classification case where $y \in \{-1,1\}$, which means that $y_i(w^Tx_i + b)$ should be the same as your suggestion. – dkoehn Aug 16 '16 at 14:57
Formally, if we multiply the constraint through by $||w||$ we have $\gamma ||w|| - y_i(w^Tx + b) \leq 0$. The second term is linear. The first term $\gamma||w||$ is not convex: the second derivative with respect to $\gamma$ is 0, but the mixed derivative $\frac{\partial^2}{\partial\gamma\,\partial w} \gamma||w|| = \frac{w}{||w||}$ isn't 0, so the Hessian isn't positive semi-definite.
Note that since $b$ doesn't appear in the objective we can rescale it freely, e.g. define $b' = \frac{b}{||w||}$. Similarly for $w$, we define $w' = \frac{w}{||w||}$, and note that $||w'|| = 1$ (note that while $w'$ is not well defined at $w=0$, $\lim_{w \rightarrow 0} ||\frac{w}{||w||}|| = 1$). Then our problem becomes:
$$\max_{\gamma, b' \in \mathbb{R}, w' \in \mathbb{R}^d} \gamma\\ s.t. \; \gamma - y_i({w'}^T x_i + b') \leq 0\\ ||w'|| = 1$$
Note that $||w'||=1$ defines the shell of a sphere, which is non-convex. I think that this is a fairly intuitive way to illustrate it.
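To make this fully explicit (a worked check of my own, not part of the original answer): take the two feasible points
$$w_1' = (1, 0, \dots, 0), \qquad w_2' = (-1, 0, \dots, 0).$$
Both satisfy $||w'|| = 1$, but their midpoint $\tfrac{1}{2}w_1' + \tfrac{1}{2}w_2' = 0$ has norm $0 \neq 1$. A convex set must contain every convex combination of its points, so the feasible set is not convex.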
---
I'm an EE from USC with mostly a software career. My hobbies of robotics and mechatronics have kept my EE skills reasonably sharp. My current role is architecting software for SDRs, so I get to do DSP too!
## Re: SDR/DSP Question
I am going to echo everyone else's suggestion that you avoid using the TMS320C6713. Unless you are into retro hardware (which is cool in its own right, but the joy...
## Re: Who discovered the math of the digital polar discriminator?
Ah, that makes total sense, thanks!
## Re: Who discovered the math of the digital polar discriminator?
Does this work as an equivalent? $$R e^{j\omega t[0]} \cdot R e^{-j\omega t[-1]} = R^2 e^{j\omega(t[0] - t[-1])} = R^2 e^{j\omega}$$ I don't see how $$t[0] - t[-1]$$ becomes 1 in...
## Re: Undersampling FM radio frequencies
Typically the local oscillator will be generated by a frequency synthesizer. That and the mixer would be "in addition" to what you have on hand. That said, ...Given...
## Re: Undersampling FM radio frequencies
I suspect the part you are missing is the local oscillator. Typically in an SDR the incoming signal is "mixed" (which is a multiplication) with a sine signal that...
## Re: MathJax/Latex question
Glad it helped! Two resources I depend on heavily are the Mathjax "demo" page (that lets you type in code and see what it produces) https://www.mathjax.org/#demo...
## Re: DQPSK - New to this, and want to learn...
I completely feel your pain! When I started I was in the same spot where I didn't know if the signal was correct, the software decoding it was correct, or the circuit...
## Re: MathJax/Latex question
Hmm okay, I seem to have got it, the trick is using the begin{equation*} / end{equation*} from the ams math package, and begin{align} / end{align} to align the parts.\begin{equation*}h_j(k) ...
## MathJax/Latex question
Hi All, If this isn't the right place for this I apologize; I've tried a couple of other forums as well and I'm striking out. I'm trying to write Hogenauer's equations...
## Re: DQPSK - New to this, and want to learn...
Welcome Kim, sounds like you are jumping right in with both feet. It is fortunate these days that there are a lot of free tools from Octave (a MATLAB equivalent)...
## Re: Curve Modelling
Hi Groger, not trying to be unkind here, but did you try typing "how to estimate a polynomial curve fit" into the box at www.google.com? If so, it would have shown you...
## Re: Impulse response of CIC filter
Thanks for the response @kaz, plotting the frequency response of the taps you have gives this:Not surprisingly I've got a number of different plots where I try to...
## Re: Impulse response of CIC filter
Thanks again @gretzteam for this insight, I'm trying to understand how you got it. I started by looking at the different sequences that are generated by different...
## Re: Impulse response of CIC filter
How did you choose how to do the interleaving?
## Re: Test signal names
I like that, impulse prime. I'll use that name in my code, thanks!
## Test signal names
So I've used the "unit impulse" (which is a 1.0 magnitude sample followed by zeros) to test filters, and a "unit step" (50% of the samples are 0, the remaining 50%...
## Re: Impulse response of CIC filter
You're right, it does. Now, nothing I've read about CIC filters talks about polyphase; that is something I need to understand. Can you point me to a reference? And as...
## Re: Impulse response of CIC filter
Ok, that is pretty awesome!
## Impulse response of CIC filter
So this is a curious one. I am continuing to investigate #CIC filters now that I have run @Rick Lyons' tests and proven to myself that they are now correctly implemented....
## Re: #CIC filters and testing them
Rick, Thank you so much for your patience. You have helped connect a number of dots for me (that explanation of the diagram is brilliant). For other readers of this...
## Re: #CIC filters and testing them
Hi Rick, [Side note: the pictures are awesome!] Your understanding is correct. Having the two pictures is also pretty cool since it's easier to see how diagrammatically...
## Re: #CIC filters and testing them
This makes a lot of sense to me, although the use of the word phase is ambiguous. I have understood differentiators to be computing the slope (i.e. the first derivative)...
## Re: #CIC filters and testing them
Hi Rick, There is certainly something different in the differentiators :-). From your first response I use M = 1, so the differentiator is looking at X(n) and Y(n...
## #CIC filters and testing them
Rick Lyons posted an informative post about testing the implementation of your #CIC decimation filters here: Two Easy Ways to Test Multistage CIC Decimation Filters...
## CIC Implementation details
I'm implementing a #CIC decimation filter. I'm doing it on an microprocessor rather than in an FPGA and that means I don't "easily" get variable bit width registers....
## Re: What is cyclic frequency α?
Consider this copy for $104 (https://www.abebooks.com/servlet/BookDetailsPL?bi=...). Generally, Abe Books is a much better source of old text books than Amazon.
## Re: phase change
Are you asking for the idiomatic way to do this in a DSP or just what does the process involve?If you're synthesizing the waveform you can just bump the value of...
## Re: New IEEE Signal Processing Society Journal
@stephaneb In my opinion the only two metrics for a "journal" are credibility and the ability to track down citations to papers in that journal. Sadly, credibility has...
## Re: Increasing frequency of a sinusoids after FFT of a signal
Assuming you have the spectrum in complex form, you band-pass filter out the 1700 Hz, multiply the result by a .5 Hz complex sinusoid, and add the result back into...
## Re: New IEEE Signal Processing Society Journal
I am always a bit annoyed by "pay to publish" journals. Especially if that journal then charges extortionately for people to get copies of the papers. Given that...
## Re: small DSP ?
Actually the Teensy 4 is pretty reasonable for this. I've got the Digilent I2S PMOD board which can do 192 kSPS in stereo.
## Re: concept of bandwidth
Hi Sharan, make your data complex with the real component being MAXVAL and your quadrature component being randomized. That will spread the signal across the entire...
Hi Chalil, It is helpful, thank you. In this particular case I am relying on the 32 bit nature of the registers to deal with overflow. The math question though is...
## Re: FFT of a sine wave signal
Per Achilles comment, the FFT is always complex, even if you feed it a purely real signal. When you combine that with the understanding that a complex signal that...
---
# Relation between cohesion and angle of internal friction

Mohr–Coulomb theory, named in honour of Charles-Augustin de Coulomb and Christian Otto Mohr (Mohr developed a generalised form of the theory around the end of the 19th century), is used to determine the combination of shear and normal stress that will cause a fracture of a material. It combines Coulomb's friction hypothesis with Rankine's principal stress hypothesis, which describes a separation fracture: given the unit normal to a plane of interest, one computes the traction vector on the plane and, from its magnitude, the stress normal to the plane and the resolved shear stress on the plane. Generally the theory applies to materials for which the compressive strength far exceeds the tensile strength. In structural engineering it is used to determine the failure load as well as the angle of fracture of a displacement fracture in concrete and similar materials; in geotechnical engineering it is used to define the shear strength of soils and rocks at different effective stresses. The failure (yield) surface is often expressed in Haigh–Westergaard coordinates in terms of the principal stresses, and a non-associated plastic flow potential that is smooth is commonly used to model the plastic flow of geomaterials (and other cohesive-frictional materials).

The Mohr circle strength envelope equation can be expressed as

$$\tau = c + \sigma \tan\phi$$

where $\sigma$ is the normal stress, $\tau$ is the shear stress, $c$ is the cohesion, and $\phi$ is the internal friction angle, the angle between the envelope and the $\sigma$ axis. The term $\tan\phi$ is known as the coefficient of friction, and $\phi'$ denotes the effective stress friction angle, the "angle of internal friction" after Coulomb friction. Tension is assumed positive in this convention; if compression is assumed to be negative, then $\sigma$ should be replaced with $-\sigma$. If $\phi = 0$, the Mohr–Coulomb criterion reduces to the Tresca criterion.

Cohesion is the internal force that resists movement between the particles of a soil; it may be due to cementation, chemical bonding, electrostatic attraction, or other processes in the regolith (in wet granular material, for example, a capillary state is achieved that fills the pores with water). Internal friction resists sliding along the rupture surface. Essentially, the angle of repose, the steepest angle at which a sloping surface formed of loose material is stable, and the angle of friction are the same, given by the inverse tangent of the coefficient of friction between two surfaces: a body of mass m resting on a plane inclined at angle θ to the horizontal and held by a frictional force F begins to slide when tan θ exceeds that coefficient. Factors that influence the friction angle φ include sorting, grain size, and grain angularity.

In bearing-capacity calculations, the first term of the equation is related to cohesion, while the second term is related to the depth of the footing and the overburden pressure; the bearing-capacity factors Nc, Nq and Nγ are functions of the internal friction angle. Empirical correlations estimate relative density and friction angle from standard penetration test (SPT) N-values, e.g. Hatanaka and Uchida (1996) and Suzuki et al., and other work relates rock uniaxial compressive strength and apparent cohesion to parameters measurable with geophysical well logs. Some reported observations: the friction angle of concrete does not change much, staying in the range of roughly 20° to 30°; in one interface direct shear test (per Richard Thiel's guidance on cohesion and friction angle in direct shear tests), the interface friction angle was 14.5° while the adhesion was 15.5 kPa, and shear strength was mobilized through 10% to 16% strain; geosynthetic clay liners (manufactured products used in hydraulic dams) show decreasing cohesion with longer hydration of bentonite, for hydration times longer than 14 days. An experimental study of the fines content and angle of internal friction of a lateritic soil (G.O. Adunoye, Department of Civil Engineering, Obafemi Awolowo University, Ile-Ife, Nigeria) notes that soils in nature are mixtures of fine and coarse particles, and its results indicate the existence of a relationship between the two quantities.
Des produits manufacturés pour Les barrages hydrauliques ultimate shear strength was mobilized through 10 % - 16 strain... Percentage of water Content is 1.0, a capillary state is achieved that fills with water θ! The link below to share a full-text version of this model is to combine Coulomb 's friction is! C can be used to select specific values within the ranges cross section in deviatoric stress.... To select specific values within the ranges further, the body and ‘ F ’ the! Soil type is an internal force that resists the movement between the.... Angle { \displaystyle c } is often used to select specific values the. \Displaystyle -\sigma } a lot, in the range of 20°~30° ' ) normal reaction of the.. Equation 1 defines the Mohr-Coulomb yield locus of the theory around the end of the material ( ∼0–400 )... Considered for reconnaissance of granular soils are all so the same a body of mass ‘ m ’ resting a! Nonzero cohesion c that might be present article with your friends and colleagues for which Mohr–Coulomb... Shear surface the existence of a material 1 ] the slip surface, cohesion, angle. Des produits manufacturés pour Les barrages hydrauliques, and resources for geotechnical Engineers increased overburden....
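As a quick numerical illustration of the envelope equation (1), here is a minimal sketch. It is not from the source text: only c = 15.5 kPa and φ = 14.5° are taken from the interface test quoted above, while the normal-stress values and the friction coefficient at the end are invented for the example.

```python
import math

def mohr_coulomb_shear_strength(sigma_n, cohesion, phi_deg):
    """Failure envelope tau = c + sigma_n * tan(phi)."""
    return cohesion + sigma_n * math.tan(math.radians(phi_deg))

# Interface parameters quoted above: adhesion c = 15.5 kPa, phi = 14.5 deg.
for sigma_n in (50.0, 100.0, 200.0):  # normal stresses in kPa (illustrative)
    tau = mohr_coulomb_shear_strength(sigma_n, cohesion=15.5, phi_deg=14.5)
    print(f"sigma_n = {sigma_n:6.1f} kPa -> tau_f = {tau:6.2f} kPa")

# Angle of repose = angle of friction = arctan(coefficient of friction).
mu = 0.6  # illustrative coefficient of friction
print(f"mu = {mu} -> angle of repose = {math.degrees(math.atan(mu)):.1f} deg")
```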
|
|
The contents of these clamps are then ligated with 00 silk sutures. Downward traction is maintained on the esophagus while it is further freed from the surrounding structures by blunt dissection with the index finger. The vagus nerves are not always easily identified, but their location is more quickly discovered by palpation (Figure 9). As the tip of the index finger is passed over the esophagus, the tense, wirelike structure of the nerve is easily identified. It should be remembered that one or more smaller nerves may be found, both anteriorly and posteriorly, in addition to the large left and right vagus nerves. Additional small filaments may be seen crossing over the surface of the esophagus in its long axis. The left vagus nerve is usually located on the anterior surface of the esophagus, a little to the left of the midline, while the right vagus nerve is usually located a little to the right of the midline, posteriorly (Figures 10 and 10A). The left vagus is then grasped with a blunt nerve hook, such as the de Takats nerve dissector, and with curved scissors is dissected free from the adjacent structures (Figure 11). The nerve can be separated from the esophagus easily by blunt dissection with the surgeon's index finger. It is usually possible to free at least 6 cm of the nerve (Figure 12). The nerve is crimped with a silver/tantalum clip and is divided with long, curved scissors as high as possible. It is unnecessary to ligate the ends of the vagus nerve unless bleeding occurs from the gastric end (Figure 13). The use of silver clips at the point where the vagus nerves divide minimizes bleeding and serves to identify the procedure on subsequent roentgenograms. After the left vagus nerve has been resected, the esophagus is rotated slightly, and the traction is directed ...
|
|
# Build my own mass spectrometer?
1. Aug 4, 2006
### leright
I am a double major in EE and physics and I was thinking that a great senior project would be to design and build my own mass spectrometer. I would not only like to build the basic device, but also calibrate the device so that it provides the m/q value based on the radius of curvature caused by the B-field, and also provides the relative abundance of these m/q values, like a real mass spectrometer. I would like this to be displayed on an LCD screen of some sort possibly.
I have a good understanding of how mass spectrometers work and it seems like this is feasible. Am I out of my mind? I have 2 years before my senior design project is due, and I think I am going to get started on my project very soon. I plan on designing all of the power electronics (such as the rectifier and ripple voltage reduction circuitry) myself. Any advice or suggestions?
I want to find a project that is heavy on the E&M calculations since it is one of my favorite topics, and this came to mind. It seems like an exciting endeavor.
Thanks.
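(An aside on the calibration step described above: for an idealized single-focusing sector geometry, an ion of charge q accelerated through potential V satisfies qV = ½mv², and in a uniform field B it follows a circle of radius r = mv/(qB), giving m/q = B²r²/(2V). A minimal sketch of that arithmetic; the field, radius, and voltage numbers are made up for illustration.)

```python
def mass_to_charge(B_tesla, r_m, V_volts):
    """m/q in kg/C, from q*V = 0.5*m*v^2 and r = m*v/(q*B)."""
    return (B_tesla ** 2) * (r_m ** 2) / (2.0 * V_volts)

E_CHARGE = 1.602176634e-19   # elementary charge, C
AMU = 1.66053906660e-27      # atomic mass unit, kg

# Illustrative numbers: B = 0.1 T, r = 5 cm, V = 1 kV.
mq = mass_to_charge(0.1, 0.05, 1000.0)
print(f"m/q = {mq:.3e} kg/C (~{mq * E_CHARGE / AMU:.2f} u per elementary charge)")
```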
2. Aug 4, 2006
### Rach3
This is in the wrong place - it probably belongs in one of the engineering forums.
3. Aug 4, 2006
### leright
oops. Could someone move it over to there please?
4. Aug 4, 2006
### Rach3
Have you worked with high vacuum before? You need that for MS or anything with ions - my understanding is that it's extremely tough to work with. And expensive.
5. Aug 4, 2006
### leright
Nope. Never. I suppose I am a complete novice and I have lots of learning to do.
As far as money goes, I think I am willing to spend no more than 3000 dollars on my senior design project, and my university might provide some funding also. And even then, 3 grand is pushing it... more realistically around 1 grand.
Last edited: Aug 4, 2006
6. Aug 4, 2006
### leright
anyone have experience with this? Is a system that generates high vacuum easily constructed, or is there a lot of expensive equipment I'd need to buy? Ideally, I would like to design and build as much as possible from scratch. I suppose I would need all kinds of specially treated materials, low vapor pressure materials, very dry conditions and many many high powered vacuum pumps, correct?
Perhaps this problem is one where an engineer would be more capable of answering than a physicist. It does seem more like an engineering problem, for the most part. Maybe it should be in one of the engineering forums.
Last edited: Aug 4, 2006
7. Aug 5, 2006
### leright
anyone want to offer any criticism of this idea, or pointers? Is this a reasonable endeavor for an undergrad? I have around 2 yrs to do this project.
8. Aug 5, 2006
### leright
I suppose I will post this in the general engineering forum.
9. Aug 5, 2006
### nbo10
You need to find an advisor that will let you work in and use his lab. Otherwise you're looking at an extremely expensive project. And you're going to need a machine shop. Look through Review of Scientific Instruments and I'm sure there are a few decent books out there.
10. Aug 5, 2006
### Manchot
How much vacuum are we talking about? If you only need to get down to about 50 mTorr, you can probably get away with a mechanical roughing pump. They're expensive, but they're not that expensive. If you need to go lower than this, you'll start running up costs like crazy. In that case, you'll need another pump (in addition to your roughing pump), such as a cryo-pump, a diffusion pump, or a turbomolecular pump. It doesn't matter which you pick: they're all well outside of your price range (usually $10k and up). EDIT: I just found a used diffusion pump on eBay for$500. I guess I was wrong about that.
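(Aside, not from the thread: the roughing stage can be sanity-checked with the usual first-order estimate t = (V/S)·ln(p0/p1), which holds only roughly, since outgassing and the pump's ultimate pressure slow things down near the bottom. The chamber volume and pump speed below are made-up values.)

```python
import math

def pumpdown_time_s(volume_l, speed_l_per_s, p_start_torr, p_end_torr):
    """First-order roughing estimate t = (V/S) * ln(p0/p1)."""
    return (volume_l / speed_l_per_s) * math.log(p_start_torr / p_end_torr)

# Illustrative: 20 L chamber, 2 L/s roughing pump, atmosphere down to 50 mTorr.
t = pumpdown_time_s(20.0, 2.0, 760.0, 0.050)
print(f"~{t:.0f} s to rough from 760 Torr to 50 mTorr")  # on the order of 100 s
```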
11. Aug 5, 2006
### leright
I am thinking along the lines of 0.5 microTorr in this application.
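(Aside, not from the thread: kinetic theory shows why a figure like this is plausible for a mass spectrometer. The ions must cross the flight path without colliding with residual gas, so the mean free path λ = kT/(√2·π·d²·p) should far exceed the instrument dimensions; the molecular diameter below is an assumed round number for an N2-like gas.)

```python
import math

def mean_free_path_m(p_torr, T_kelvin=293.0, d_molecule_m=3.7e-10):
    """Mean free path lambda = k*T / (sqrt(2) * pi * d^2 * p).
    d defaults to roughly the kinetic diameter of N2 (assumed value)."""
    k_B = 1.380649e-23        # Boltzmann constant, J/K
    p_pa = p_torr * 133.322   # Torr -> Pa
    return k_B * T_kelvin / (math.sqrt(2) * math.pi * d_molecule_m ** 2 * p_pa)

print(f"0.5 uTorr -> lambda ~ {mean_free_path_m(0.5e-6):.0f} m")         # ~100 m
print(f"50 mTorr  -> lambda ~ {mean_free_path_m(0.050) * 1000:.1f} mm")  # ~1 mm
```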
12. Aug 6, 2006
### Gokul43201
Staff Emeritus
leright, before you commit yourself to thousands of bucks worth of vacuum equipment (for the traditional approach, you'd likely need a roughing pump backing a turbo pump), you might want to read this recent article:
Hao Chen, Zheng Ouyang, R. Graham Cooks, "Thermal Production and Reactions of Organic Ions at Atmospheric Pressure" Angewandte Chemie, 118(22), 3738 (2006)
(the link to interscience does not seem to work right now - you could email Chen for a copy of the article)
13. Aug 6, 2006
### leright
Thanks Gokul. I have read this article http://www.physorg.com/news64203374.html
It is quite interesting, but I will still need to work with high vacuum in the mass spectrometer itself. Also, this technique does not produce fragment ions, which, imo, is a downside in this application. The fragmentation ions tell a lot about a compound if it is unknown.
14. Aug 6, 2006
### Manchot
0.5 microTorr? I don't want to say that it's impossible with off-the-shelf parts, but you'll be spending a lot of money, and I'm afraid that it's probably out of your price range. Aside from your roughing pump and your turbo/diffusion/cryo-pump, you'll also need something like a thermocouple gauge to measure pressures in the mTorr range, and something like a cold-cathode or ionization gauge to measure pressures in the microTorr range. That's without considering the difficulty you'll most likely have sealing your device from atmosphere, which absolutely loves to permeate everything. (Trust me on this: I know all too well.)
Does your university have a clean room where they do semiconductor work? If they do, they most likely have an evaporator or two in there that can pump down to the nTorr-microTorr range, as well as someone who knows lots more about vacuum systems than I do. You might want to make an appointment with him or her to discuss your options, and to point you in the right direction.
15. Aug 6, 2006
### Gokul43201
Staff Emeritus
Or, you can walk to the nearest Analytical Chemistry lab on campus and speak to the Mass Spec technicians there.
Do you have a budget?
16. Aug 6, 2006
### leright
I go to a fairly small university (~5000 students) and the chemistry program is roughly 50 students. The "mass spec technician" would be one of the professors. We have one mass spectrometer, one NMR, one IR spectrometer, one electron microscope, one gas chromatograph, etc. We have only one of everything. It's not like we have entire labs with just mass spectrometers, so there is no need for a "mass spectrometer" technician. However, I will talk to some chemistry professors to see what they say. If they just give me a weird look or something, I will know this is outside my skill level and budget. :p
Last edited: Aug 6, 2006
17. Aug 6, 2006
### leright
I doubt my university has anything like that. However, I remember back a long time ago, during university chemistry, a professor showed us a simple mass spectrometer. It really didn't look too fancy. The real mass spectrometer we have is a quite large machine and quite expensive, but the one I was shown by a prof was quite simple looking.....not REALLY simple, but it looked like something I could build.
18. Aug 7, 2006
### Gokul43201
Staff Emeritus
Professors may be busy or unavailable (or may not know much about the technical challenges involved in building a mass spec). If you can talk to a prof who can help you, great! But you might also ask your prof if the dept has someone (in more of a technical capacity) that is responsible for maintenance of various instruments. Smaller depts will often hire one such person.
19. Nov 4, 2007
### amk12
Hey,
Did you end up building the mass spectrometer? Any updates?
20. Nov 5, 2007
### leright
No, I decided to bail on the idea. :)
|
|
Noticeboard archives
## User talk:70.173.50.153
Take a look at this page and tell me what you think. Looks like the user removed some templates back on 10 November and received vandalism warnings for it. It doesn't look like vandalism to me, certainly not simple vandalism that requires a template. Possible test edits, possibly good faith but clueless, but no matter. That isn't what is bothering me.
The user then tries to remove the vandalism warnings from his page and receives further vandalism templates and a block for doing it. This looks like major newbie biting to me. When an IP is clearly static, and when the IP is clearly not a vandal, why do we not allow them to remove the templates? Is it plain stubbornness? Theresa Knott | The otter sank 10:37, 3 December 2007 (UTC)
This is a good question. It may be that we need further discussion about this but there seem to be many who think removing warnings from one's talk page is vandalism. It is not. WP:UP#CMT makes plain that a user may remove such warnings. Of course, they still exist in the history of the page. Some users are embarrassed by the admonitions and wish to remove them. Such is not prohibited. I think there may be some confusion because that has not always been the practice. I believe we allow them to remove the warnings. We have much bigger issues to spend our time on. - JodyB talk 12:58, 3 December 2007 (UTC)
As the protecting admin, I have an issue with removing warnings while vandalism is in progress, as happened here. The same policy page makes clear that removing a warning implies that you read it ... to me, removing warnings and then continuing the same edit pattern is a way to try to avoid the consequences of your actions by preventing other users from seeing how many you've already gotten.
Flip edit summaries and ownership assertions like this did not help this user's case with me. Perhaps it was edit war rather than pure vandalism, but the net effect is the same, as is the remedy. Daniel Case 13:05, 3 December 2007 (UTC)
Come on, seriously? If you take a look at the time line of the entire escapade, it should be clear what has happened here. I made an edit, specifically, removing a template which User:Fogeltje felt should be there. That's fine. He then proceeds to watch my user page. I blanked the page... I get it, I read the warnings... and it stayed that way for hours. It seems more like stalking my page with an attempt to humiliate than any kind of actionable page blanking on my part.
All of my supposed vandalism and edit warring at this point comes from doing exactly what everyone says I should be able to do.. removing content from my talk page when I've read it. Does anyone really believe that this is an important part of the encyclopedia which needs protection?
Did I behave like a petulant child on occasion? Sure. Most of it came from my pure incredulity that "protecting" a page intended for talking to me was such a priority... nevertheless, I accept that I am responsible for my poorly thought out response. 70.173.50.153 20:50, 3 December 2007 (UTC)
See above. I don't believe anons have the right to remove warnings from Talk pages, as they are not "their" pages. Corvus cornixtalk 19:00, 3 December 2007 (UTC)
Once a vandal, always a vandal, eh? Yeah, forget reform. Forget shared IPs. Forget giving anybody a chance. We need to block this guy, he's a major threat to the encyclopedia, removing all those critical {{test1}} messages from a page no one is ever going to read. I must admit I'm at a complete loss to understand why the contents of this talk page are important. Once the page was protected, did they really need to be blocked? Seems a bit much of a hurry. Don't we all have better things to do than play police with things that aren't even remotely a problem for the project? – 21:53, 3 December 2007 (UTC)
I agree with Luna, Theresa, JodyB, and for the most part 70.173.50.153. No edits had been made in the previous week, the previous edits were all still (top) - there was no vandalism in progress. I can sometimes see the need for full protection at times like this to stop the RC patrollers edit warring on the user's talk page, but never the need for a block. 'Anons' are editors like you and me. -- zzuuzz (talk) 22:08, 3 December 2007 (UTC)
Not quite. They can't create articles and can't move them. This was by design after those capabilities were stripped away from unregistered users. For good reasons, too.
Given that many anons are used by different users and are frequent bases for vandalism and sockpuppetry, we have every right and obligation to be less forgiving when they are used to edit in violation of policy and consensus, whatever misunderstandings are claimed later. That's why I keep the templates up ... other users who might leave messages have to be able to know what kind of user they're dealing with. And there is really no such thing as a truly static IP ... this was mine for a while but now it's not anymore. Nor do we have the reasonable certainty that the same person is behind every edit that we do with a registered user (how many times have you gotten an unblock request along the lines of "My brother started editing while I was out of the room!"?).
The blocking came first, then the protection. I was more than a bit annoyed when the page was blanked immediately after the block. That just flushes every good faith assumption I could have. Daniel Case 03:25, 4 December 2007 (UTC)
So you blocked him before you protected the page. For what?! He was a productive editor. You obviously didn't check his contributions because if you did you'd know that a) his only "vandalism" was to remove some templates back in November, and b) He isn't claiming that he didn't do it, only that it wasn't vandalism. Or do we define removing of a vandalism warning itself as vandalism? If the IP changes and is no longer his, why do we need the warnings? Your argument makes no sense to me. Theresa Knott | The otter sank 05:42, 4 December 2007 (UTC)
So you blocked him before you protected the page. For what?! He was a productive editor. In real life, having no prior record when I beat someone up in a bar fight (not that I would) isn't going to get me off the hook entirely for it. I believe the same principle applies here.
He isn't claiming that he didn't do it only that it wasn't vandalism. While I agree this case may have been different from most that make this claim, the fact is we hear this one a lot. (BTW, he is also admitting that his actions were hotheaded).
Or do we define removing of a vandalism warning itself as vandalism? As I said, I very much do if it's removed when the alleged vandalism is in progress, and then the same edits that gave rise to the warning are repeated. It matters not whether the warning was for a good-faith edit or not. Just like it doesn't matter whether the police have real grounds to arrest you if you start running around and clamming up to make it harder to put the cuffs on — you are still resisting arrest under the law. Strict liability applies in that situation as long as the officers can demonstrate that they had every reason to believe they were effecting a lawful arrest. I consider this situation analogous.
The proper way to respond to a vandalism warning you believe you have received errantly is to immediately initiate talks with the other editor so that you both gain an understanding of each other and what your motivations were. It is not to begin edit warring on your talk page. And then, if the two editors have come to an understanding as people often do, then the vandalism warnings can be removed by mutual consent. That's the Wikipedia way ... this is a collaborative project, after all. In fact, I just did exactly that last weekend when I realized I'd warned the wrong IP on something.
Perhaps this wasn't vandalism, but it was definitely incivility. Daniel Case 16:16, 4 December 2007 (UTC)
And here we see the inherent beauty of having a complex web of guidelines and rules which are largely known only to the enforcement community. If one charge can not be made to stick, another is right at hand. I have no doubt that I committed at least a dozen other infractions in my edits. For most of November, I largely refused to use edit summaries!
For the record, I don't claim I'm being singled out for persecution. I suspect that all so-called anonymous editors are treated with the same lack of respect with which I was. And, before anyone even starts quoting more procedure at me, I wasn't editing to make a point. Oddly enough, I was editing to make an encyclopedia. An encyclopedia which has enough respect for its readers (anybody remember these people?) to offer them a comprehensible and polished article. Yes, even if that means removing some mark-up which does nothing but encourage said readers to become editors.
I've largely given up on that quixotic quest. Instead, I've taken to hitting the random article page and cleaning up whatever I find there. Nowadays, I mostly leave the templates in place, so as not to incur the wrath of the people who somehow have time to place and "watch" templates, but not actually improve the articles.
I've learned a lot about Wikipedia in my short month of trying to contribute. I even created a user after an admin spoke to me like an actual human being. Since I had to out that user as part of this discussion, I have started yet another user. I really thought I was going to try to contribute. But my anti-authoritarian nature has kicked up.
The ploy has succeeded. You've sucked me into the morass of Wikipedia politics. I am now spending the majority of the time I've allotted for this project reading policies and writing these manifestos.
Is it clear, yet, that I've read the warnings on my page? I tried once again to clean up the page, only to have it reverted as vandalism. Imagine my shock when I discovered that it was Fogeltje's first edit of the day! He is right on the ball with keeping me in my place!
I don't want an apology. And I certainly don't want new justifications. I want the warnings off the page. In fact, I want everything off the page. 70.173.50.153 17:28, 4 December 2007 (UTC)
a complex web of guidelines and rules which are largely known only to the enforcement community. And which anyone can learn if they're patient enough.
If one charge can not be made to stick, another is right at hand. As is often the case in real life.
For most of November, I largely refused to use edit summaries! Well, why?
I suspect that all so-called anonymous editors are treated with the same lack of respect with which I was. I wouldn't call it quite a "lack of respect", but if you mean that some of us are more suspicious of anonymous edits, particularly those that come without edit summaries, you'd be right. We feel we have good reason.
But my anti-authoritarian nature has kicked up. Some people would call that an attitude problem. Yes, this project is open to all ... all who abide by the rules and policies that have been worked out and evolved from community discussion and consensus. And that inherently requires giving some people some authority to enforce those rules and policies. So if even that authority is one you're going to chafe at, you might want to reconsider how this will go for you if you don't want to accept that you are subject to that authority.
I am now spending the majority of the time I've allotted for this project reading policies and writing these manifestos. In the former case, you are hardly alone. Everyone had to learn these things, everyone who decided that they wanted to be a member of this community. As for the latter, that's entirely your choice. You could also have seen this as a learning experience, the sort of learning experience we all had at one point, and moved on and done the editing you wanted to, but for whatever reason you decided to make an issue of this. I do not judge here; I merely note that others might do differently.
I would also note that learning how things are done here is no different from what you must do anywhere you move, anytime you choose to start to become part of another community, virtually or really.
Concerning your issues with User:Fogeltje, I would note that I didn't find a single post from you to his talk page in its recent revision history. Do you honestly think you can expect him to be reasonable about this if you make no overtures to him? (And I do think he ought to be in this discussion).
I don't want an apology. And I certainly don't want new justifications. I want the warnings off the page. In fact, I want everything off the page. If you're going to use an actual account from now on, as you said on my talk page, as you said here, why would that matter? You could scarcely plead offense if someone looking this over began to think you were more interested in confrontation and settling a perceived grudge than actually contributing productively. And speaking personally, lay off the self-pity, it never does anyone any good. Daniel Case 18:41, 4 December 2007 (UTC)
Actually I was the one making an issue of it. I approached him and asked if I could help as it was apparent that he was upset. Anyway enough of this. I will blank the IP talk page myself so that the matter is settled, but do ask that other users be less quick to label newbies as vandals, and more understanding when such newbies get annoyed about being labelled as such, and start trying to de-escalate a situation rather than escalate it by adding yet more vandalism templates. Theresa Knott | The otter sank 10:48, 5 December 2007 (UTC)
Correction I see that user:Luna Santin has already done it. Hopefully that is the end of the matter. Theresa Knott | The otter sank 10:51, 5 December 2007 (UTC)
## Behavior of user Ilkali on numerous pages
A lot of these accusations have already been addressed elsewhere, but I'll provide a short response to each of them here.
1. "appears to be on a crusade to decapitalize the word "God" in as many places as possible on Wikipedia". Only when it is a common noun, as explained in great detail on the two talk pages.
2. "He makes unilateral edits that are reverted by admins (based on a consensus of editors that his edits were inappropriate), only to reapply those same edits again and again". I reverted User:Dbachmann's edits when he and I were the only ones involved. At this point, there was no consensus. When User:Craig_zimmerman joined, I ceased. The three of us discussed the issue (to varying degrees), mostly on the MoS talk page. During the process, three of four editors agreed that common nouns should not capitalise, with the fourth not making any clear statements in either direction. With the orthographic conventions largely cleared up, it fell to analyse the actual edits to see if the changes were appropriate. To this end, I presented arguments in support of specific edits ([1]). At this point Dbachmann and Craig_zimmerman both declared unwillingness to discuss the issue. I resumed reverting.
3. "he insists on unilaterally making his changes based on his POV". 'Common nouns don't capitalise' isn't a POV. 'Determiners are almost exclusively used with common nouns' is not a POV. etc.
4. "When asked why he believes he has a case, he frequently retreats into (paraphrase) "I've already explained my position and won't bother doing so again for people who refuse to understand it."". (Why did you paraphrase instead of just quoting me?) The only person to whom I responded like this is Craig_zimmerman himself, and this was because he repeatedly argued against a position that I didn't hold, ignoring what I had said elsewhere in the discussion. I was not the only editor to suggest that he didn't understand my position.
5. "He recently cited me for Wikiquette violations after he unilaterally deleted rebuttal comments I made to him on talk pages and I complained about this action". I'll let the WQA itself address this one: [2].
Ilkali 19:17, 3 December 2007 (UTC)
Sounds like a content dispute which got a little hot. I suggest some dispute resolution, since you don't need admins to resolve this issue at this point in time. --Haemo 19:47, 3 December 2007 (UTC)
I'd like that, but it's difficult to resolve a dispute when one side of the disagreement is unwilling to do anything other than revert changes. Ilkali 20:07, 3 December 2007 (UTC)
Though this would have been more concise as inline comments...
1. By "unwillingness to discuss the issue" he seems to mean that his argument was soundly rebutted and summarily contradicted, but this did not stop him from continuing to cling to his POV on the subject. Those who rebutted and contradicted were deemed "unwilling to discuss."
2. Opinions about what constitutes usage of a common noun, etc., are indeed POV's, and this user has strong POV's that contradict both editorial consensus and documented English-language usage conventions as provided in great gory detail.
3. The paraphrase was intended to summarize how this user talks to those who disagree with him in general. This talk page section offers explicit examples of his language directed at multiple editors, including his tirade at dab in which he said "If you had any understanding of the distinction at hand, you wouldn't say that my edits were made through indiscriminate search-replacing." (Not that "the only person to whom I responded like this was..." would be any sort of excuse for such behavior in any case.) "I'm not obligated to teach you syntax just so you can talk productively about this topic but I'll explain how you're wrong" (followed by no real explanation of what was wrong with the original statement—perhaps it was he who was failing to understand?) is yet another example. Other similar texts appear in the Derren Brown disruption discussion.
Despite the fact that Ilkali's arguments about what is and isn't an example of the usage of a common noun have been inconsistent, and despite the fact that his analogies in support of his ideas were flawed, and despite the fact that consistently he has failed to make the case that he seems to believe he has made, he continued his disruptive reversion behavior in the cases cited above. This behavior has occurred numerous times in the past with perhaps the most notorious and flagrant example being the Derren Brown article, where the issue of his behavior was apparently only resolved by protection of the page from his disruptive edits. I contend that this is a repeated pattern of deliberate disruptive behavior that warrants appropriate action. Note that administrator response to his citation above ("arguments in support of specific edits") was that
"this is entirely a content dispute related to these specific passages now and has nothing to do with general MoS on capitalisation. Misotheism is discussed as a position towards monotheism in particular in these passages, and hence God is capitalised. Ilkali, you are now, by your revert-warring, indulging in WP:POINT. Review WP:DISRUPT for possible sanctions that may be taken against such behaviour.
Craig zimmerman 20:12, 3 December 2007 (UTC)
"By "unwillingness to discuss the issue" he seems to mean that [...]" - What I mean is that both editors declared unwillingness to discuss it. You on the WQA and Dbachmann on his talk page.
"Opinions about what constitutes usage of a common noun, etc., are indeed POV's" - If my understanding of these terms is a POV, then presumably yours is as well? And by your own reasoning, your arguing for your favored version of the article means you are pushing your own POV?
It's a ridiculous claim. We're not primarily disagreeing over the information in the article, just the means used to convey it. This isn't a POV issue.
"The paraphrase was intended to summarize how this user talks to those who disagree with him in general" - Where did I refuse to explain something to Dbachmann?
"Ilkali's arguments about what is and isn't an example of the usage of a common noun have been inconsistent" - Can you back this up?
"his analogies in support of his ideas were flawed" - Yadda, yadda, yadda. Even if the issue of who was right were directly relevant here, nobody is going to assume I was wrong just because you say so. Let them read the talk pages and decide for themselves. You don't have to fill every comment here with as much bile as possible.
"Note that administrator response to his citation above ("arguments in support of specific edits") was that [...]" - You forgot to mention that said administrator was Dbachmann, and wasn't acting in his capacity as an admin (rightly so, since that would involve a conflict of interest).
I hope that by now people are seeing a pattern in how CZ represents people and events. Ilkali 22:31, 3 December 2007 (UTC)
"What I mean is that both editors declared unwillingness to discuss it. You on the WQA and Dbachmann on his talk page." There was discussion, authoritative sources were cited showing that Ilkali was wrong, there was only unwillingness to discuss the issue further beyond that point.
"It's a ridiculous claim. We're not primarily disagreeing over the information in the article, just the means used to convey it. This isn't a POV issue." According to this reasoning, all usage standards are really just POVs, I guess. And majority consensus and historical precendent on usage doesn't carry any weight in balancing which POV is right. Isn't that the ridiculous claim?
"Where did I refuse to explain something to Dbachmann?" Throughout his series of requests for clarification made to you that went unanswered. But that wasn't what the content of the comment refered to in any case. The direct quotations include remarks made to both dab and myself.
"Can you back this up?" Ilkali attempted to use the notion that the word "David" could be both a proper name for a person and a common noun (e.g., refering to a group of people who are "Davids") in support of his opinions. When it was pointed out that this was a bad analogy to a situation in which the debate was about whether a word should be capitalized (he cited no instance of a lower case "david") he not only did not respond, he deliberately deleted the text containing this point.
"Yadda, yadda, yadda. Even if the issue of who was right were directly relevant here, nobody is going to assume I was wrong just because you say so. Let them read the talk pages and decide for themselves. You don't have to fill every comment here with as much bile as possible." Yadda, yadda, yadda indeed. Does saying that another person's arguments are flawed constitute "bile?" I didn't think that was the case. In any case, let's move on.
"You forgot to mention that said administrator was Dbachmann, and wasn't acting in his capacity as an admin (rightly so, since that would involve a conflict of interest)." Fair enough. He wasn't acting in his capacity as admin at that juncture. He was simply noting that your behavior, in his opinion, was in violation of WP:DISRUPT and that sanctions against you might be appropriate if you continued engaging in it. This is a POV shared apparently by many people about your behavior on Wikipedia. Is it just another POV, or is it one that has merit? That's the question we're trying to answer.
"I hope that by now people are seeing a pattern in how CZ represents people and events." I sincerely hope so. Craig zimmerman 20:15, 4 December 2007 (UTC)
"There was discussion, authoritative sources were cited showing that Ilkali was wrong, there was only unwillingness to discuss the issue further beyond that point." - If you predicate your arguments on the presupposition that you were right, they're going to fall flat. I can do exactly the same thing. We all believed we were right, and we all acted accordingly.
"According to this reasoning, all usage standards are really just POVs, I guess" - My words: "This isn't a POV issue". But if you insist that my analysis of these nouns is a POV then yours is necessarily so as well. Which of our analyses is correct is a matter for another page.
""Where did I refuse to explain something to Dbachmann?" Throughout his series of requests for clarification made to you that went unanswered" - I expressed refusal without even answering? I have put more effort into resolving this dispute and building understanding of the viewpoints than anybody else ([3], [4], [5], [6]).
"Ilkali attempted to use the notion that the word "David" could be both a proper name for a person and a common noun" - No I didn't. I showed that 'David' can function as a common noun, through widely-documented twin syntactic and semantic processes of proper->common conversion ([7], [8]), whereby it can take modifiers and a determiner (syntax) and denote a set of entities (semantics). You don't understand what this means (which of course isn't shameful - like the majority of people, you just haven't studied linguistics), but the real problem is that you don't realise you don't understand it.
"not only did not respond, he deliberately deleted the text containing this point" - ...while requesting that you post the same comment below my text rather than inside it. The first time you did it, I spent time extracting your replies manually and asked you not to do it again. I did the same here. I don't accept an obligation to do it every time. Ilkali (talk) 21:20, 4 December 2007 (UTC)
I will attempt to summarize the issues focusing on the disruptions rather than dragging the content debate here:
1. If you predicate your arguments on the presupposition that you were right, they're going to fall flat. I can do exactly the same thing.
In fact, this is exactly what Ilkali did do. When his arguments were countered with historical precedent and common usage guidelines from authoritative sources that flatly contradicted him, he again ignored those rebuttals and claimed unilaterally that he was right, usually going back and unreverting his changes that were deemed inappropriate. This has happened in a number of places on Wikipedia over the period of several months.
2. My words: "This isn't a POV issue".
Meaning his POV is right and everyone else's, no matter how well documented, is wrong, and any effort to dissent from his POV and call it wrong is labeled a failure to understand, a dismissal of the rebutter's intellectual abilities, or a personal attack heaping "bile" upon him. This is also a repeated pattern.
3. I have put more effort into resolving this dispute and building understanding of the viewpoints than anybody else.
If by "effort" he means deleting people's comments that contradict him, unilaterally unreverting changes in acts of blatant edit-warring, and dismissing the counterarguments of critics as personal attack, then and only then I would agree. The resolution that is the unilateral goal of this effort seems to be the foisting of his opinion on others.
4. "Ilkali attempted to use the notion that the word "David" could be both a proper name for a person and a common noun"
- No I didn't.
Without dragging the actual content debate here, it was shown that his example was a poor analogy that did not accurately reflect on the "God vs. god" issue being argued, and rather than address the arguments against this failed analogy, more contempt was hurled at those who rebutted him (in this case, me).
5. You don't understand what this means (which of course isn't shameful - like the majority of people, you just haven't studied linguistics), but the real problem is that you don't realise you don't understand it.
I must admit, here in this thread at least, Ilkali goes to great lengths to bend over backwards to appear gracious, with his parenthetical remark injected here. This has hardly been the case in the actual discussions we are referring to. No such parenthetical retreat from outright contemptuous dismissal occurred outside this thread. In any case, is it possible Ilkali doesn't realize he doesn't understand the counterarguments being addressed to him? I think not, I think he knows they are valid and just doesn't care. Whatever the truth of the matter, the edit-warring and other violations of Wikipedia behavior guidelines manifest some kind of stubborn refusal to participate rationally.
6. "not only did not respond, he deliberately deleted the text containing this point"
- ...while requesting that you post the same comment below my text rather than inside it. The first time you did it, I spent time extracting your replies manually and asked you not to do it again. I did the same here. I don't accept an obligation to do it every time.
This is simply not true. My comments (responding to individual bullet points inline to save space, taking great care to ensure that flow was preserved and that attribution was clarified) were summarily deleted, without response. Here, as with the injected parenthetical graciousness, his behavior is quite different.
Craig zimmerman (talk) 14:33, 5 December 2007 (UTC)
I honestly don't think I need to put up a defense anymore. Ilkali (talk) 15:14, 5 December 2007 (UTC)
## User:Lostinlodos
Lostinlodos (talk · contribs) did a cut and paste copy of Burma to Union of Myanmar. I reverted him and explained that 1-there was no consensus for the move, and 2-that his move was a copyright violation because the edit history was lost by his cut and paste move. He replied with several legalisms. On the Talk:Burma page, he's claiming 'given that The Union Of Myanmar is a member of the United Nations, it opens Wikipedia's site host and administrators to international LEGAL action by the government of The Union', and is repeatedly trying to claim on my Talk page and on his own, that the cut and paste move didn't violate international copyright law, and therefore it was perfectly legitimate. I have no intentions of getting into an edit war with him, but this is just a heads up that he'll probably try to make the move again, since he sees nothing wrong with what he did. Corvus cornixtalk 23:43, 4 December 2007 (UTC)
Probably a good idea to have the article at the right place, but there's the specter of legal threat there, and he's being uncivil, and he DID do a cut n paste. Maybe an admin could do the move and redirect properly, and issue a warning for legal jargon being unfriendly, as well as a general incivility warning? ThuranX (talk) 00:16, 5 December 2007 (UTC)
But where is the "right place"? There is no consensus to move it, that I can see. Corvus cornixtalk 00:18, 5 December 2007 (UTC)
Until the revolution begins, it should be at the UoM page. The Burma page can deal with the historical location, the prior nation and people, and the struggle to free it from tyranny. The UoM can deal with the torture and human rights violations, the radical politics of the Junta, and so on. In other words, 3000 years at Burma, 18 years at UoM. Let each article cover the proper subject matter. ThuranX (talk) 00:47, 5 December 2007 (UTC)
The move has been discussed to death, and there is no reason for it to spill onto ANI, aside from discussing Lostinlodos's actions. Someguy1221 (talk) 01:08, 5 December 2007 (UTC)
I agree. Corvus cornixtalk 02:39, 5 December 2007 (UTC)
Talked to death, but ultimately, POV. Arguing that the title 'legitimizes the junta' is absurd. Wikipedia's far less a journalistic endeavor than most of the newspapers using Myanmar. Write the article using NPOV sources, and you'll find that most people will quickly figure out that it's a bullshit Junta of bullies, thieves and monsters. But it is the name that that absurd mess self-identifies by. Use the Burma location for the history of the nation before that government, and solve the problem. Let the articles tell the story. That this has made it to AN/I shows that this is not settled. As for LostinLodos, his actions were wrong, and I've supported a warning against him. However, this issue needs to be addressed. I recommend that those most involved open an RfC on the matter. Wikipedia isn't a political action group to condemn or 'legitimize' the government, just to write up the facts. The fact is, there was a nation by the name Burma. That nation needs coverage. There is currently a government called the UoM, which occupies that nation currently. The occupation government (junta) needs coverage. Split the 125K article into a history of Burma and the occupation nation, and be done. At 125K it's too large anyways. There's a middle road, IAR/BOLD and be done. ThuranX (talk) 03:32, 5 December 2007 (UTC)
Ok, not only talked to death, but talked to death in a 100+ kilobyte discussion involving over 50 editors (that's where I stopped counting). An RFC is unlikely to resolve anything, but neither is discussing it here. ArbCom might be the right stop for this if there is still no consensus. Bold IAR actions should be avoided when it's a given someone will revert it. Someguy1221 (talk) 05:01, 5 December 2007 (UTC)
In my own defence; that notation was posted on the article's talk page a full day later after reading a notice update I received by email regarding /another/ incident elsewhere. I am not threatening anyone. Honestly it really doesn't affect me where the page is, and the single cut and paste, as I've stated to the point, was inadvertent, and accidental; and would have been changed by myself if I had noticed it stuck prior to Corvus cornix reverting back. I have not now nor ever taken part in anything remotely considered an "edit war" by Wikipedia's definition; on this site or any other. As for civility; if anyone is being uncivil I'd charge that it was Corvus cornix whose very first statement regarding the REDIRECT was to focus on the accidental save page click rather than show preview click, and the charge that HE/SHE made that I violated copyright law. A quick look at the raw version of my user talk page will show that I only RESPONDED to HIS/HER claims about copyright violation, not asserting the lack of violation first. You'll also note that the edit and revert to the page this user refers to came at 17:14 and 17:20. Hardly enough time given my stated intentions of the REDIRECT; which was to untangle the three dozen or so dead link multi-redirects. Lostinlodos (talk) 15:17, 5 December 2007 (UTC)
Mistakes happen; to even the most seasoned editors. Had Corvus cornix simply noted something to the regard of 'hey you did a copy and paste, that's naughty' and posted a link that would have been the end of it. I would have said sorry, oops, ... and corrected the error on my own, tagged an apology to the discussion page and THANKED CC for pointing it out. Instead he/she posted this as the very first comment notice "Edit warring over a country name is a good way to get blocked. And cutting and pasting the Talk page from one name to another is a copyright violation. Please don't move the article name again without consensus": (bold added to emphasize) which to me is spiteful, uncivilized, attacking and aggressive in nature; and also factually WRONG. THAT is why I strike up the no CRA violation defence. I realise out of context my statements can be turned against me; IN CONTEXT I believe it sheds more light on the situation and shows that I was attacked rather than ignoring 'rules' and 'regulations'. User_talk:Lostinlodos#Burma Lostinlodos (talk) 15:17, 5 December 2007 (UTC)
In a more stable location and more in direct line: my intentions were to:
A) correct the title and information of the page by a redirect or move (whichever was easier in the end) to the correct LEGALLY RECOGNIZED term for the plot of land under the various names (legally Union of Myanmar). Calling the country by any other name is, factually, at its most bare level, POV.
B) untangle and update the outward spread of redirects and links so that they all completed, something that desperately needs to be done. On some browsers (Opera/older Safari), and on others (e.g. FireFox/Netscape) with plugins/add-ons, the pages fall dead after the first redirect. On others, such as the UNet browser, AOL Browser, and 'Zaa Browser, the pages stop on the second redirect. Lostinlodos (talk) 15:17, 5 December 2007 (UTC)
## Colonel Warden (talk · contribs)
I noticed this very nasty personal attack made by Colonel Warden (talk · contribs) on Wikipedia:Articles_for_deletion/Frisbyterianism, and I also noticed a consistent pattern of personal attacks from him like [9], [10]. He was warned for the last link here, but he quickly blanked it. I'm close to giving a 24 hour block for this. Any objections? Thanks This is a Secret account 04:21, 5 December 2007 (UTC)
I shan't comment on the other topics, but the one "personal attack" mentioned is actually a fairly apropos play on the deletion subject at hand, and clearly intended as humor; I laughed, at least. :-) --Kim Bruning (talk) 04:27, 5 December 2007 (UTC)
Here is another, he was asked to refactor and I believe did so. Thanks, SqueakBox 04:33, 5 December 2007 (UTC)
I fail to see any personal attacks. The accusation of puppetry might be, if thoroughly groundless, but it looks like there's some back and forth there, so ...eh. As to the 'very nasty' one, it's funny, and clearly intended as humor. One in four being a maybe isn't much of a record of horrible incivility. I'd object to a block. ThuranX (talk) 05:23, 5 December 2007 (UTC)
No block, I don't think any offense was intended. Neil 11:37, 5 December 2007 (UTC)
## Intelligent design sockpuppet attack?
Suddenly, a number of different editors have taken to deleting whole sections of the Intelligent Design article, all using the same language. If somebody puts the section back, a different editor appears and deletes the section again. I'm not a regular there, so I don't know who's who, and I was told to report this suspected sockpuppet attack here. If it is not a sockpuppet attack then it is an edit war. Please help. AnteaterZot (talk) 06:18, 5 December 2007 (UTC)
Unless it's a mass of obvious single purpose accounts, you should try to assume good faith. There is an ongoing content dispute on the article's talk page. Someguy1221 (talk) 07:04, 5 December 2007 (UTC)
plus the suspected sock has over 3500 edits! One of the other "puppets" has over 15000 edits! Raul654 (a 'crat) is the most recent editor... he didn't protect the page...Balloonman (talk) 08:26, 5 December 2007 (UTC)
## User:Grant Chuggle is back
Resolved: IP blocked for 1 day by Tonywalton
I would normally file a suspected sock puppet case, but since he admitted to being a banned user here: User talk:41.241.73.254, I thought I could get him blocked again faster this way, since he admitted being blocked and using the IP to evade the block. Thank you. IrishLass (talk) 14:14, 5 December 2007 (UTC)
## Mid-scale puppetry - second opinion requested
Hey, I was hoping someone could give this a quick look, I think I'm dealing with around 20 socks but would appreciate some fresh eyes on the deal. The Rambling Man (talk) 15:29, 5 December 2007 (UTC)
This edit, and the one after it on my page, lead me to think (no, believe) that my adoptee has been using sock puppets to disrupt Wikipedia. I am now going to A) talk to him about the policy; and B) label the other accounts that I know of as sockpuppets. I just don't know if any other action needs to be taken. I am deeply concerned that User:Iamandrewrice is never going to learn how to be a positive contributor and am at my wits' end. Thanks. Jeffpw 12:46, 3 December 2007 (UTC)
I've been reviewing the behavior of Iamandrewrice since the initial thread on ANI. In his 3 weeks on Wikipedia, this user has managed to violate most of the core policies, including making legal threats, using sockpuppets and gross incivility to numerous editors. Jeffpw has had remarkable patience with Iamandrewrice, mentoring him and attempting to mold him into a productive user. However, this experiment has failed, as the user is either unwilling or unable to follow the advice given to him by Jeff and many others. He's been blocked twice (legal threats, vandalism) and has recently implied that he's created multiple sockpuppets to evade blocks. Since there's no sign that Iamandrewrice's behavior is going to undergo a miraculous change, I'm requesting that this user be blocked indefinitely. I believe that a review of his contributions will lead other editors to the same conclusion. Chaz Beckett 13:00, 3 December 2007 (UTC)
I've been following this carefully and yes, Jeff deserves a very large chocolate barnstar. I'd agree that an indef block appears to be warranted. I've asked Iamandrewrice for an explanation of the edits Jeff has concerns about; so far the only response has been to be told that "I am not the person on the account" (despite the edits Jeff brought up). Tonywalton | Talk 13:37, 3 December 2007 (UTC)
I too have followed this from the sidelines since being in dispute with him. That episode resulted in Jeffpw's adoption of Iamandrewrice. He has since shown significant progress as an editor and has worked hard on several articles. However, the amount of incivility towards his mentor has been astounding, as have Jeffpw's patience and goodwill, for which I awarded him a barnstar and some encouraging words. If this is the path that Iamandrewrice has now chosen, as it would appear, then I guess an indef block is the only solution. That said, it really all boils down to how much longer Jeffpw can continue to mentor someone who at times seems more eager to prove himself right regardless of Jeffpw's firm warnings to stop acting out. I don't think anyone would blame Jeffpw for simply deciding to back out of this arrangement. EconomicsGuy 13:30, 3 December 2007 (UTC)
This seems to be a fairly unambiguous statement that Jeff's done just that (and no blame to him) Tonywalton | Talk 13:39, 3 December 2007 (UTC)
That's not true! I have not purposefully vandalised, as you will see! My edits were all in good faith! And I was learning very much from Jeff... Iamandrewrice 13:03, 3 December 2007 (UTC)
You are constantly uncivil to Jeff, as in these edits [11] [12] [13]. This is how you're treating someone you're "...learning very much from"? Sorry, you've been informed that this type of behavior is not acceptable, but the inappropriate behavior has continued and possibly even worsened. Chaz Beckett 13:12, 3 December 2007 (UTC)
I am basically crying now! I put my trust in you as an adoptee... and was hoping you felt the same level of care... I was, and still am, trying so hard... if you look at my edits, none of them are vandalistic ... EVEN that Monkton one, as that was in good faith! Iamandrewrice 13:16, 3 December 2007 (UTC)
It seems to me that you have an inability to see past yourself. Jeff took on the responsibility of adopting you when others thought you were a lost cause. He has attempted to put you on the right path. Instead of being grateful you demonstrate a selfishness that is totally out of line with the way Wikipedia works. Then when you are challenged on your behaviour you attempt to put the blame back on the one person who had faith in you. "Crying"? My ass! You are one of those kids who, for whatever reason, thinks it's always someone else's fault. Your behaviour is your fault, no-one else's. --WebHamster 13:32, 3 December 2007 (UTC)
A quick observation. Is it not more likely that User:SpidermanHero is a sock of User:Joeseth1992 - especially given User:SpidermanHero claims his name is Jose? Yes, the first message left on User:SpidermanHero was from User:iamandrewrice, but that is arguably to be expected, given User:iamandrewrice has claimed to know User:Joeseth1992 in real life. And User:Joeseth1992 did have his recent block extended for using socks to get around it. Also (and I haven't the diffs at the moment as I'm in a rush), User:iamandrewrice did claim User:Joeseth1992 wanted to improve the grammar on Wiki, and User:SpidermanHero did make such an edit (albeit an incorrect one, capitalising a direction)[14]. I am convinced User:Joeseth1992 and User:iamandrewrice are separate people in real life (and they have left enough personal information about themselves on their user pages to make it apparent they are friends on MySpace and Bebo, for instance). My main concern is that User:iamandrewrice has created the account to disparage the person his username suggests he is. That person does exist, and is connected to the person behind User:iamandrewrice on Bebo if nowhere else. Given all that can be obtained from the details both left (mainly email details in userboxes), is this not more likely to be a group of school friends that have got out of control, rather than socks? Whitstable 13:33, 3 December 2007 (UTC)
This diff would support that assertion. Someone came complaining that Iamandrewrice had taken their name to use here. I expressed concern at that time, but it was decided Iamandrewrice could keep the name. Jeffpw 13:50, 3 December 2007 (UTC)
The sockpuppets are actually a very minor (and recent) part of the problem. Let's assume they're not sockpuppets, but a group of friends. Now we have a situation where Iamandrewrice is playing his usual game of creating drama and then playing the "I didn't know any better" card. This card made its first appearance when he made legal threats and then, when blocked for this behavior, claimed not to know the definition of "legal action" (despite claiming on his user page that English is his native language and that he's studying English language and literature). So he's either lying about using sockpuppets or he's using sockpuppets; neither one is acceptable behavior. Chaz Beckett 13:38, 3 December 2007 (UTC)
Agree completely with that - and would also point to [this edit] to suggest that even if the two accounts are not the same person, they have been known to work together in a way that is disruptive. I nearly filed a CU yesterday because of the number of blocked users user:iamandrewrice had been contacting, but opted not to. But socks or not, I think the way user:Jeffpw has been treated by someone he has gone beyond the call of duty to help is so unfair that I am fuming about it, and I'm only looking at this from the outside! Whitstable 13:43, 3 December 2007 (UTC)
I just had a thought regarding checkuser - since the socks (or !socks) seem to admit to being pupils at the same school, isn't a checkuser likely to prove very little? The IP will be either the school's proxy or at least should be expected to be in the same IP block in any case. Tonywalton | Talk 13:54, 3 December 2007 (UTC)
He posts at times when one would normally expect a UK school to be closed, therefore it may be he logs in here from school and home. --Rodhullandemu (please reply here - contribs) 14:09, 3 December 2007 (UTC)
Good point, well made. Tonywalton | Talk 14:27, 3 December 2007 (UTC)
More thoughts: this section here also makes me suspicious. And User:Christine118500 has been chasing around for adoption in a similar manner to how User:Joeseth1992 did. Whitstable 15:02, 3 December 2007 (UTC)
I was thinking that, too, Whitstable. And SpidermanHero is doing the same thing now, as well. It does seem as if they are one user with split-personality disorder, or a group of school friends who have decided to make Wikipedia their target for fun and games. It will be interesting to see what the checkuser report says (I filed it a while ago). Jeffpw 15:06, 3 December 2007 (UTC)
Agreed, Whitstable, and this edit might be seen as ill-advised at best, under the circumstances. Tonywalton | Talk 15:08, 3 December 2007 (UTC)
(undent) They (User:Iamandrewrice and User:Christine118500) both pass the duck test and are obvious sock/meat puppets of each other. I'm going to indef both and suggest that one of them may be unblocked only on stringent parole. — Coren (talk) 15:10, 3 December 2007 (UTC)
The other two do not seem quite as obvious to me, however (but very likely). — Coren (talk) 15:12, 3 December 2007 (UTC)
As per my comments above, User:Joeseth1992 and User:SpidermanHero appear to be the same person. Similar style, and the second account, created after Joeseth is blocked, claims to be named Jose? Whitstable 15:26, 3 December 2007 (UTC)
For information Iamandrewrice has now posted an unblock request (with a rationale 873 words long!) on his talkpage, in which he admits that SpidermanHero is a meatpuppet. I strongly feel that this editor has been told often enough about policy and had it explained point-by-point where it applies to his edits without success. (Leaving aside questions of possible puppetry) I support the retention of an indefinite block. Tonywalton | Talk 17:01, 3 December 2007 (UTC)
I agree with maintaining the indef block. There's a pattern of Iamandrewrice creating drama until blocked, then claiming it was all a misunderstanding, followed by being unblocked and then quickly returning to the inappropriate behavior. There are two possibilities here, either he's playing games seeing how much he can get away with or he's truly unable to understand how people are expected to behave here. Either one should result in an indef block. This nonsense has gone on long enough. Chaz Beckett 17:06, 3 December 2007 (UTC)
I agree, and would mention that Christine118500 has the same pattern. Tvoz |talk 17:09, 3 December 2007 (UTC)
As does Joeseth1992 Whitstable 17:12, 3 December 2007 (UTC)
Endorse the indef block. He had plenty of second chances and pretty much blew them all by arguing with Jeffpw rather than paying attention. There is no reason to believe that he will not simply return to his old pattern of disruption and acting out. The fact is he got a second chance that 99% of users who start out like he did never get, and he basically wasted that chance. EconomicsGuy 17:19, 3 December 2007 (UTC)
Preliminary checkuser shows that the account has been socking. I need a second opinion on the Christine one, hence it's not completed, but Iamandrewrice certainly has - Alison 17:27, 3 December 2007 (UTC)
As Christine118500's former adopter, and having observed Jeffpw's admirable attempts with Iamandrewrice, I endorse both blocks. I would add that Christine118500 admitted prior to being adopted that he had been blocked in the past (Christine118Maureen is clear, and others apparently); I discussed the matter with Isotope23, the admin who blocked the previous account, who said in reply that he was willing to let Christine118500 edit and try to reform. Sadly, he has not, and a block is warranted on that ground at least. I do not know whether Christine118500 and Iamandrewrice are the same individual. CU or a more detailed comparison of edit times and styles may reveal more, but it may matter little. BencherliteTalk 17:32, 3 December 2007 (UTC)
Per the CU Iamandrewrice, SpidermanHero, Joeseth1992 (and, interestingly, Radiation111 and Narnia101) are confirmed. The result on Christine118500 is pending. Tonywalton Talk 17:44, 3 December 2007 (UTC)
Following another unblock request (which I declined) and yet more verbose "apologies" and promises, I've blanked and full-protected User talk:Iamandrewrice. Tonywalton Talk 18:13, 3 December 2007 (UTC)
A new one just arrived. Blackhouse123 is claiming to be friends with Christine118500. He also made this edit which isn't very helpful. IrishGuy talk 18:33, 3 December 2007 (UTC)
Another? Just seen this edit by User:Burningandrew within four minutes of account creation. Whitstable 18:40, 3 December 2007 (UTC)
And another Dom58 the Second. They're kindly signing up at Petition to unblock User: Christine118500 (twice deleted). BencherliteTalk 18:45, 3 December 2007 (UTC)
Plenty more socks now identified by checkuser. I blocked a bunch of them already and the checkuser case has now been updated - Alison 19:06, 3 December 2007 (UTC)
I think, by now, any illusions of good faith are reasonably ignored. Block-protect-ignore. — Coren (talk) 19:16, 3 December 2007 (UTC)
Oh my God. I was out seeing clients for the last few hours, so missed these latest revelations. I said to Tonywalton yesterday that I thought I had adopted Rosemary's Baby. Now it is clear I really adopted Sybil. Oh well, it was a good learning experience for my next adoptee. Thanks to everyone who gave me support throughout this. Jeffpw 19:47, 3 December 2007 (UTC)
One Two Three Four,
How many More?
Five Six Seven Eight,
Well you'll just have to wait!
:)
Christineandrew 21:35, 3 December 2007 (UTC)
And yet more, it seems. User:Andrewsclone just made this edit. Oh, and see the above post by User:Christineandrew. Whitstable 21:37, 3 December 2007 (UTC)
Another just created: User talk:Andrewsbaby. To quote the user page:
you know who i am people
laterz yeah? yeah...
Sigh. Whitstable 21:41, 3 December 2007 (UTC)
confirmed / blocked the underlying IP - Alison 21:42, 3 December 2007 (UTC)
A static IP, I hope! Tonywalton Talk 22:12, 3 December 2007 (UTC)
A look at suspected puppet User talk:Dom56! also suggests the following are puppets: User:Guys09, User:Toast123 and User:Dr. Reeves. Thanks, Whitstable 22:10, 3 December 2007 (UTC)
See also Wikipedia:Suspected sock puppets/Dom56! for some more possible ones. BencherliteTalk 22:16, 3 December 2007 (UTC)
For information, further from Andrewsbaby (on their talkpage, now a protected redirect to the userpage):
ok people
theres just one thing i want and then this will all stop... seriously... I want you to unblock my IP address... that is my only request... then I wont bother you with these accounts anymore... but doing that is just unfair...
Tonywalton Talk 22:17, 3 December 2007 (UTC)
We can't unblock the IP address unless the sockpuppeteer reveals which IP address is being used. We also know that more than one address has been used. --Yamla 22:27, 3 December 2007 (UTC)
Well, yes (and I'm not sure about "can't"; would doing so be a good idea?). If they're complaining about collateral damage (for instance, and I'm speculating here), perhaps the reason they seem so keen to have an IP unblocked is that one of the autoblocks is going to hit something like a school proxy, with ensuing awkward explanations from themselves as to how their activities got it blocked. If they're on a dynamic IP then, meh, they can easily get another one (as I'd guess they are doing). I'm not sure whether this, followed immediately by this, may be of interest. Someone didn't log in. Tonywalton Talk 22:40, 3 December 2007 (UTC)
Hi again everyone... remember me now? ;) WiArthurWho 16:54, 4 December 2007 (UTC)
Sigh... the sad thing here is that you don't seem to understand that you weren't banned (yet!), just indef blocked. You could have just started over, and no one would have blocked your new account had you stuck to good-faith edits. Once again you turned out to be your own worst enemy here. EconomicsGuy 17:24, 4 December 2007 (UTC)
Sockpuppetry continues, I've reopened the checkuser request. I move for a formal ban on this vandal. --Yamla 17:31, 4 December 2007 (UTC)
Per this, our friend Iam... is but one puppet among many and not the puppeteer (I suspected this last night). Try Wiarthurhu. I too would support a ban, of course. Tonywalton Talk 17:38, 4 December 2007 (UTC)
• Though I know I should be outraged, I can't help but feel sorry for the guy. If you can believe anything he has written, his MySpace says this is his birthday. Instead of spending it celebrating with friends, he is waging a one-man war against a group of strangers on the internet. That's just plain sad. I don't mean to imply he should not be banned; I just still have an element of compassion for what is obviously a very troubled young person. Jeffpw (talk) 21:25, 4 December 2007 (UTC)
And I posted this on Tony's page, but I will post it here, too: This user, whoever he is, has emailed me with a full explanation of what has transpired. He asked me to post it here, but I refused. I did, however, promise I would forward it to any admin or other office people who might wish to read it. Jeffpw (talk) 23:34, 4 December 2007 (UTC)
Support the ban. This ban is the very last option; he blew the second-to-last chance in spectacular fashion when Jeff had to give up on him. I really thought that this was the exception to the rule, that he really could be turned into a constructive, good-faith editor very eager to learn. I even felt really bad about having assumed bad faith about two of his uploads and really wanted the guy to succeed. Now that we know he was just a sock himself, I'm really disappointed. EconomicsGuy (talk) 12:01, 5 December 2007 (UTC)
I'm currently debating this with him on User talk:Benniguy. I support Tonywalton's indef block of that account as a preventive measure, but unless everyone else has given up on him, maybe we can work out an arrangement where he is allowed back on one account with full disclosure, a strict civility parole, and no arguing with people when he is told rather firmly not to do so. He needs to understand that, and if he does, I'm willing to assume good faith one very last time. EconomicsGuy (talk) 19:01, 5 December 2007 (UTC)
The user has repeatedly lied. See the below, for example, where Alison shows that the user falsely claimed an innocent third party was a sockpuppet account. Additionally, the user has claimed here to have a very limited number of sockpuppet accounts but elsewhere, claimed to have "thousands". --Yamla (talk) 20:36, 5 December 2007 (UTC)
I was unaware of the checkuser result when I posted this and debated with him. I agree, the lying is continuing. Full support for the community ban. EconomicsGuy (talk) 20:39, 5 December 2007 (UTC)
Agree with a ban. This vandal has been at it for months now. They've been given several "second chances" with various accounts. Ban, block, and ignore.--Isotope23 talk 20:44, 5 December 2007 (UTC)
Hi, I've reverted a few actions by the self-admitted sock, on one of their comments they also confess to a few other socks; could someone please check into these and block as appropriate? Benjiboi 11:48, 5 December 2007 (UTC)
I must say, I'm more than a little dubious. --Yamla (talk) 16:26, 5 December 2007 (UTC)
Hey-ho. FWIW, I'm inclined to believe that Iamandrewrice and Christine118500 are not the same person. The initial link between them is probably me, in this way: I decided to stand down as Christine118500's adopter after this, and told Christine118500 so. The next day, I reviewed one of Joseth1992's unblock requests and refused it. I think Iamandrewrice saw my name on Joseth1992's page, saw this message on my talk page and came across Christine118500 that way. However, as far as I'm concerned, both of them have messed around far too much and fully deserve their indef blocks. BencherliteTalk 20:02, 5 December 2007 (UTC)
The original checkuser result explicitly indicated them as being separate but both socking - Alison 20:16, 5 December 2007 (UTC)
Confirmed - This guy is messing everyone around. Iamandrewrice (talk · contribs) = Fettes (talk · contribs) = Eastort (talk · contribs) = Orangestreetcat (talk · contribs) = Logitechfan (talk · contribs) = Donatenowkid (talk · contribs) - underlying IP blocked. Needless to say, the "confessed" accounts here ... aren't - Alison 20:24, 5 December 2007 (UTC)
Smells like a community ban... MaxSem(Han shot first!) 20:29, 5 December 2007 (UTC)
I support a community ban for both. Christine118500 had his last chance some time ago when he was allowed, by kind permission of an admin, to try and edit constructively with this account. He failed to do so. Iamandrewrice's behaviour is here for all to see. BencherliteTalk 20:35, 5 December 2007 (UTC)
I too support a community ban for both. This has burnt enough time that could have been used productively, has upset enough people (I'm not easily upset but I'm now getting paranoid about completely reasonable requests for assistance from new editors), and has just been too plain unpleasant, to be allowed to continue. Tonywalton Talk 22:51, 5 December 2007 (UTC)
User:Benniguy, an admitted sock of Iamandrewrice, made an extremely verbose posting here prior to me blocking them. The posting was subsequently blanked (quite correctly IMV) and Benniguy's talkpage protected. Before this was done I made a summary from that posting of what Iamandrewrice's actions to date had been. Those considering whether a ban may or may not be appropriate may care to see the summary and their response on this revision. Tonywalton Talk 23:13, 5 December 2007 (UTC)
### A message to Iamandrewrice
Just to confirm this, as someone in personal contact with these vandals (hence their attempts to drag me into this affair. -_-): Joeseth1992 is certainly not Iamandrewrice; lol, I know these two people. HOWEVER, it doesn't matter; the band of them are as bad as each other and should be banned... seriously, they've spent their time being sad acts, creating accounts at home and at school for the good of nothing, plus attempting to get me involved (which was the height of their stupidity - didn't work). I don't know who SpidermanHero is, but I suspect his real name is Ben. Seriously, if I had been notified of this sooner I could have helped clear a few things up. That's of course if you don't suspect me of being a sockpuppet of any of these people. Fishyghost (talk) 20:37, 7 January 2008 (UTC)
Just to clear this up: There are three boys, all with the intent of fooling you around in an immature manner, all should be blocked. Fishyghost (talk) 20:41, 7 January 2008 (UTC)
## Peculiar actions of apparently non-vandalist editor
Resolved
This editor, M1ss1ontomars2k4, has a number of perfectly ordinary edits under her(?) belt; but look at the last couple of articles created (including the deleted article, now a redlink). Unless there's a secret nasty Mozart I don't know about, this is some kind of wack vandalism that seems uncharacteristic, complete with a really nasty Durova quip in the edit summary. --Orange Mike | Talk 20:36, 4 December 2007 (UTC)
• That's just ... odd. I've deleted the (English redirect) articles as CSD:R1, and will drop a note on the user's talk page. BLACKKITE 20:45, 4 December 2007 (UTC)
Beat you to it. I've indef blocked the account as it appears to have been compromised, and left a note on both the user's talk page and by email. This way, the editor will be able to reestablish his identity, change his password, and resume editing. — Coren (talk) 20:50, 4 December 2007 (UTC)
Yeah - the only reason I didn't do the block myself was because the edit immediately after the article creations was in line with the user's normal editing interests. Still, better safe than sorry, and we'll see what they say. BLACKKITE 20:52, 4 December 2007 (UTC)
I think that's a legit piece in the Mozart canon. We have an article on Leck mich im Arsch. Gimmetrow 20:55, 4 December 2007 (UTC)
(e/c) Don't jump to any conclusions or hasty blocks. Mozart wrote several canons with obscene lyrics for his friends. This was highlighted in a way by the Durova incident, as Durova and Guy have made baseless accusations against !! and Giano for discussing those canons. (That's what "obscene trolling, knows German" was about.)
I haven't looked into the edits thoroughly yet, but if you blocked someone just because you don't believe the thing about Mozart, you should (a) unblock immediately and (b) give the user a thorough, sincere apology lest you become the next Durova. rspeer / ɹəədsɹ 20:59, 4 December 2007 (UTC)
I've followed up by looking at the article. It was completely legit. It even had references so that you could tell it was legit. What we've seen is a prime example of What Not To Do When You're An Admin, especially the week after Durova. rspeer / ɹəədsɹ 21:07, 4 December 2007 (UTC)
Support immediate unblock. This appears to be K233/382e. Gimmetrow 20:59, 4 December 2007 (UTC)
Deleted article appears legit -- see the German Wikipedia Article. The edit summary may have been a little uncivil, but certainly not grounds for a block. Pastordavid 21:02, 4 December 2007 (UTC)
The German version does appear to be legit; however, I deleted the two English redirects because they appeared unnecessary, and I think that was a correct decision (doesn't the idiom mean "Kiss my ass" anyway?). BLACKKITE 21:07, 4 December 2007 (UTC)
(ec x3) Roughly translated, yes, based on what my German friends tell me. Strictly translated: "Lick me in the ass". I had to ask after I saw that one created. spryde | talk 21:15, 4 December 2007 (UTC)
I've heard multiple people refer to the piece as "Lick me in the ass nice and clean". I wouldn't be able to spell the German version if I were looking for it. There's no reason to delete the redirect. rspeer / ɹəədsɹ 21:21, 4 December 2007 (UTC)
Usually we'd just mention the English title in the article, but I don't have any real objection to restoring the redirects (they might be a target for vandalism, though). BLACKKITE 21:25, 4 December 2007 (UTC)
The edit summary looks like it was intended as a needed warning to make sure an admin wouldn't rush in and do exactly what Coren did anyway. Reading it as anything else is assuming bad faith. rspeer / ɹəədsɹ 21:13, 4 December 2007 (UTC)
Coren isn't responding, so I've unblocked. rspeer / ɹəədsɹ 21:14, 4 December 2007 (UTC)
• And I've restored the German titled article, minus the revision with the incivil edit summary. BLACKKITE 21:15, 4 December 2007 (UTC)
So, whatever indicated this account was compromised? I would like to know for future reference. Gimmetrow 21:17, 4 December 2007 (UTC)
• I think the original editor looked at the rather incivil edit summary involving Durova together with the titles of the English-language articles, then looked at the user's previous contributions and thought they didn't match well. I can see why they thought that, and certainly when I first looked at the user's contribs (by then, the German-titled articles had been deleted, just leaving Lick me in the ass nice and clean), I was a little surprised too. BLACKKITE 21:22, 4 December 2007 (UTC)
• I hope that for you there's a difference between "a little surprised" and "reaching for the indef-block button". When a good editor does something odd, you can always ask them about it on their talk page. rspeer / ɹəədsɹ 21:29, 4 December 2007 (UTC)
• That's exactly what I did. By the time I'd left a note on their talk page (http://en.wikipedia.org/w/index.php?title=User_talk:M1ss1ontomars2k4&diff=prev&oldid=175778804), though, Coren had already blocked them. I tried to contact him on IRC to say I didn't think it was compromised due to the following edit, but he wasn't responding, and by that time the legitimacy of the article had been pointed out. BLACKKITE 21:35, 4 December 2007 (UTC)
• For the record, an immediate indef block is the only proper response to an account that appears compromised (and given how... uncharacteristic those edits appeared to be for an established editor, that was a reasonable conclusion). If the account wasn't compromised, the editor suffers a few moments of inconvenience while things are sorted out; if it was compromised, then damage gets limited and the editor's reputation doesn't suffer needlessly. You'll note the block reason makes it very clear the block was put in place not because of behavior, but because the security of the account was in doubt. At no point did I presume, or state, or act in a way consistent with my believing that M1ss1ontomars2k4 was anything but a good faith editor. Drama much? — Coren (talk) 23:42, 4 December 2007 (UTC)
So, whatever indicated this account was compromised? I would like to know for future reference, because I don't see it. The editor didn't go on a vandal spree. A hair trigger seems to risk offending an editor. Fortunately, the editor didn't get offended, but if he had, it would have been a lot more difficult to undo than a couple of bits of vandalism. Gimmetrow 01:26, 5 December 2007 (UTC)
• See [15] and User:!! for context. Although probably done completely innocently, it cannot be overstated how inappropriate an indef-block was here. Perhaps bring it to the board before a block next time? -- zzuuzz (talk) 21:27, 4 December 2007 (UTC)
A temporary indefblock of an account that seems to be compromised is not such a bad idea. The user can be easily unblocked if it is not the case. ≈ jossi ≈ (talk) 21:32, 4 December 2007 (UTC)
I must agree with the preemptive indefblock; it's much easier than reverting a bunch of articles. I've always been away for the entire time I've been blocked, seeing as people tend to unblock me before I even know I've been blocked. So I'd like to know what exactly can be edited by a blocked editor, because it wouldn't make sense if an editor couldn't explain his/her own actions in order to be unblocked. --M1ss1ontomars2k4 (talk) 21:47, 4 December 2007 (UTC)
• For exactly the reason you mention, when you're blocked you can still edit your talkpage, in order to post an unblock notice. BLACKKITE 21:53, 4 December 2007 (UTC)
I've gone ahead and undeleted the redirects and the article creation edit of Leck mir den Arsch fein recht schön sauber. I think it's important to preserve history. Before my undeletion, a non-admin could have suspected that the first sentence came from nowhere. The edit summary was uncivil but it didn't reveal any personally identifiable information - there is no reason that non-admins should not see it. Graham87 00:12, 6 December 2007 (UTC)
### Unblock is fine with me
Geesh. Stay away for ten minutes and see what happens! :-) I have no objection to the unblock. I blocked for the benefit of the editor, not to prevent him from editing.
For the record, the very nasty Durova crack screamed vandalism, but it was only normal to assume the account was compromised and not that a good editor suddenly went rogue. — Coren (talk) 21:34, 4 December 2007 (UTC)
Hehe...looks like you guys have figured it all out, so I won't put much explanation here. I cannot tell a lie; 'twas I who made those uncivil comments. Sorry for all the confusion, as it's mostly my fault. If you need additional proof that my account has indeed not been compromised, please feel free to ask! --M1ss1ontomars2k4 (talk) 21:35, 4 December 2007 (UTC)
P.S. Sorry BLACKkite, I'm a guy. The m1ss1on is just that--mission. My username refers to the Spirit and Opportunity missions. --M1ss1ontomars2k4 (talk) 21:35, 4 December 2007 (UTC)
Oops :) BLACKKITE 21:44, 4 December 2007 (UTC)
## Single issue poster, multiple accounts
Rachalupa and 71.170.220.213 appear to be used by a single-issue poster publicising their dispute with Interactive Brokers by posing as them and posting derogatory information. Examples: spam links to their website interactiveBrokersSucks.com [16], false information and vandalism (category) [17], and creating an alternate, similarly named (spoof?) article [18]. -- John (Daytona2 · talk) 21:39, 4 December 2007 (UTC)
You only seem to mention one account - Rachalupa. 71.170.220.213 isn't an account, it's an IP address, otherwise known as an anon. While the edits certainly seem as if they should have been reverted (which they have been), and the link to the spoof website might be classed as defamation, I can't quite see what administrator intervention could be made here. As far as I know there's no specific policy against a registered editor who isn't blocked editing as an anon. I've given Rachalupa a warning about placing attack links in articles. Tonywalton Talk 22:39, 4 December 2007 (UTC)
I have indef blocked the user account. A review of the edits shows it is a vandalism-only account. The IP I have blocked for 72 hours. If you or anyone wishes otherwise, they may make the change. - JodyB talk 22:58, 4 December 2007 (UTC)
I strongly recommend you list this link for blacklisting at MediaWiki talk:Spam-blacklist. Any admin there can do this for you. Otherwise, most hard-core spammers will just get other accounts as necessary to keep adding their links; blocking has little effect. --A. B. (talk) 05:25, 6 December 2007 (UTC)
Hi, I'm quite a new user on Wikipedia.
I had some problems with another user from before I had even got a user-name on Wikipedia.
I only had an IP address for a signature.
But even so, one user, kjetil r, knew that my usual username for message boards in Norway was 'cons', and used this nick/user-name when contacting me on Wikipedia, before I had got a user-name here.
(At least this is how I remember it).
And then, later, I was editing a page (Grandiosa), and another user placed POV tags, I think they are called.
The other user and I reached a compromise, as the other user referred to it.
And then, seemingly from nowhere, the kjetil r user suddenly appeared on this other page, now on the English Wikipedia and not the Norwegian Wikipedia, where our first 'encounter' was.
And then the kjetil r user placed new POV tags almost immediately after the first user had removed them, even though consensus between me and the first user had been reached.
So kjetil r wasn't involved in the discussion, but seemed to me to be surveilling me, and as soon as the first user and I had reached consensus, the kjetil r user appeared, seemingly from nowhere, placed new tags, and then seemingly disappeared again.
At least this is how it seemed to me.
I know I'm new on Wikipedia, but regardless of this, it seems to me that I have been followed and harassed in 'cyberspace' (that is, the user must have found my user-name somewhere on the internet, possibly a thread on a message board which I was linking at the beginning of my writing, before I understood all the things with the citations), and on two different Wikipedia editions.
This is how it seems to me.
So I was wondering what other people think of this.
And if I am on the right page for this.
And how I should go forward regarding this.
So I hope that this is the right place to mention this, and I would be very grateful for advice on how to go forward with this.
Thanks in advance for the help!
Johncons (talk) 22:07, 4 December 2007 (UTC)
I guess you mean your edits to Grandiosa, such as this one. You appear to be using message boards and the like as sources, but they're clearly not acceptable sources, so it was entirely correct that this information be removed. -- Finlay McWalter | Talk 22:15, 4 December 2007 (UTC)
And please learn to use the preview function - you've filled the edit history of that article with hundreds (literally) of tiny changes. Please add or remove content in one (or for complex cases, a handful, if really necessary) of edits, not a run of dozens. -- Finlay McWalter | Talk 22:24, 4 December 2007 (UTC)
I know I'm a new user on Wikipedia.
I'll continue to try to learn the rules and the procedures.
The reason I was writing here now was to ask for advice regarding the following and harassment.
So thanks in advance for help regarding this!
Johncons (talk) 22:27, 4 December 2007 (UTC)
You've not presented any evidence of following and harassment. A review of your edits shows you've edited only one article, and repeatedly added inappropriate content to it. It's entirely appropriate for Wikipedia editors to remove that, and (as you've repeatedly added it back) to caution you and remind you of Wikipedia's rules. -- Finlay McWalter | Talk 22:30, 4 December 2007 (UTC)
This isn't the case that I'm writing about here.
I appreciate, that there are several things that can be talked about.
But I was thinking in the way of: everything in its time.
And now, I was thinking that it was time for the following/harassment episode.
And this was on the Norwegian message-board.
So if it's alright to focus on this case, with the user kjetil r?
Because if one mixes in too many cases, then it gets difficult to get the overview.
I'm not sure if this is making any sense?
Thanks in advance for the help!
Johncons (talk) 22:38, 4 December 2007 (UTC)
What happens on the Norwegian Wikipedia is none of our business - complain there. What happens on some message board is none of our business - complain there. There is no evidence that kjetil or anyone else, other than you, has behaved badly on the English Wikipedia. Looking at Talk:Grandiosa, it seems everyone has been very patient with you, but you've been consistently making the same baseless claims for days. It's becoming difficult to believe you're interested in solving disputes constructively. -- Finlay McWalter | Talk 22:48, 4 December 2007 (UTC)
Ok, then I don't really see what the point is in me writing anything at all, if you don't believe what I'm writing.
Could you please confirm whether there is any point at all in my writing anything more?
Thanks in advance for the help!
Johncons (talk) 02:01, 5 December 2007 (UTC)
You might also want to consider abandoning your current username and starting a new account here under a different name, as long as you understand the basics of editing here. Skål! --Rodhullandemu (please reply here - contribs) 22:34, 4 December 2007 (UTC)
Okay, that might be a good idea.
I'll also see if I can find some of the evidence, with the initial post.
So, if it's alright, then I'll just bring that later.
Since I'm a bit new to this yet, it could maybe take me some time to find this.
So if it's alright, then I'll just return later, within a day or two, with the mentioned diffs.
If that's alright.
(Cheers back, yes, although I can't claim I have much beer here right now; I'll have to sort that out on a later occasion.)
And sorry if I'm a bit harsh in these posts; I think I need a break from Wikipedia, and then I'll return tomorrow or something like that.
### Response by involved party User:Mayalld
This is becoming somewhat tiresome! User:Johncons has been involved in major-league POV pushing (200+ edits over 3 days), which interested editors had attempted to tone down. They were faced with a stubborn insistence on adding the POV text.
I arrived as a disinterested editor, having noticed an unusual amount of activity on a single article from a new user account. After I spent a good deal of time reviewing 200 edits, to determine whether we had POV pushing or a case of established editors WP:OWNing an article, it became clear that this was POV pushing of a fringe theory. Accordingly I reverted back to the last good revision.
• Has an agenda. He believes that inconsistencies in accounts from a pizza maker as to whether they use soy protein or pork gelatin as a binder are obvious proof that they are actually using dead people.
• Is convinced that the fact that a user on wiki spotted who he was from his POV pushing whilst he was editing as an IP is proof that he is being stalked.
• Believes that chatrooms are reliable sources.
• Demands that he should be able to add what he wants and that people should discuss removing the additions.
• Writes voluminously on talk pages, demanding that people answer his questions, and dismisses any response that tries to point out that he is asking the wrong question.
• Is now engaging in wikilawyering by this vexatious report, and a similar report on WP:WQA. I believe that he hopes to drive away anybody who seeks to uphold policy against him.
Mayalld (talk) 22:17, 5 December 2007 (UTC)
## Côte d'Ivoire
Resolved
... blocked for 12 hrs Miranda 00:58, 5 December 2007 (UTC)
User:Robscure is doing a cut-and-paste move to Ivory Coast, trying to bypass WP:RM. Also, Ivory Coast is listed in Category:Protected redirects but is apparently no longer protected. --Polaron | Talk 00:44, 5 December 2007 (UTC)
He broke 3RR also. Miranda 00:56, 5 December 2007 (UTC)
...and got blocked for it. - Philippe | Talk 01:19, 5 December 2007 (UTC)
Note that the Ivory Coast redirect is semi-protected, not fully protected. Since this is a one-time event, I don't think full protection is warranted. -- lucasbfr talk 11:01, 5 December 2007 (UTC)
No, I think full protection is warranted, and have instituted it. Attempts to (non-consensually) get this article to "Ivory Coast" are common; the article text had already been pasted over the redirect once before. In the case that a real consensus to move the article back to "Ivory Coast" emerges, it should be an admin making the move anyways, so this protection won't get in their way. Otherwise, there's no need for edits, since it is a redirect. Any objections? Picaroon (t) 01:26, 6 December 2007 (UTC)
## Mmbabies
Hello, I'd like to know if there is any way of stopping banned user User:Mmbabies. He has created a ridiculous amount of sock puppets and does not show any sign of stopping. What can we do to stop him? Thanks. This has gotten really out of hand. Cheers,JetLover (Report a mistake) 05:17, 5 December 2007 (UTC)
WP:RBI is currently the only thing we can do, given other, often more drastic, measures that have been taken (and pretty much failed). —Kurykh 05:55, 5 December 2007 (UTC)
...or we could just block the /8 on those IP ranges... it's not like Houston has anything worthwhile to add to the discourse.--Isotope23 talk 19:53, 5 December 2007 (UTC)
## Move-Protect Occupation of the Gaza Strip by Egypt
Can I get an admin to move/rename-protect the article Occupation of the Gaza Strip by Egypt? Another editor keeps moving it (here and here) to Rule of the Gaza Strip by Egypt to make a WP:POINT in a discussion on Talk:Israeli-Palestinian conflict about the use of the term "occupied" when referring to the Israeli-occupied territories.
I made the mistake of pointing out that other Occupied territories are referred to as "occupied", citing said article as an example (here). The article was promptly re-named in a "ta-da! problem solved!" kind of way (here and here), without even bothering to change the first line in the article itself, which still referred to the territories as "occupied".
I undid the move (here), only for it to be moved again as soon as it was noticed (here). I recently moved it back (here) and would like it to be protected to avoid this kind of WP:POINTish edit-warring.
Cheers and thanks, pedro gonnet - talk - 05.12.2007 09:19
I agree that the page should be protected from moving, but only while keeping the word "rule" instead of the word "occupation" - pedro here has been extremely aggressive with his POV, and I don't quite yet see why it is of higher value than the one I am advocating. JaakobouChalk Talk 09:47, 5 December 2007 (UTC)
P.S. Here is the main page where we are "bickering": [19]. JaakobouChalk Talk 09:48, 5 December 2007 (UTC)
(EC2) Well, he's done it again (here)... I'm not really into edit-warring, so could any admin please move the name back and move-protect the article? Cheers and thanks, pedro gonnet - talk - 05.12.2007 09:50
BTW, the same type of WP:POINTyness is being used on the article Gilad Shalit by the same editor regarding the use of the term "hostage" (latest revert here). I'm not sure if this warrants admin action, but it's along the same lines... changing one article to make a point in a discussion regarding another article. Cheers, pedro gonnet - talk - 05.12.2007 09:53
I really don't want to bring down the level of conversation by linking to diffs of how you're "negotiating" your preferred version. No normative admin would implement "his version" before protecting a page. Please consider resolving disputes within the proper channels of WP:DR rather than "POINT"ing fingers at a person who's challenging your POV reverts. JaakobouChalk Talk 09:56, 5 December 2007 (UTC)
Amnesty International refers to him, in the linked source, as a hostage. What is your NPOV reasoning for not using the same language used by this commonly pro-Palestinian group, Pedro? Kyaa the Catlord (talk) 10:03, 5 December 2007 (UTC)
(ec) An admin will invariably protect the wrong version. And you're both at fault for not seeking dispute resolution, as you're both revert warring. Start a separate discussion just about this, start an RFC, get some third opinions. There's no point making an edit you know will be reverted. Please don't let content/wording disputes spill onto ANI. Someguy1221 (talk) 10:08, 5 December 2007 (UTC)
Yes, WP:DR is the right way to go, and there is an RfC going on at Talk:Israeli-Palestinian conflict. The problem is that User:Jaakobou is using edits on other pages to push a point. What does he do, exactly?
1. In the dispute regarding "occupied" vs. "disputed" I point out that many other articles, e.g. Occupation of the Gaza Strip by Egypt (look at the nice list on List of military occupations for more examples), use the term "occupied". User:Jaakobou, in response, renames the article to Rule of the Gaza Strip by Egypt to not have to concede that point in the argument.
2. In the dispute regarding the use of the word "hostage" for Gilad Shalit, I point out that in the article Gilad Shalit the term "POW" and not "hostage" is used, as discussed on Talk:Gilad Shalit. User:Jaakobou, in response, replaces "POW" with "hostage" in Gilad Shalit adding a reference to a one-liner in a BBC article referring to all prisoners in the conflict as "hostages". A few days later, the use of the word "hostage" in the Gilad Shalit article is used as an argument for using that term.
Again, I'm all for WP:DR and we are currently involved in that process, but these edits are being used as a weapon in those discussions. Whenever I make an argument of the type "but article XY says that...", User:Jaakobou goes and changes article XY. This is not the way discussions should work. Editors should not go and modify articles with the sole purpose of pushing their line in a WP:DR.
Cheers, pedro gonnet - talk - 05.12.2007 10:39
please stop "POINT"ing fingers, you have nothing that is not content based and the request tha an admin revert to your version and block the page shows a lack of understanding on core policies. JaakobouChalk Talk 10:49, 5 December 2007 (UTC)
POW is not fitting. Was Gilad captured by the Palestinian Authority? No. He was captured by a gang of thugs. I reviewed that talk page and found no evidence of a discussion to use "POW" to refer to this hostage. Kyaa the Catlord (talk) 10:48, 5 December 2007 (UTC)
← His status as a POW/hostage is a red herring/straw man. This is about modifying an article to make a point in a discussion elsewhere. Please stick to the topic. pedro gonnet - talk - 05.12.2007 10:59
No; if you bring something up on ANI, expect that your own behavior will be questioned. You're a party to the mess on these pages just as much as Jaakobou is. Kyaa the Catlord (talk) 11:48, 5 December 2007 (UTC)
• Move protected by Future Perfect at Sunrise. I added the {{pp-move}} tag. The problems with dispute resolution failure alleged above are not surprising, and I think we as an administrative community need to kick a lot of this mess up to the ArbComm, little as they will want to see yet another Israeli-Palestinian case. GRBerry 15:50, 5 December 2007 (UTC)
In response to some of the above, I recommended starting a new RFC just on this issue, on this article's talk page. That existing RFC is a sprawling debate of "stuff," and this relatively minor debate over one word in an article's title might be better served if separated entirely. Someguy1221 (talk) 18:51, 5 December 2007 (UTC)
I've started a discussion thread here. I would like to insist on the point, though, that this is not a content dispute -- it's disrupting Wikipedia to make a point. This goes pretty much in the same direction as the problems surrounding the "Allegations of XY apartheid" debate (here). Cheers, pedro gonnet - talk - 06.12.2007 07:05
## New sockpuppets of Bason0
Resolved: blocked and tagged
Hello administrators. A new suspected sockpuppet of Bason0 was confirmed in Wikipedia:Requests for checkuser/Case/Bason0#Bason0 (7th request). As a result, I request blocks on the following users:
Onlyonlyrules has been blocked indefinitely, so the 2 confirmed accounts above should be blocked the same way. For Moneyisalldesune in the report, I will file a WP:SSP report with reasoning later. --Nightshadow28 (talk) 14:30, 5 December 2007 (UTC)
You really don't need to report here, as an RFCU clerk like me will take care of it if the CU didn't. I just blocked them. RlevseTalk 19:09, 5 December 2007 (UTC)
## University of Windsor, Canada - Persistent vandalism after 5 previous blocks
More vandalism. 5 previous blocks have failed to prevent vandalism/disruption. Please block indefinitely. -- John (Daytona2 · talk) 15:31, 5 December 2007 (UTC)
We don't block IP addresses indefinitely. This one seems to be associated with the University of Windsor. It was previously subject to a one month schoolblock. There have been two recent unhelpful edits, as well as a number of good edits. My impression is that the IP is shared by many students. This should not be blocked unless there is a stronger sequence of bad edits. - Jehochman Talk 15:39, 5 December 2007 (UTC)
(ec) To get some perspective on this, the 5 previous blocks were between April 2006 and April 2007 (a 1-month block). No blocks since. Sporadic vandalism does occur, but given that this appears to be a proxy with, potentially, hundreds or even thousands of users behind it, the level of vandalism seems quite low. Between November 1st and today there were 10 edits, only three of which appear to be vandalism. I would not support an indef block, or in fact any block, at this time. Tonywalton Talk 15:42, 5 December 2007 (UTC)
Yeah, long-term blocks are only really a good idea if the significant majority of edits coming through are vandalism. If recent editing has been mostly good, a block is only going to keep out good editors. Short blocks (an hour or two) can be very effective if one bloke gets bored at a lab terminal. Appreciate your bringing it up here for review, though. – 21:31, 5 December 2007 (UTC)
## Massive Sock Problem
We have a major sock problem on our hands: Peter zhou/JackyAustine has a stockpile of prebaked sock accounts lying around. I had a private checkuser done yesterday and I thought I had them all, until now, when it turns out there are more...
These are the following that have been confirmed by the private checkuser last night (EST):
• Peter Zhou = Ie9ue7o8 = E oa4ai3 = Quagliu = Ui9eo u0 = Crau = Ua0oe6
• Faeprao = Bievuigrei = Sibuikroiwia = Tlutrao = Lin7ba = Traoprerao
• Wreegau = Eiionoeoie = Eauiaeoe = Sliucrei
• Dliekruazia = Ie4 = O5ia5iu3
• Seajion = A2ao1 = Ue7ui5u4a3 = Io4 = Ai3eo0ia7 = Ea ei2u9oi6 = Oi4au2ao5 = I8ai9ea0oe = Ei0ai1 = Dlu4mua4klea5 = Dre6biu7 = Ciepiuhu = Wiabiejiutlion = Paobroe
The following are the most recent diffs of PZ/JA socks on the articles China and Names of China:
Now, compare them to Peter zhou's contributions:
We need to deal with this, as it's causing much disruption on these two articles. nat.utoronto 17:56, 5 December 2007 (UTC)
Peter zhou seems to have been blocked indefinitely. Are you requesting a community ban? - Jehochman Talk 17:58, 5 December 2007 (UTC)
That and consensus for a temp. range block. nat.utoronto 18:04, 5 December 2007 (UTC)
I'm confused by the format of your results. Are these all Peter Zhou, or is each line a different sockmaster? —Random832 18:51, 5 December 2007 (UTC)
They are all PZ socks... that's how the checkuser's email message was formatted. nat.utoronto 18:54, 5 December 2007 (UTC)
Support ban from the wiki. Ask the CU that did the check to do the block of the IP. Why wasn't this at RFCU? RlevseTalk 19:15, 5 December 2007 (UTC)
This was a followup to Wikipedia:Requests for checkuser/Case/Peter zhou which was only just completed. The guy returned almost immediately, created another bunch of socks and got back to doing the exact same thing again. All of the above accounts are confirmed as being PZ. Indeed, it looks from this morning's edits that there are yet more still. Note that the underlying IP addresses are now also blocked. - Alison 20:03, 5 December 2007 (UTC)
A ban seems appropriate now that we have confirmation from checkuser. If no admin objects by the time this thread closes, we should add User:Peter zhou to Wikipedia:List of banned users as banned by the community. - Jehochman Talk 20:17, 5 December 2007 (UTC)
Can someone collect up the latest socks and file a report over on RFCU? There are quite a number of new socks already making their presence felt and it would be best to avoid confusion and log the case. Problem is, there are so many of them and it's happening so often - Alison 20:32, 5 December 2007 (UTC)
It would be nice if a few more administrators could watchlist China and Names of China. The sockmaster only edits the Etymology section of China and the Zhongguo section of Names of China. If the edit contains the words "Renowned sinologist Boyé Lafayeete De Mente" then that's the sockpuppet edit. Mr. De Mente has written some (I am sure quite useful) etiquette books, but this sockmaster is determined to make him into an international scholar. See the edit, ban the sock. SchmuckyTheCat (talk)
## Harassment
Both User:Funeral and User:Scarian, but mostly Funeral, have been harassing me. They keep putting a template on my page saying I am a sockpuppet of another user. They all keep arguing on my page and I'm sick of it! I just want them all to leave me alone. I don't know if they should be punished, or how that works, but I just want to be left alone from their dispute. They obviously have serious problems with each other. Funeral also broke 3RR on my talk page. Deathbringer from the Sky (talk) 19:56, 5 December 2007 (UTC)
Edit warring over user/talkpages isn't a very good idea. If someone feels you are a sockpuppet, a request for checkuser or sock investigation request would be more appropriate. In the interim, try reading WP:CIVIL and WP:NPA Deathbringer from the Sky, or I predict your time editing here will be short.--Isotope23 talk 20:03, 5 December 2007 (UTC)
Resolved
The anonymous user is there for no other purpose than to be a vandal; no constructive edits can reasonably be found. Most recent edits have been vandalism only. [21] [22] --Lucy-marie (talk) 19:56, 5 December 2007 (UTC)
Should be blocked for the death threat if nothing else. --Rodhullandemu (please reply here - contribs) 20:13, 5 December 2007 (UTC)
This is not a "death threat"; it is just a misplaced angry rant of a person who confuses wikipedia with message boards. `'Míkka>t 20:16, 5 December 2007 (UTC)
There is no indication that the handful of edits are coming from the same user. No reason to block this IP yet. In such cases you have to post a warning on the user's talk page (User talk:82.33.153.39); please take a look at how it is done and please do it yourself next time. `'Míkka>t 20:16, 5 December 2007 (UTC)
Resolved: blocked account per WP:USER and as a WP:SPA used for harassment only - Alison 22:12, 5 December 2007 (UTC)
This person apparently doesn't like me and thinks, well, see the user name User:BetterThanIrishLass. Comments harassing me were removed earlier from an anonymous IP address that he claims to be his. Regardless, his username is harassment and his only post was on my talk page harassing me. Thank you for your speedy attention to this. IrishLass (talk) 21:58, 5 December 2007 (UTC)
Sorry, reported this in wrong place. I moved the report to the proper section (I think). IrishLass (talk) 22:05, 5 December 2007 (UTC)
Done - account blocked - Alison 22:12, 5 December 2007 (UTC)
## Possible stalker?
The IP made two edits to Wikipedia:WikiProject Articles for creation today, insinuating that he or she stalks an underage girl, presumably from the same general area. I haven't done a WHOIS to determine the IP's location, but I'd like to ask the community to keep an eye on this, as it may require contacting local authorities at some point. Edits in question are here and here (they're identical, it was reposted after I reverted the first time). Thanks very much. GlassCobra 23:48, 5 December 2007 (UTC)
Note: User:ArielGold has requested that these edits be Oversighted, which is a good call; however, I would still appreciate more eyes on this IP's activities. Thanks! GlassCobra 23:58, 5 December 2007 (UTC)
It looks like a fixed ComCast IP address, probably just a kid messing around, but it's best we keep an eye on it. Chase me ladies, I'm the Cavalry (talk) 00:09, 6 December 2007 (UTC)
I've blocked the IP for a serious privacy violation - although it looks like it's been oversighted so you'll have to take my word for it. Ryan Postlethwaite 00:12, 6 December 2007 (UTC)
I saw it. It was correctly oversighted. --Rodhullandemu (please reply here - contribs) 00:40, 6 December 2007 (UTC)
Good non-inflammatory block summary. I saw it before the oversight, and nobody could possibly have disagreed with doing that. I agree with the Cavalry that it is probably a stupid joke only, and overreaction might not be appropriate. DGG (talk) 00:18, 6 December 2007 (UTC)
Thank you all for the opinions, and while the information perhaps did not fall under the strict letter of the oversighting policy, the content was inappropriate, even as a joke, which is why I requested it be removed. I would like to thank the oversighters for their swift action in removing both of the edits from view. I too, think it is most likely a kid messing around, but as there is no way to know, I think the short block is an appropriate action, given the situation. ArielGold 00:36, 6 December 2007 (UTC)
Just out of curiosity, did anyone ensure that there is some form of information that can be provided to police in case this *is* a serious stalker incident? I'm all in favour of oversighting, but there are a lot of stalkers who've been written off as "just kids" that turned out to be quite serious. Risker (talk) 00:47, 6 December 2007 (UTC)
If needed, the admin who oversighted it can view the info. And yes, there was enough information given that it would be possible to at least narrow down the town, if necessary. ArielGold 01:20, 6 December 2007 (UTC)
## User:Rock Soldier keeps adding personnel sections to album articles, and without the consultation of those who wrote them
Resolved: content dispute
This specific editor keeps insisting that personnel sections be added to album articles, just because WP:ALBUM has a guideline. This guideline has not been voted on by Wikipedia at large, yet he insists on invoking it as though it were official policy. I am not aware of any WikiProject guidelines having any official authority.
He has added personnel sections to three FAs I have written, and one GA, and plenty of other album articles. He does not consult the major contributors to the articles at all. Despite the fact that the three FAs passed FAC without personnel sections, he still insists on adding them to FAs I have written. I am frankly fed up with having to revert him, and want this to stop. He needs to stop adding personnel sections to articles others are major contributors to without consulting them. These articles he is adding personnel sections to he doesn't contribute to whatsoever. He is being a disruption. LuciferMorgan (talk) 00:18, 6 December 2007 (UTC)
Should we fully protect once it becomes an FA? I don't really have an opinion on the content dispute, but there definitely appears to be some ownership issues here. --OnoremDil 00:27, 6 December 2007 (UTC)
Ownership issues? There are absolutely no ownership issues whatsoever, for the record. If you would like to insinuate as such though, then feel free to say. I take such false accusations very seriously. Some of these articles passed FAC without personnel sections, so they need none now. Also, when this "editor" is adding these sections he isn't even consulting the contributors. LuciferMorgan (talk) 01:30, 6 December 2007 (UTC)
There is no requirement to consult people before making changes. Would it have been appropriate? Absolutely. Is this appropriate as your first attempt at discussion with him? How about your second attempt? Absolutely not. And yes, without question your comments here and some of your recent edit summaries lead me to believe you have ownership issues. --OnoremDil 01:39, 6 December 2007 (UTC)
I left him a note on his page. For future reference, dispute resolution is a more appropriate venue than WP:ANI for content disputes. Someguy1221 (talk) 00:53, 6 December 2007 (UTC)
No it isn't more appropriate, actually. I have no time for that, and no time to deal with disruptive editors like Rock Soldier. LuciferMorgan (talk) 01:30, 6 December 2007 (UTC)
It's not "disruptive" — he disagrees with you over a content issue, and it's only "disruptive" because it consumes your time disagreeing with him. He appears to be acting fully in good faith. Admins are not necessary here. --Haemo (talk) 01:37, 6 December 2007 (UTC)
He isn't acting in good faith. This is utter rubbish. LuciferMorgan (talk) 02:28, 6 December 2007 (UTC)
How are Rock Soldier's edits any more or any less disruptive than your edits, LuciferMorgan? Remember that there are always at least two people involved in an edit war. What SomeGuy1221 has told Rock Soldier applies to you as well: "Revert warring over edits that don't obviously violate official policy is never an acceptable behavior (even in such cases where you feel it is appropriate ..." This is a content dispute, and both sides should use talk pages and perhaps the help of a relevant WikiProject to come to a consensus, but there's nothing that requires admin intervention (yet?). AecisBrievenbus 01:39, 6 December 2007 (UTC)
Are you accusing my edits of being disruptive? That's an utterly disgusting accusation, and I don't welcome it in any way whatsoever. Also, your suggestion to use the help of a "relevant WikiProject" is rather silly. WikiProjects are mostly inactive, and it was me who wrote those three FAs, who researched them and so on, not some WikiProject. It's replies like yours that are disruptive. LuciferMorgan (talk) 02:31, 6 December 2007 (UTC)
LuciferMorgan, I understand it can be frustrating when someone makes changes you disagree with, but you are in fact bringing this problem to the wrong board. There is no policy requiring other editors to check with you before editing any article, including articles you've worked on extensively. You have licensed your contributions under the GFDL, which gives other people the right to edit them. It's also the basic foundation of Wikipedia. Because Rock Soldier is not doing anything patently against the rules, there is no need for administrator action here. You need to pursue the steps listed at dispute resolution. I'd suggest you try to talk to the other user about his changes first, if you haven't already. If you have and have gotten nowhere, mediation might be helpful.
Administrators do not have any special authority to arbitrate content disputes or determine whose changes are worthwhile and whose aren't. All we have are a handful of tools that are not available to other users, and the use of any of those tools in this situation would be inappropriate. Natalie (talk) 03:28, 6 December 2007 (UTC)
## From WP:AIV
The following was copied from WP:AIV. Chase me ladies, I'm the Cavalry (talk) 01:38, 6 December 2007 (UTC)
• IP address — brief reason for listing (keep it short). Chase me ladies, I'm the Cavalry (talk) 01:38, 6 December 2007 (UTC)
• Nergaal - Repeated copyright violator. He keeps copying and pasting articles from http://www.ici.ro/romania/en/ into Wikipedia texts. The texts on www.ici.ro are NOT tagged as free to reuse/copy. Sorry if I don't know the proper reporting format but I have to bring this up. Please don't be too harsh on him, he might mean well, but the crux of the matter is that those texts are not published with an explicit free-to-copy/reuse license. For example:
a) http://en.wikipedia.org/w/index.php?title=Bran_Castle&diff=175932409&oldid=175913851
is the same text from http://www.ici.ro/romania/en/turism/c_bran.html complete with the original typos in that text
b) - page created by him: Trei_Ierarhi_Monastery
is the same text from http://www.ici.ro/romania/en/orase/manastiri/trei_i11.html
c) - another page created by him: Suceviţa_Monastery
is the same text from http://www.ici.ro/romania/en/turism/m_sucevita.html and so on... Galaad2 (talk) 01:05, 6 December 2007 (UTC)
• Gave him a warning about this. It really is better handled at WP:AN/I. Daniel Case (talk) 01:16, 6 December 2007 (UTC)
## Fmatmi's user page
I would like some additional input on a dispute over whether or not User:Fmatmi should be allowed to state "The internet is to the Mormon Church what the printing press was to the Catholic Church" on his user page. This statement seems to me to be a violation of WP:SOAP: It is a confrontational statement of opinion unrelated to internal Wikipedia operations. I removed the sentence twice and said that it could be reinstated if it were reworded to be less divisive and confrontational.
Fmatmi has reverted my changes twice, stating that "Indepent third parties do not think it is an attack, it is an attack in your opinion only because you are an apologist. Move to your talk page or requrest arbitration."
So, what do you think? Is it a violation of WP:SOAP or not? —Remember the dot (talk) 02:49, 6 December 2007 (UTC)
I couldn't say for sure (I personally think it's soapboxing), but given what is stated above, someone needs to give him a bit of clue on civility here. -Jéské (Blah v^_^v) 03:03, 6 December 2007 (UTC)
If it's just one line, then no. If it's part of a major section of his/her page then yes. Either way I would have thought it more appropos of the Scientologists rather than the Mormons! ;) --WebHamster 03:06, 6 December 2007 (UTC)
His entire user page consisted of "The internet is to the Mormon Church what the printing press was to the Catholic Church" followed by his signature. He later added slightly more content, but this statement was still the main thing that people would have read. —Remember the dot (talk) 03:18, 6 December 2007 (UTC)
Most modern scholars point to the printing press as the midwife of the Reformation and hence the downfall of the Catholic Church not its base of power. Just clarifying. Carlossuarez46 (talk) 05:22, 6 December 2007 (UTC)
I'm inclined to let this one go. I don't see any significant harm. - Philippe | Talk 05:30, 6 December 2007 (UTC)
## Scott Keeler
Resolved: User blocked by
|
|
Hilbert-Schmidt and compact operators
I am new to this site and I don't really know how to ask questions properly, so I am sorry if I did something wrong.
My question is whether there is a way to prove that a Hilbert-Schmidt operator is compact directly from the definition of compact operators.
I can prove the result by noticing that: a Hilbert-Schmidt operator is the limit of a sequence of finite-rank operators, the limit of a sequence of compact operators is compact, and a finite-rank operator is compact.
And I was wondering if there is a more direct approach to the problem. Something like taking a bounded sequence $\{x_n\}_{n\in\mathbb{N}}$ in a Hilbert space $H$, and extracting a convergent subsequence from $\{T(x_n)\}_{n\in\mathbb{N}}$, where $T$ is a Hilbert-Schmidt operator.
|
|
# How do ML algorithms treat unseen data? A conceptual discussion
I want to predict the occurrence of certain events, but these events only occur say 5% of the time in my data, hence in 95% of the data there is nothing to learn.
In order to teach the ML algo something I have learned to single out the 5% and drop the rest of the data. Let us say that I want to predict if a picture is of a dog or a cat. In my data 2.5% of the pictures are of dogs and 2.5% of cats, the rest are just random pictures. So, I single out the cat and dog pictures and label them so that the ML algo can learn from that. Am I broadly right so far?
So, if I train my algo on only cat and dog pictures and get a satisfactory accuracy, what will then happen in live usage when 95% of the pictures are not of cats or dogs? I.e. I show my model a picture of a house, what does it predict? Will my algo always predict either cat or dog, or will it somehow tell me that it has no clue what this picture is?
Any thoughts?
Define two flag variables: flag_is_cat and flag_is_dog, which take on values of 1 if the picture shows a cat or dog, respectively, and 0 otherwise. Define another flag that takes on the value of 1 if the picture contains either a cat or a dog. In a word, label the data.
If you train the model using all of the pictures, even those with neither a cat nor a dog, then the model outputs a probability that the picture contains a cat, a probability that it contains a dog, and another that it contains either. This is the approach mentioned by @marco_gorelli. Dividing the probability that the picture has a cat by the probability that it has either a cat or a dog gives the probability that the picture has a cat conditional on the picture having at least one of them.
Alternatively, if you train a model using only those pictures that contain either a cat or a dog, then the model would output the probability that a cat is contained in the picture and that a dog is contained in the picture conditional on at least one of them being in the picture.
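A minimal numeric sketch of that division, with made-up probabilities (the variable names are mine, not from the original answer):

p_cat = 0.04     # model output: P(picture contains a cat)
p_either = 0.05  # model output: P(picture contains a cat or a dog)

# P(cat | at least one of cat or dog) = P(cat) / P(cat or dog)
p_cat_given_either = p_cat / p_either
print(p_cat_given_either)  # 0.8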
• Thanks for your comments. From my (limited) experience using NNs, a model using all pictures would converge to always predicting 0 (neither cat nor dog), because then the model would be right 95% of the time, even when balancing the class weights. I will try to experiment with what you say in your last paragraph. Cheers. – cJc Aug 10 at 15:43
• @cJc Have you considered using all three types of pictures, but showing the cat and dog pictures more often than the no-animal pictures, so that the model ends up seeing each of the three kinds of pictures equally often? – Tanner Swett Aug 10 at 16:30
• @TannerSwett What you are saying makes logical sense. I was hoping there was a universally accepted best practice for my question, but I guess this is more art than science. I will look into it, cheers. – cJc Aug 10 at 20:19
From what I can remember from Andrew Ng's Deep Learning course on Coursera, he recommends making vectors of the kind $(y, b_x, b_y, b_w, b_h, c_0, c_1),$ where:
• $y$ indicates whether the picture contains one of the objects you're looking at (so it'd be $1$ in $5\%$ of your examples and $0$ in the others);
• $b_x$, $b_y$ indicate the $x$ and $y$ coordinates of the bounding box's midpoint;
• $b_w$, $b_h$ indicate the width and height of the object's bounding box;
• $c_0$ is $1$ if the picture contains a dog and $0$ otherwise;
• $c_1$ is $1$ if the picture contains a cat and $0$ otherwise.
So, for example, a picture of a dog would get tagged as $(1, .3, .7, .2, .2, 1, 0)$, a picture of a cat of the same size in the same position would get tagged as $(1, .3, .7, .2, .2, 0, 1)$, and a picture with neither would have $0$ as its first coordinate and it wouldn't matter what the other coordinates were, as the initial $0$ has already signalled that the picture doesn't contain either of the objects we're seeking.
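As a sketch, labels of this kind could be built like so (the helper function is illustrative, not from the course):

import numpy as np

def make_label(y, bx=0.0, by=0.0, bw=0.0, bh=0.0, c0=0, c1=0):
    # (y, b_x, b_y, b_w, b_h, c_0, c_1); when y = 0 the rest is ignored by the loss
    return np.array([y, bx, by, bw, bh, c0, c1])

dog = make_label(1, .3, .7, .2, .2, c0=1)  # (1, .3, .7, .2, .2, 1, 0)
cat = make_label(1, .3, .7, .2, .2, c1=1)  # (1, .3, .7, .2, .2, 0, 1)
neither = make_label(0)                    # leading 0 signals "no object"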
|
|
# Why aren't Faraday's law of induction and Maxwell-Ampere's law symmetric? [duplicate]
I don't see why Faraday's law of induction and the Maxwell-Ampère law aren't totally symmetric; the Maxwell-Ampère law carries a factor of $\mu_0\varepsilon_0$: \begin{align} \nabla\times\mathbf E&=-\frac{\partial\mathbf B}{\partial t} \\ \nabla\times\mathbf B&=\color{blue}{\mu_0\varepsilon_0}\frac{\partial\mathbf E}{\partial t} \end{align} Can anyone explain why?
## marked as duplicate by Rob Jeffries, Kyle Kanos, Jon Custer, John Rennie, Mike Nov 30 '17 at 17:04
Faraday's law and Ampere's law are manifestly asymmetric, as Faraday's law is a homogeneous equation and Ampere's law is not. Therefore, I assume you mean in the absence of a current density, in which case the equations become (in SI units) $$\nabla \times \vec E = -\frac{\partial \vec B}{\partial t}$$ and $$\nabla \times \vec B = \epsilon_0 \mu_0 \frac{\partial \vec E}{\partial t}$$
There is a necessary asymmetry present because $\vec E$ and $\vec B$ have different dimensions, as do $\nabla$ and $\frac{\partial}{\partial t}$.
In the gaussian unit system, the electric field and magnetic field have the same units, so the equations become $$\nabla \times \vec E = -\frac{1}{c} \frac{\partial \vec B}{\partial t}$$ $$\nabla \times \vec B = \frac{1}{c} \frac{\partial \vec E}{\partial t}$$
We can do even better and use natural units where $c=1$, in which case we have $$\nabla \times \vec E = -\frac{\partial \vec B}{\partial t}$$ $$\nabla \times \vec B = \frac{\partial \vec E}{\partial t}$$
The lesson here is that it's important to be able to see through the outward appearance of an equation to its inner structure. Often times, constants can be "absorbed" by redefining certain quantities and/or changing unit systems. This is one aspect of a larger concept called nondimensionalization, and it's an important skill to develop as time goes on.
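As a concrete instance of absorbing a constant (my worked step, using $\mu_0\varepsilon_0 = 1/c^2$): define the rescaled field $\vec B' = c\vec B$ in the SI vacuum equations. Then $$\nabla \times \vec E = -\frac{1}{c}\frac{\partial \vec B'}{\partial t}, \qquad \nabla \times \vec B' = c\,\epsilon_0\mu_0\frac{\partial \vec E}{\partial t} = \frac{1}{c}\frac{\partial \vec E}{\partial t},$$ which is exactly the Gaussian-form pair above: the asymmetry was an artifact of the unit system, not of the physics.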
• @YassineSifeddine If you're satisfied with this answer, then you can mark it "accepted." Otherwise, if you wish for more clarification, then let me know. – J. Murray Nov 9 '17 at 3:25
• To be honest, I hope you can clarify more for me, please. – Yassine Sifeddine Nov 9 '17 at 3:28
• Can you explain what is causing you trouble? – J. Murray Nov 9 '17 at 3:40
• For example, if we add magnetic monopoles, Gauss's laws for the electric field and the magnetic field will be symmetric, but that's not the case for the Ampère and Faraday laws. – Yassine Sifeddine Nov 9 '17 at 3:47
• They are not precisely the same because of the minus sign, but why should they be? – J. Murray Nov 9 '17 at 3:48
|
|
# List NTFS Permissions on all Folders
In this guide, I’ll show you how to list the NTFS permissions for all folders and subfolders.
I’ll also show you how to export the NTFS permissions to a CSV file.
Check it out.
## Option 1: List and Export NTFS Permissions using GUI Tool.
For this first option, I’ll be using a GUI NTFS Reporting Tool to get a list of NTFS Permissions. The NTFS Reporter is part of a toolkit that includes 13 tools for managing Active Directory.
### Step 1: Select Folder Path
Enter or browse to the folder where you want to get NTFS permissions. You can select the folder depth, which means how many subfolders deep you want to report permissions. You can also choose to display the results in a tree view or grid view.
Next, click the run button at the top to get the NTFS folder permissions report.
Now I've got a list of NTFS permissions for the root folder and two subfolders deep. The report includes the path, the account, the directory owner, the permissions, what they apply to, and inheritance.
Very fast and easy to use.
### Step 2: Export NTFS Permissions to CSV
To export the list, select the export button and select “Export All Rows”
Here is an example export.
As you can see the GUI NTFS permissions reporting tool makes it very easy to get folder permissions and export them to CSV.
Key Features
• Select folder depth
• Easily export the report to CSV
• Browse results in tree or column grid
• Filter and sort on any column
• Add/Remove and rearrange the columns
## Option 2: Get NTFS Folder Permissions using PowerShell
To get NTFS folder permissions with PowerShell, the Get-Acl cmdlet is used. The one drawback to this command is that it doesn't get subfolder (recursive) permissions. To get around this, you can use the Get-ChildItem command and pipe it to Get-Acl. See the examples below.
### Example 1: Get NTFS Permissions on the Root Directory
For this example, I’ll get the NTFS permissions for my shared folder “\\srv-vm1\share”.
get-acl \\srv-vm1\share
The output doesn't look so great; it's missing a lot of valuable information. To see more details, you can pipe the results to format-list.
get-acl \\srv-vm1\share | format-list
That looks much better. I can now see the full permissions on this folder.
### Example 2: Get NTFS Permissions on Subfolders with PowerShell
To get subfolder permissions, you will need to use the Get-ChildItem command to first get all of the folders. The result is then piped to the Get-Acl command to get the permissions. Below is an example.
Get-ChildItem -Directory -Path "\\srv-vm1\share" -Recurse -Force | get-acl | format-list
You can see it is going through each subfolder in the root of my share folder and getting the permissions for each one.
You can now export this report to a CSV or text file. The below command will export to a text file.
Get-ChildItem -Directory -Path "\\srv-vm1\share" -Recurse -Force | get-acl | format-list | out-file c:\it\ntfs-report.txt
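To get a CSV instead, one approach is to flatten each access rule into its own row before exporting. A sketch using only standard cmdlets (the output path and column choices are mine, so adjust as needed):

Get-ChildItem -Directory -Path "\\srv-vm1\share" -Recurse -Force | ForEach-Object {
    $path = $_.FullName
    # One row per access rule, per folder
    (Get-Acl $path).Access | ForEach-Object {
        [PSCustomObject]@{
            Path      = $path
            Identity  = $_.IdentityReference
            Rights    = $_.FileSystemRights
            Type      = $_.AccessControlType
            Inherited = $_.IsInherited
        }
    }
} | Export-Csv -Path c:\it\ntfs-report.csv -NoTypeInformation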
## Summary
In this guide, I showed you two options to list NTFS permissions and export the report to CSV.
The GUI NTFS tool is very easy to use and is a great alternative for those who don't want to deal with PowerShell scripts. With PowerShell, it's a little harder to get NTFS permissions, as by default Get-Acl only shows root folder permissions.
Either option will work for creating NTFS permission reports.
## Resources
• Get-Acl – Microsoft command documentation. This command was used to get a security descriptor from resources (folders in this guide).
• Get-Childitem – Microsoft command documentation. This is the command used to get subfolders.
• NTFS Permissions Reporting Tool – This is the GUI tool used in option 1.
## Recommended Tool: Permissions Analyzer for Active Directory
This FREE tool lets you get instant visibility into user and group permissions and allows you to quickly check user or group permissions for files, network, and folder shares.
You can analyze user permissions based on an individual user or group membership.
### 4 thoughts on “List NTFS Permissions on all Folders”
1. Completely useful! I got one directory that gave me an exported CSV file with 165,441 rows. Imagine doing one directory at a time.
Thank you Robert!
|
|
# tf.math.unsorted_segment_prod
Computes the product along segments of a tensor.
Read the section on segmentation for an explanation of segments.
This operator is similar to the unsorted segment sum operator. Instead of computing the sum over segments, it computes the product of all entries belonging to a segment such that:
$$output_i = \prod_{j...} data[j...]$$ where the product is over tuples j... such that segment_ids[j...] == i.
#### For example:
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[4, 6, 6, 4],
#      [5, 6, 7, 8]]
If there is no entry for a given segment ID i, it outputs 1.
If the given segment ID i is negative, then the corresponding value is dropped, and will not be included in the result.
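A small sketch of those two rules together (the example values are mine, following the documented semantics):

import tensorflow as tf

data = tf.constant([1., 2., 3., 4.])
segment_ids = tf.constant([0, -1, 0, 2])  # the -1 entry is dropped

result = tf.math.unsorted_segment_prod(data, segment_ids, num_segments=3)
print(result.numpy())
# [3. 1. 4.]  -> segment 0: 1*3; segment 1: empty, so 1; segment 2: 4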
Args:
• data — A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16, complex128, half, uint32, uint64.
• segment_ids — A Tensor. Must be one of the following types: int32, int64. A tensor whose shape is a prefix of data.shape.
• num_segments — A Tensor. Must be one of the following types: int32, int64.
• name — A name for the operation (optional).
Returns: A Tensor. Has the same type as data.
|
|
lilypond-user
[Top][All Lists]
## Re: Organization of the piece part by part, not staff by staff
From: Mats Bengtsson Subject: Re: Organization of the piece part by part, not staff by staff Date: Fri, 11 Jan 2019 09:13:47 +0100 User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.2.1
On 2019-01-10 21:19, Павел Буданов wrote:
Hello everybody!
This approach is usually used: we instantiate staves and voices and put notes into them. This is a simple example:
\new GrandStaff <<
\new Staff \rightHand
\new Staff \leftHand
>>
But I want to organize the piece in a different way. For example, I want to engrave a rondo. First, I will set up the staff structure and write the initial settings:
\new GrandStaff <<
\new Staff { \key c \minor ... }
\new Staff { \clef bass \key c \minor ... }
>>
Then I declare the variables with music:
refren = <<
\relative c'' { g8 c e es4. f8 d4. es8 c2 }
\relative { r8 r4 c8 es g c c, f g b c, }
>>
episodeA = <<
... >>
episodeB = <<
... >>
And I will add this music to the score block:
\refren \bar "||" \episodeA \refren \episodeB
Is this possible? If so, how do I implement it correctly, and where is the documentation about it? Sorry for my bad English.
You probably already know this, but in addition to the answers you already have received, the simplest answer for the simplest special case, when you only have a single voice of music, is of course:
episodeA = {c d e f}
episodeB = {g f e d }
ending= {c1}
% Pasting it together:
{\episodeA \episodeB \ending}
However, as soon as you have several parallel voices/staves in each of the sections, then you either could paste each full voice together as described above, or use any of the techniques that Valentine mentioned in his reply, for example.
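For instance, pasting each hand's voice together section by section might look like this (a rough sketch with placeholder music, not from the original mail):

refrenRH = \relative c'' { g8 c e es4. f8 d4. es8 c2 }
refrenLH = \relative { r8 r4 c8 es g c c, f g b c, }
episodeRH = { c'4 d e f }
episodeLH = { c2 g }

\new GrandStaff <<
  \new Staff { \key c \minor \refrenRH \episodeRH \refrenRH }
  \new Staff { \clef bass \key c \minor \refrenLH \episodeLH \refrenLH }
>>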
/Mats
--
=============================================
Mats Bengtsson, Prof.
Information Science and Engineering
School of Electrical Engin. and Comp. Science
Royal Institute of Technology (KTH)
SE-100 44 STOCKHOLM
Sweden
Phone: (+46) 8 790 8463
|
|
# NCERT solution for class 9 science: Natural Resources (Chapter 14)
#### Solution for Exercise Questions
1. Why is the atmosphere essential for life?
The atmosphere is essential for life because of the following reasons:
1. The atmosphere consists of gases essential for life, such as O2, N2, and CO2.
2. Photosynthesis depends on the CO2 supplied by the earth's atmosphere.
3. The atmosphere keeps the earth's average temperature fairly steady.
4. Processes like respiration, burning, and combustion depend on the atmosphere.
5. The atmosphere (its ozone layer) prevents harmful UV rays from reaching the earth.
2. Why is water essential for life?
Water is essential for life because of the following reasons:
1. Biological activities such as respiration, digestion, and other biochemical reactions are supported by water.
2. Living beings are composed of more than 70% water.
3. Transportation of substances from one part of the body to another takes place through water.
3. How are living organisms dependent on the soil? Are organisms that live in water totally independent of soil as a resource?
All living organisms on the earth depend directly or indirectly on soil for a living. Plants obtain water and minerals through the soil and prepare their food. Organisms that live in water are not totally independent of soil either, because the microbes growing on the soil in water are the primary producers, and primary producers form the base of the food chain. Various microbes found in soil also help in the decomposition of dead plants and animals in water, which returns nutrients and elements to the water.
4. You have seen weather reports on television and in newspapers. How do you think we are able to predict the weather?
Weather is studied as the collection of various elements like high and low temperatures, humidity, rainfall, and wind speed, measured with the relevant instruments. Hence, based on the data collected by meteorologists, we are able to forecast the weather.
5. We know that many human activities lead to increasing levels of pollution of the air, water-bodies, and soil. Do you think that isolating these activities to specific and limited areas would help in reducing pollution?
Human activities are a main source of air pollution, and air is a medium that spreads pollutants to water and soil. Hence we can say that isolating these activities to specific and limited areas would help control pollution to some extent.
6. Write a note on how forests influence the quality of our air, soil and water resources.
Forests influence the quality of our air, soil and water resources by:
1. Forests help in purifying the air and increasing its oxygen content by absorbing carbon dioxide and releasing oxygen.
2. Forests help in replenishing water resources.
3. Forests help in the prevention of soil erosion.
#### Solution for Intext Question
Q. How is our atmosphere different from the atmospheres on Venus and Mars?
Earth's atmosphere differs from the atmospheres of Venus and Mars in the composition of its air. Earth's atmosphere is composed of various gases like oxygen, carbon dioxide, nitrogen, and water vapour, along with various other gases in small quantities. Planets like Venus and Mars have more than 95% carbon dioxide in their atmospheres, making the existence of life impossible.
Q. How does the atmosphere act as a blanket?
The atmosphere acts as a blanket because:
(i) It regulates the temperature throughout the day, making the earth a comfortable place to live.
(ii) The ozone in the atmosphere does not allow harmful ultraviolet rays to pass through and reach us.
Q. What causes winds?
All the movements of air resulting in diverse atmospheric phenomena are caused by the uneven heating of the atmosphere in different regions of the Earth. Air that is heated more rises up, creating a region of low pressure; air from high-pressure regions then moves in to occupy the low-pressure region, causing wind. Various other factors also influence these winds; the rotation of the Earth and the presence of mountain ranges in the paths of the wind are a couple of these factors.
Q. How are clouds formed?
During the daytime, in the presence of sunlight, water from sources like wells, lakes, ponds, seas, and rivers gets evaporated, and this water vapour rises with the hot air. At a particular height the air cools, and the water vapour condenses into minute droplets to form clouds.
Q. List any three human activities that you think would lead to air pollution.
Three human activities that would lead to air pollution are:
1. Burning of fuels like petroleum, kerosene, and coal
2. The smoke released from manufacturing industries
3. Smoke from vehicles
Q. Why do organisms need water?
Organisms need water for various purposes like:
1. To carry out cellular processes
2. Photosynthesis in plants
3. Transportation of substances in the body takes place through water.
Q. What is the major source of fresh water in the city/town/village where you live?
The major source of fresh water in the city/town/village are:
1. Rainfall
2. Underground water from wells
3. Water sources like pond, river, and lake
Q. Do you know of any activity which may be polluting this water source?
Activities which pollute water sources are:
1. Dumping waste products in the river, sea or any other water bodies.
2. Factory waste
3. Sewage waste.
Q. How is soil formed?
The outermost layer of our Earth is called the crust and the minerals found in this layer supply a variety of nutrients to life-forms. But these minerals will not be available to the organisms if the minerals are bound up in huge rocks. Over long periods of time, thousands and millions of years, the rocks at or near the surface of the Earth are broken down by various physical, chemical and some biological processes. The end product of this breaking down is the fine particles of soil.
The factors or processes that make soil are:
1. The Sun
2. Water
3. Wind
4. Living organism
Q. What is soil erosion?
The removal of the most fertile top layer of soil by agents such as water and wind is called soil erosion.
Q. What are the methods of preventing or reducing soil erosion?
The methods of preventing or reducing soil erosion are:
1. By planting trees (afforestation).
2. By preventing overgrazing.
3. By preventing removal of the topsoil.
Q. What are the different states in which water is found during the water cycle?
Water is found in three states during the water cycle, which are:
1. Solid state, in the form of snow and ice
2. Liquid state, in the form of river water and underground water
3. Gaseous state, in the form of water vapour
Q. Name two biologically important compounds that contain both oxygen and nitrogen
Two biologically important compounds that contain both oxygen and nitrogen are:
1. DNA (Deoxyribonucleic Acid) and RNA (Ribonucleic Acid)
2. Amino acids
Q. List any three human activities which would lead to an increase in the carbon dioxide content of the air.
Three human activities which would lead to an increase in the carbon dioxide content of the air are:
1. Respiration (breathing)
2. Burning of petrol, coal, and other fuels
3. Use of refrigerators, air conditioners, and ovens
Q. What is the greenhouse effect?
Gases like carbon dioxide and methane trap the sun's radiation and do not allow it to escape back into space, warming the atmosphere. This warming is called the greenhouse effect.
Q. What are the two forms of oxygen found in the atmosphere?
Two forms of oxygen found in the atmosphere are:
1. Oxygen- $$O_2$$
2. Ozone- $$O_3$$
|
|
# Skipping Bases
$\large 123_4 \qquad 123_5 \qquad 123_6$
The above shows three numbers, each written in a different base representation. Which of these numbers has the largest value?
|
|
# Q: Assume X is normally distributed
Assume X is normally distributed with a mean of 5 and a standard deviation of 4. Determine the value for x that solves each of the following:
(a) P(X > x) = 0.5
(b) P(X > x) = 0.95
(c) P(x < X < 9) = 0.2
(d) P(3 < X < x) = …
(e) P(−x < X < x) = …
KIRAN K
Given that mean m = 10 and SD s = 2, the standard normal variable Z is given by Z = (X − m)/s.
Question Status: Solved
|
|
Can you remove a factor from your model if it has a significant effect, but the removal improves AIC and R square?
I have a complex problem but the title sums it up pretty easily.
I have four types of cages that manipulate water flow, but I also have an actual measure of water flow from inside the cages. I'm wondering if I can just use one or the other, if I should include both, or if I should nest them.
The best fit seems to be with just the actual measure of flow, but if I use them both, the cage type has a significant effect.
Any tips?
• It should not be possible to increase $R^2$ by removing a variable. It is possible to improve (i.e. reduce) AIC, though. – Richard Hardy Apr 13 '18 at 17:23
• Sorry, you're right, but the difference between the Rsquare and the adjusted Rsquare does decrease with the removal of the factor, which I thought was a good sign...right? – Nathan Haag Apr 13 '18 at 18:36
• No, I do not think this would indicate anything interesting. Simply the adjustment factor changes as there are fewer variables. – Richard Hardy Apr 13 '18 at 18:38
• The answer has to depend on your analytical objectives. If you're trying to determine whether cage type is associated with flow, then you don't want to drop cage type. If you're only trying to predict flow, or if you will later encounter many more cage types than appear in your data, then you might not use cage type--but you wouldn't use statistical significance as a criterion to determine that. You would use some appropriate measure of predictive accuracy. – whuber Apr 13 '18 at 19:13
• Some prominent researchers like Rob J. Hyndman (here) argue that statistical significance should not be used as a criterion for variable selection, while AIC may be, especially if the goal is forecasting. – Richard Hardy Apr 13 '18 at 19:19
As @whuber suggested, you may be primarily interested in (1) the effect of cage type on your dependent variable (DV), (2) the effect of water flow on your DV or (3) predicting your DV.
For (1), you can report this effect with or without adjusting for water flow.
For (2), you can report this effect with or without adjusting for cage type.
The reported effects will have different meaning - for instance, assuming cage type and water flow have fixed effects, the effect of water flow on the DV will apply to all cages of the same type (with adjustment for cage type) or to all cages irrespective of their type (without adjustment).
So, for (1) or (2), you really need to figure out what you are interested in reporting at the end of the day and go from there.
For (3), @whuber already gave you valuable hints.
• The basic question of this chapter of my PhD is comparing how 2 mussel species react under different water flow regimes, so I guess I'm leaning toward (1) or (2). I personally don't care about the cage types because they're not an industry standard, I just made them out of ABS piping (although they're easy to make, so if anyone wants to repeat it, there could be some novelty in knowing the effect they have). My advisor suggested just using the actual measure of water flow, but I wanted to be sure I wasn't eliminating important information. – Nathan Haag Apr 14 '18 at 23:30
|
|
# Tag Info
19
These are all good questions. Perhaps I can answer a few of them at once. The equation describing the violation of current conservation is $$\partial^\mu j_\mu=f(g)\epsilon^{\mu\nu\rho\sigma}F_{\mu\nu}F_{\rho\sigma}$$ where $f(g)$ is some function of the coupling constant $g$. It is not possible to write any other candidate answer by dimensional analysis ...
14
The easiest way to see imaginary time used is in elementary quantum mechanics in one dimension. (This is the explanation cribbed from Wikipedia.) Suppose we're looking at a tunneling-through-a-barrier problem. We start with the Schrodinger equation: $$-\frac{\hbar^2}{2m}\frac{d^2\psi(x)}{dx^2}+V(x)\psi(x) = E\psi(x)$$ Make the ansatz $\psi(x)$ ...
9
I will add to twistor59's answer. Hawking liked the concept of imaginary time $\tau=\mathrm{i}t$ because it transforms a Lorentzian metric $$ds^2 = -c^2 dt^2 + dx^2 + dy^2 + dz^2$$ into a four-dimensional Euclidean-like metric $$ds^2 = +c^2 d\tau^2 + dx^2 + dy^2 + dz^2$$ Hawking and others believed that a quantum gravity theory could be developed in this ...
9
Let us look at the instantons of an ordinary pure Yang-Mills theory for gauge group $G$ in four Euclidean dimensions: An instanton is a local minimum of the action $$S_{YM}[A] = \int \mathrm{tr}(F \wedge \star F)$$ which is, on $\mathbb{R}^4$, precisely given by the (anti-)self-dual solutions $F = \pm \star F$. For (anti-)self-dual solutions, ...
5
It's Stokes's theorem. Consider a field $F = dA + A \wedge A$ such that $A$ is pure gauge at infinity, that is, $\lim_{x\to\infty} A(x) = \omega\, d \omega^{-1}$ for some $\omega : S^3 \to SU(2) \sim S^3$, where $\omega$ is a function on the 3-sphere because the limit can depend on the direction out to infinity. In differential forms the first expression is ...
5
The reason why classical solutions add a "lot" to the path integral is that their action (phase) is stationary, i.e. almost the same phase as the action (phase) in a reasonably large vicinity of them in configuration space; one gets positive interference as a consequence. More generic paths cancel with the adjacent ones whose phases are different and random. ...
4
I) This is discussed around eq. (23.7.1) on p. 462 in Ref. 1. The task is to perform the path integral $$\tag{1} \int_{BC} [d\phi]e^{\frac{i}{\hbar}S[\phi]} ~=~\sum_{\nu}\int\! du \int_{BC_0} [d\phi_q]e^{\frac{i}{\hbar}S[\phi_{cl}+\phi_{\nu,u}+\phi_q]}$$ over fields $\phi$ with some (possibly inhomogeneous) boundary conditions BC. This is done by ...
4
This is apparently too long for a comment, so it's going to be fleshed out into at least (hopefully) a partial answer. A big problem I see with this is determining which theories can live in the same universe. In this way, I think Liang Kong's Mathematical Theory of Anyon Condensation provides a way forward. To reconstruct your set up: Consider three ...
4
Fermionic zero modes on an instanton background are in one-to-one correspondence with solutions to the Dirac equation $$(i D_\mu \gamma^\mu - m)\Psi = 0$$ where the covariant derivative $D_\mu$ contains the gauge field term, with the gauge field defining the instanton solution substituted into it. We want to study these solutions in Euclidean spacetime. ...
4
For the pure e.g. $d=4$ gauge theory instanton and gauge field perturbations around it, there is no negative mode – the counterpart of the bound state. It's the only one among the 3 classes that is absent here. One may see this absence by noticing that the gauge theory may be embedded into a supersymmetric theory with the same gauge-field degrees of ...
4
A soliton is a localized, non-dispersive solution of a nonlinear theory in Euclidean space. It certainly is a real object: you have a famous story about a certain John Russell who observed soliton-like waves made by a boat on a river (Wikipedia knows everything about it!) The so-called morning glory clouds in Australia ...
3 There is no better definition than what Wikipedia offers - in general, a topological excitation is a (field) state, i.e. a localized quantity since fields depend on spacetime, whose integral is a topological invariant. One prime example are Yang-Mills theories in 4D, where the integral \int \mathrm{Tr}(F\wedge F), as essentially the second Chern class of ... 3 The sphaleron is kind of the opposite of the instanton, and kind of the same. Let's make that statement precise: An instanton is a local minimum of the action that mediates vacuum tunneling (link to an answer of mine how and why instantons do that). The sphaleron sits in-between the vacua, in a certain sense, it is the instanton "in the middle of ... 3 Instanton calculations involve integrals over collective coordinates. One of these is the instanton size \rho. Reliable instanton calculations are those for which the integral over instanton sizes is dominated by small sizes so that (for asymptotically free theories) the coupling constant is small and higher order corrections in the semi-classical ... 3 Yes, you can recover the Seiberg-Witten prepotential from the Darboux coordinates X_\gamma (and indeed also from their "semiflat" versions X_\gamma^{sf}). The reason is the asymptotic property X_\gamma \sim exp(\pi R Z_\gamma / \zeta) as \zeta \rightarrow 0 (up to a \zeta-independent constant). Thus knowing X_\gamma is sufficient to recover ... 2 TO have an instanton solution, you need to map the (euclideanized) "spacetime at infinity" to the group manifold. In the case of SU(2), both the spacetime at infinity and the group manifold are S^3 and instantons are characterized by the integers. I hope you understand that much, at least for SU(2). If you're interested in 4d instantons, they are ... 2 I think the path-integral is a complete red herring here! I'll try to convince you that Wick rotation yields completely equivalent way of writing the Lagrangian in classical field theory. Consider a classical action$$S[x] = \int L[x(t)] dt where $x:\mathbb{R} \to \mathcal{M}$ for some target manifold $\mathcal{M}$. The Lagrangian is schematically given ...
2
Where a string carves out a $2$-dimensional world-sheet and a point particle carves out a $1$-dimensional world-line of spacetime, the instanton carves out a $0$-dimensional world-point. Counting only spatial dimensions, a string is $1$-dimensional and a point particle is $0$-dimensional. By logical extension, an instanton has dimension $-1$, if we only ...
2
Okay, I cannot give you a full understanding of what is going on, but I can make the objects we are dealing with more precise: There are two spaces here: The moduli space $M_\text{sh}(r,k)$ of framed torsion-free coherent sheaves of rank $r$ and second Chern class $k$ on the projective scheme $\mathbb{P}^2$ viewed as a complex analytic space with its ...
2
1) If $\phi$ is non-singular, you can safely multiply both sides by $\phi$ and get $\square \phi = \phi*0 = 0$. If $\phi$ is singular you can't do this because $\phi * 0$ is undefined. Equivalently, where $1/\phi=0$ you can have $\square\phi$ nonzero, as shown in (4.64). The singularities in $\phi$ will map to zeroes in $1/\phi$. 2) I believe only the ...
2
I'm not sure why you are asking, because you seem to mention the answer already. This problem has been studied thoroughly in the late 70's by Belavin et al and 't Hooft. As far as I understand, the quantum vacuum is the lowest energy eigenstate of a Hamiltonian. It turns out that the classical solutions to the equations of motion (of a particle, or a field) ...
1
1) No. 2) The usual way of doing it is to first solve the instanton solution in Euclidean time, which is equivalent to obtaining the soliton solution for a given potential. Since you have read the book, I am not going to explain how it is done for this case. Then, plug your instanton solution into the Euclidean action and evaluate it. Since ...
1
Let me first refer you to three references pedagogically treating Instantons in quantum mechanics: 1)Riccardo Rattazzi's lecture notes treating instantons in nonsupersymmetric quantum mechanics. In these notes the anharmonic oscillator model is elaborated with great detail 2) Philip Argyres lecture notes treating instantons in supersymmetric quantum ...
1
There seems to be a slight confusion about the meaning of solution: The principle of least action leads to the equation of motion (Euler-Lagrange equation), which correspond to a minimum of the action functional. These equations can have multiple solutions, so there is no contradiction in the formalism. There can multiple solutions that minimize the energy, ...
1
A vacuum is a field configuration that is pure gauge, i.e. $A = g^{-1}\mathrm{d}g$ for a gauge transformation $g$, and hence $F = \mathrm{d}_A A = 0$ (for $\mathrm{d}_A$ the gauge covariant derivative). An instanton is a local minimum of the action, which is given by an (anti-)self-dual configuration $F = \pm \star F$. It is not a vacuum for non-zero ...
1
I know this is a year old question, but I am going to attempt an answer. As far as I can tell, this is not really a caveat. The reason for this is that I can always set the overall phase of the quark mass determinant to be zero with a chiral U(1) transformation. For a discussion of this see for example the chapter on theta vacua in Weinberg's QFT book. The ...
1
Maybe this would be better as a comment, since it is not a full answer, but I don't have enough reputation for that. The most important ambiguity is that there is an infinite number of functions that have the same asymptotic expansion. As an example, if $f(g)$ has some asymptotic expansion in $g$ as $g \to 0$ than $f(g) + e^{-1/g^2}$ has exactly the same ...
1
Instantons are characterized by the winding number and a set of collective parameters (e.g. location of the centers of the instantons, their sizes and the inequivalent orientations in the global group space / space-time). Quantum fluctuations of a unit winding number instanton can either leave the collective parameters unchanged (non-zero modes), or change ...
Only top voted, non community-wiki answers of a minimum length are eligible
|
|
GR9677 #64
Problem
GREPhysics.NET Official Solution Alternate Solutions
This problem is still being typed.
Advanced Topics $\Rightarrow$ Nuclear Physics
In symmetric fission, the change in kinetic energy is just the change in binding energy. The change in binding energy for an $N$-nucleon heavy nucleus is the difference in energy between the initial un-fissioned heavy nucleus and the final two medium-sized nuclei,
$\Delta E = 2\times 0.5 N \times 8\,\mathrm{MeV/nucleon} - N \times 7\,\mathrm{MeV/nucleon} = N \times 1\,\mathrm{MeV/nucleon}$.
For a heavy nucleus, one has $N\approx 200$, and thus one arrives at choice (C).
(This is due to David Schaich.)
Alternate Solutions
There are no Alternate Solutions for this problem. Be the first to post one!
ddk1315
2014-02-09 12:23:10
asdfuogh: 934 is the rest mass of a proton; it has nothing to do with binding energy here.
The way to answer this question is by first calculating the binding energy released per nucleon in a fission reaction, which as shown above is 1 MeV, and then a quick look at the options available should make it obvious that the only sensible answer is (C), as it corresponds to 200 nucleons. (D and E are certainly too small for fusion, and I have never heard of an atom that has 938 nucleons, let alone 1876, so it's not A or B either.)
2013-03-30 10:51:37
I don't understand this problem. Does the initial binding energy get converted to the binding energy of the two medium-sized nuclei plus the kinetic energy of those nuclei? If that's the case, the kinetic energy is −200 MeV, which makes no sense.
asdfuogh
2011-10-09 21:56:36
I'd pick something less than 934 MeV because that's the energy you get if you had a 100% conversion of a proton. Then, I'd pick higher than 8 MeV because it's the binding energy per a nucleon.. but that reason is, well, I don't think it's very rational. But at least we get rid of two other answers!
Setareh
2011-10-07 14:07:00
I didn't understand where the "0.5" came from in Yosun's equation: 2 × 0.5 × N × 8 MeV/nucleon! Can anyone help me?
mets501 2011-10-09 16:31:02 There are two products of the decay, each having half the number of nucleons of the original one, hence $2\times 0.5N$ (that's what fission does).
ticklecricket
2010-11-11 20:46:43
Who gets to decide what is heavy and what is medium-weight?
testtest
2010-09-20 11:32:42
Why do the final state particles have *higher* total binding energy after fission than the original nucleus???
pkamal 2010-10-02 20:46:18 The final binding energy is indeed smaller. It's the final binding energy per nucleon that's higher.
petr1243
2008-02-16 21:28:31
Q = (# of nucleons) × (|change in binding energy per nucleon|). Using the mass number of a heavy nucleus (A = 200): Q = (200)(8 − 7) MeV = 200 MeV
grace 2010-11-07 18:29:56 I agree with you
relain
2007-10-31 10:19:08
Also, it might help to remember that iron is the most stable nucleus, with A ~ 56, and uranium is about 250 (232, 252, whatever); so if this is a large unstable nucleus and each nucleon gives 1 MeV, you're going to get about 200...
Void
2005-11-10 06:49:25
I've seen this number (200 MeV/fission) appear in a lot of elementary physics books and even a lot of lectures. If you forget everything else, just remember 200 MeV per fission...
|
|
# Predicted probabilities from probit
Assume the following probit model:
$P(y_i = 1)$ = $\Phi(\beta_0+\beta_1x_1+\beta_2x_1^2+\beta_3d_1+\beta_4d_2)$, where $\Phi$ is the standard normal CDF and $d_1$ and $d_2$ are dummies
or in Stata:
probit y_i x1 xsq d1 d2
Now I want to predict the probabilities $P(\hat{y_i} = 1)$ for each observation x. This seems very simple but I keep failing to program it in Stata.
I tried:
predict pr, xb
But this gives me values greater than 1.
Any ideas?
• You are retrieving the linear index. Try predict predicted_probabilities, pr. Mar 17 '15 at 11:14
• Previous question title "Plot predicted probabilities of probit against observations" had nothing to do with the question. Mar 17 '15 at 11:21
This is on the face of it a Stata question, but there is a statistical confusion at its core. Here is wrong and right syntax for what you want exemplified.
. sysuse auto, clear
(1978 Automobile Data)
. probit foreign mpg weight
Iteration 0: log likelihood = -45.03321
Iteration 1: log likelihood = -29.244141
Iteration 2: log likelihood = -27.041557
Iteration 3: log likelihood = -26.84658
Iteration 4: log likelihood = -26.844189
Iteration 5: log likelihood = -26.844189
Probit regression Number of obs = 74
LR chi2(2) = 36.38
Prob > chi2 = 0.0000
Log likelihood = -26.844189 Pseudo R2 = 0.4039
------------------------------------------------------------------------------
foreign | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
mpg | -.1039503 .0515689 -2.02 0.044 -.2050235 -.0028772
weight | -.0023355 .0005661 -4.13 0.000 -.003445 -.0012261
_cons | 8.275464 2.554142 3.24 0.001 3.269438 13.28149
------------------------------------------------------------------------------
. predict pr
(option pr assumed; Pr(foreign))
. predict xb, xb
. su pr xb
Variable | Obs Mean Std. Dev. Min Max
-------------+--------------------------------------------------------
pr | 74 .294487 .3074146 9.52e-06 .9029781
xb | 74 -.9904795 1.376307 -4.275976 1.298709
As the help explains (just read help probit postestimation), the default for predict after probit is to give predicted probabilities, and that is what you want. By insisting on xb, you got the linear predictor. You can get what you want by pushing your predictions through the cumulative standard normal (in Stata normal()) but just using the default gets you there directly. In essence you want a back-transformation to the probability scale, but that is so common a need that Stata (and presumably all good statistical software) provides it directly.
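Concretely, pushing the linear predictor through normal() reproduces the default predicted probabilities (continuing the session above; pr2 is my variable name):

. gen pr2 = normal(xb)

. assert abs(pr - pr2) < 1e-6

normal() is Stata's standard normal cumulative distribution function, so pr2 matches pr for every observation.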
|
|
# Calculating Missing Amounts
Required:
For each of the following independent cases (A–E), compute the missing values in the table below.
|
|
# Python: Deep and Shallow Copy Object
### Introduction
In this article, we'll take a look at how to deep and shallow copy the objects in Python.
The short answer is that you can use methods of the copy module, for both operations:
import copy
shallow_copy_list = copy.copy(original_list)
deepcopy_list = copy.deepcopy(original_list)
Though, what does it mean to copy something in a shallow or deep fashion?
In the proceeding sections, we'll dive into what these terms mean, how Python treats object references and objects in memory, and why these two methods work the way they do.
### Shallow Copy an Object in Python
When we use assignment statements (=) in Python to create "copies" of compound objects, such as lists, class instances, or basically any objects that contain other objects, Python does not clone the object itself. Instead, it simply binds the new name to the targeted object.
Imagine that we have a list with the following elements in it:
original_list = [[1, 2, 3], [4, 5, 6], ["X", "Y", "Z"]]
If we try to copy our original list using the assignment statement as follows:
shallow_copy_list = original_list
print(shallow_copy_list)
It may look like that we cloned our object and now have two of them:
[[1,2,3], [4,5,6], ['X', 'Y', 'Z']]
But, do we really have two objects? No, we don't. We have two reference variables pointing to the same object in memory. This can easily be verified by printing the ID of the object in memory for both of these:
id(original_list) # 4517445712
id(shallow_copy_list) # 4517445712
A more tangible proof of this can be observed by attempting to change a value in either of "the two lists" - while in reality, we change the same list, and both pointers point to the same object in memory.
Let's access the last element of the object pointed to by original_list:
# Last element of last element
original_list[-1][-1] = "ZZZ"
print(original_list)
This results in:
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'ZZZ']]
Knowing that both reference variables point to the same object, printing shallow_copy_list will return the same result:
print(shallow_copy_list)
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'ZZZ']]
Shallow copying, strictly speaking, means constructing a new compound object and inserting into it references to the objects found in the original. The assignment above is an even shallower operation: original_list and shallow_copy_list are merely two references pointing to the same address in memory (RAM) that stores the value [[1, 2, 3], [4, 5, 6], ['X', 'Y', 'ZZZ']].
It is also possible to create a shallow copy of an object using a slice of the entire list and assignment statement:
slice_shallow_copy_list = original_list[:]
Another way to shallow copy is to use the copy module of Python’s standard library.
To use the copy module, we must first import it:
import copy
Now we can use the copy() method of the copy module:
## Free eBook: Git Essentials
Check out our hands-on, practical guide to learning Git, with best-practices, industry-accepted standards, and included cheat sheet. Stop Googling Git commands and actually learn it!
second_shallow_copy_list = copy.copy(original_list)
Print them both to check that they contain the same values:
print(original_list)
print(second_shallow_copy_list)
As expected, they do:
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'ZZZ']]
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'ZZZ']]
Usually, you will want to copy a compound object, for example at the beginning of a method, then modify the clone while keeping the original intact for later use.
To achieve this, we need to deep copy the object. Let's now look at what a deep copy is and how to deep copy a compound object.
### Deep Copy an Object in Python
Deep copying an object means truly cloning the object and its values into a new instance in memory.
Rather than creating a new reference to the same values, a deep copy creates a new object that is independent of the original but contains equal values.
In a typical deep copy, a new compound object is created first, and then copies of the child objects are inserted into it recursively.
This way, unlike with a shallow copy, modifying the original object is not reflected in the copy (or vice versa).
To deep copy an object in Python, we use the deepcopy() method of the copy module.
Let's import the copy module and create a deep copy of a list:
import copy
original_list = [[1,2,3], [4,5,6], ["X", "Y", "Z"]]
deepcopy_list = copy.deepcopy(original_list)
Now let's print our lists to make sure the outputs are the same, as well as their IDs as proof of their uniqueness:
print(id(original_list), original_list)
print(id(deepcopy_list), deepcopy_list)
The output confirms that we've created ourselves a genuine copy:
4517599280 [[1, 2, 3], [4, 5, 6], ['X', 'Y', 'Z']]
4517599424 [[1, 2, 3], [4, 5, 6], ['X', 'Y', 'Z']]
Now let's modify our original list by changing the last element of the last list to "O", and then print it to see the result:
original_list[-1][-1] = "O"
print(original_list)
We get the results as expected:
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'O']]
Now if we go ahead and try to print our copy list:
print(deepcopy_list)
The previous modification is not reflected in this list:
[[1, 2, 3], [4, 5, 6], ['X', 'Y', 'Z']]
Remember that the copy() and deepcopy() methods work on other compound objects too. This means that you can use them to create copies of class instances as well.
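For instance, here is a minimal sketch with a made-up Box class (hypothetical, defined purely for illustration):
import copy

class Box:
    def __init__(self, items):
        self.items = items  # a mutable attribute, shared by shallow copies

box = Box(["pen"])
shallow_box = copy.copy(box)
deep_box = copy.deepcopy(box)
box.items.append("ruler")
print(shallow_box.items)  # ['pen', 'ruler'] - the attribute object is shared
print(deep_box.items)     # ['pen'] - a fully independent clone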
### Conclusion
In this article, we learned what it means to shallow copy and to deep copy an object.
We also learned that we can use the copy() method of the copy module to create a shallow copy, and the deepcopy() method to create a deep copy of compound objects.
|
|
from qubo_nn.problems.subgraph_isomorphism import SubGraphIsomorphism
from qubo_nn.problems.util import gen_graph
class GraphIsomorphism(SubGraphIsomorphism):
def __init__(self, cfg, graph1, graph2):
super(GraphIsomorphism, self).__init__(cfg, graph1, graph2, a=1, b=2)
@classmethod
def gen_problems(cls, cfg, n_problems, size, seed=None, **kwargs):
graphs1 = gen_graph(n_problems, size, seed)
graphs2 = gen_graph(n_problems, size, seed)
return [
{"graph1": graph1, "graph2": graph2}
for graph1, graph2 in zip(graphs1, graphs2)
]
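# A minimal usage sketch (assumption: cfg is whatever configuration object
# SubGraphIsomorphism expects; the argument values below are made up):
# problems = GraphIsomorphism.gen_problems(cfg, n_problems=10, size=8, seed=42)
# problem = GraphIsomorphism(cfg, **problems[0])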
|
|
#**************************
# Logistic Regression
# for DNA N6-Adenine Methylation
# Tian Tian
# tt72@njit.edu
#**************************
import sys
import h5py
import numpy as np
#**************************
# import modules
#**************************
from pandas import DataFrame
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import roc_curve, auc, matthews_corrcoef, precision_recall_fscore_support
from sklearn.preprocessing import StandardScaler
import joblib
#**************************
# load data
#**************************
name = sys.argv[1]
index = sys.argv[2]
print('loading kmer data')
train = h5py.File('kmer/LR_m6A_'+name+'.train_'+index+'.hdf5', 'r')
test = h5py.File('kmer/LR_m6A_'+name+'.test_'+index+'.hdf5', 'r')
valid = h5py.File('kmer/LR_m6A_'+name+'.valid_'+index+'.hdf5', 'r')
Y_train = np.array(train['Y'])
Y_test = np.array(test['Y'])
Y_valid = np.array(valid['Y'])
X_train = np.transpose(np.array(train['X']))
X_valid = np.transpose(np.array(valid['X']))
Y_train_array = np.concatenate((Y_train,Y_valid), axis=0)
Y_train_array = (Y_train_array.ravel()).tolist()
X_train_array = np.concatenate((X_train,X_valid), axis=0)
X_test_array = np.transpose(np.array(test['X']))
Y_test_array = (Y_test.ravel()).tolist()
#**************************
# Data standardization
# Same scaling was applied for both test and train data
#**************************
scaler = StandardScaler()
scaler.fit(X_train_array)
X_train_array = scaler.transform(X_train_array)
X_test_array = scaler.transform(X_test_array)
#**************************
# Logistic Regression
#**************************
print('training LR model')
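# Cs = 10.0**-np.arange(-4, 4) sweeps regularisation strengths from 1e4 down to 1e-3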
clf = LogisticRegressionCV(Cs=10.0**-np.arange(-4,4), cv=5, penalty="l2", solver="sag", n_jobs=-1, max_iter=2000, verbose=1)
LR_fit = clf.fit(X_train_array, Y_train_array)
predict_proba = LR_fit.predict_proba(X_test_array)
predict_class = LR_fit.predict(X_test_array)
df = DataFrame({'groundtruth': Y_test_array, 'predict_proba': predict_proba[:, 1].tolist()})
df.to_csv('rslt_' + name + '_index' + index + '.csv', index=False)
false_positive_rate, true_positive_rate, thresholds = roc_curve(Y_test_array, predict_proba[:,1])
auc_test = auc(false_positive_rate, true_positive_rate)
mcc_test = matthews_corrcoef(Y_test_array, predict_class)
prf = precision_recall_fscore_support(Y_test_array, predict_class)
#**************************
# Output results
#**************************
print('auc:', auc_test)
print('mcc:', mcc_test)
print('precision, recall, f1score:', prf)
joblib.dump(clf, 'LR_len_'+name+'_'+index+'.pkl')
|
|
# %%
# Standard library modules
import os
import sys
import unittest
# Third-party modules
import numpy
import pandas
#set current working directory to where this file is saved
thisdir = os.path.dirname(os.path.abspath(__file__))
os.chdir(thisdir)
# Add higher directory to python module's path
sys.path.append("..")
#Local Application Modules
from main import DocSim_Function
# %% Unit Test for average_similarity function
class TestCase(unittest.TestCase): # inherit from unittest.TestCase
# Test On Random Data Frame for calculation
def test_calculation(self):
df1 = pandas.DataFrame(numpy.array([1.0, 2.0, 3.0]),
columns=['a'])
df3 = pandas.DataFrame(numpy.array([[1.0, 1.0, 1.0],
[4.0, 5.0, 2.0],
[7.0, 8.0, 3.0]]),
columns=['a', 'b', 'c'])
# Test
self.assertEqual(DocSim_Function.\
average_similarity(df1, mode = 'normal'), 2.5)
self.assertEqual(DocSim_Function.\
average_similarity(df1, mode = 'pairwise'), 2)
self.assertEqual(DocSim_Function.\
average_similarity(df3, mode = 'normal'), 2.5)
self.assertEqual(DocSim_Function.\
average_similarity(df3, mode = 'pairwise'), 2)
# Test mode = normal after doc_sim()
def test_normal_model(self):
# Set up
d1 = "plot: two teen couples go to a church party, drink and then drive."
d2 = "films adapted from comic books have had plenty of success , whether they're about superheroes ( batman , superman , spawn ) , or geared toward kids ( casper ) or the arthouse crowd ( ghost world ) , but there's never really been a comic book like from hell before . "
d3 = "every now and then a movie comes along from a suspect studio , with every indication that it will be a stinker , and to everybody's surprise ( perhaps even the studio ) the film becomes a critical darling . "
d4 = "damn that y2k bug . "
data = {
'DocumentID': [1,2,3,4],
'RawText':[d1,d1,d1,d1]
}
documents = pandas.DataFrame.from_dict(data)
output = DocSim_Function.doc_sim(document_matrix = documents,
text_col_name = 'RawText', mode = 'normal',
method = 'cosine', remove_stopwords = False,
filler_words = [], stem = False,
tfidf = False, LSA = False)
output2 = DocSim_Function.doc_sim(document_matrix = documents,
text_col_name = 'RawText', mode = 'normal',
method = 'cosine', remove_stopwords = True,
filler_words = [], stem = True,
tfidf = True, LSA = True)
# Test
self.assertEqual(round(DocSim_Function.average_similarity(output), 0),
1)
self.assertEqual(round(DocSim_Function.average_similarity(output2), 0),
1)
# Test mode = pairwise after doc_sim()
def test_pairwise_model(self):
d1 = "plot: two teen couples go to a church party, drink and then drive."
d2 = "films adapted from comic books have had plenty of success , whether they're about superheroes ( batman , superman , spawn ) , or geared toward kids ( casper ) or the arthouse crowd ( ghost world ) , but there's never really been a comic book like from hell before . "
d3 = "every now and then a movie comes along from a suspect studio , with every indication that it will be a stinker , and to everybody's surprise ( perhaps even the studio ) the film becomes a critical darling . "
d4 = "damn that y2k bug . "
data = {
'DocumentID': [1,2,3,4],
'RawText':[d2,d2,d2,d2]
}
documents = pandas.DataFrame.from_dict(data)
output = DocSim_Function.doc_sim(document_matrix = documents,
text_col_name = 'RawText', mode = 'pairwise',
method = 'cosine', remove_stopwords = False,
filler_words = [], stem = False,
tfidf = False, LSA = False)
output2 = DocSim_Function.doc_sim(document_matrix = documents,
text_col_name = 'RawText', mode = 'pairwise',
method = 'cosine', remove_stopwords = True,
filler_words = [], stem = True,
tfidf = True, LSA = True)
# Test
self.assertEqual(round(DocSim_Function.\
average_similarity(output, mode = 'pairwise'),
4),
round(output.iloc[1, 3], 4))
self.assertEqual(round(DocSim_Function.\
average_similarity(output2, mode = 'pairwise'),
4),
round(output2.iloc[1, 3], 4))
if __name__ == '__main__':
# Start unit test
print("=========== Unit Testing is initiated =========\n")
unittest.main(exit=False)
|
|
#!/usr/bin/python
import math
import random
import string
import sys
import numpy as np
from midiutil.MidiFile import MIDIFile
from midigen import heightmap
from midigen import dither
if len(sys.argv) > 1:
seed = str(sys.argv[1])
else:
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
seed = ''.join(random.choice(chars) for i in range(8))
n_iter = 4
# length of rhythm in sixteenth notes
length = 2**n_iter
smoothing = 1
init = [1, 0]
stress_list = heightmap.heightmap_1D(n_iter, smoothing, seed + "stress", init)
threshold = dither.dither_1D(n_iter)
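# a sixteenth-note slot sounds when its stress value exceeds the dither threshold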
rhythm = [threshold[i] < stress_list[i] for i in range(length)]
#= write MIDI file =============================================================
output_file = MIDIFile(1)
track = 0
time = 0
channel = 0
pitch = 60
duration = 1/4
track_name = "1D rhythm " + seed + " {} bar".format(length // 16)
if (length // 16) > 1:
track_name += "s"
output_file.addTrackName(track, time, track_name)
for index, play_note in enumerate(rhythm):
if play_note:
time = index/4
# no zero velocity notes
volume = math.ceil(stress_list[index]*127)
volume = max(volume, 1)
output_file.addNote(track, channel, pitch, time, duration, volume)
filename = track_name + ".mid"
try:
with open(filename, 'wb') as outf:
output_file.writeFile(outf)
print("Created file {}".format(filename))
except OSError as exc:
print("Write failed:", exc)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 PyePAL authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the PAL sklearn class"""
import numpy as np
import pytest
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, Matern
from sklearn.model_selection import GridSearchCV
from sklearn.utils.validation import check_is_fitted
from pyepal.pal.pal_sklearn import PALSklearn
np.random.seed(10)
def test_pal_sklearn(make_random_dataset):
"""Test that we can create a instanec of the PAL sklearn class"""
X, y = make_random_dataset # pylint:disable=invalid-name
gpr = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
pal_sklearn_instance = PALSklearn(X, [gpr, gpr, gpr], 3)
pal_sklearn_instance.update_train_set(
np.array([1, 2, 3, 4, 5]), y[np.array([1, 2, 3, 4, 5]), :]
)
assert pal_sklearn_instance.models[0].kernel.length_scale == 1
pal_sklearn_instance._train() # pylint:disable=protected-access
assert pal_sklearn_instance.models[0].kernel_.length_scale != 1
def test_gridsearch_object(binh_korn_points):
"""Test the initialization of PALSklearn with a GridsearchCV object"""
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70])
grid_search_0 = GridSearchCV(
GaussianProcessRegressor(), {"kernel": [RBF(), Matern()]}
)
grid_search_1 = GridSearchCV(
GaussianProcessRegressor(), {"kernel": [RBF(), Matern()]}
)
with pytest.raises(ValueError):
palinstance = PALSklearn(
X_binh_korn, [grid_search_0, grid_search_1], 2, beta_scale=1
)
grid_search_0.fit(X_binh_korn, y_binh_korn[:, 0])
grid_search_1.fit(X_binh_korn, y_binh_korn[:, 1])
palinstance = PALSklearn(
X_binh_korn, [grid_search_0, grid_search_1], 2, beta_scale=1
)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step()
assert len(idx) == 1
assert idx[0] not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
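# sklearn's check_is_fitted raises NotFittedError for unfitted models and returns None otherwise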
for model in palinstance.models:
assert check_is_fitted(model) is None
def test_orchestration_run_one_step(make_random_dataset, binh_korn_points):
"""Test if the orchestration works.
In the base class it should raise an error as without
prediction function we cannot do anything
"""
X, y = make_random_dataset # pylint:disable=invalid-name
gpr_0 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
gpr_1 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
gpr_2 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
palinstance = PALSklearn(X, [gpr_0, gpr_1, gpr_2], 3, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y[sample_idx])
idx = palinstance.run_one_step()
assert len(idx) == 1
assert idx[0] not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for model in palinstance.models:
assert check_is_fitted(model) is None
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70])
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step()
assert len(idx) == 1
assert idx[0] not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70]
assert palinstance.number_sampled_points == len(sample_idx)
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
def test_augment_design_space(make_random_dataset):
"""Test if the reclassification step in the design step
agumentation method works"""
X, y = make_random_dataset # pylint:disable=invalid-name
gpr_0 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
gpr_1 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
gpr_2 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=5)
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
palinstance = PALSklearn(X, [gpr_0, gpr_1, gpr_2], 3, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y[sample_idx])
_ = palinstance.run_one_step()
X_new = X + 1 # pylint:disable=invalid-name
palinstance.augment_design_space(X_new, classify=True, clean_classify=False)
assert palinstance.number_design_points == 200
assert palinstance.number_sampled_points == len(sample_idx)
# Adding new design points should not mess up with the models
for model in palinstance.models:
assert check_is_fitted(model) is None
# Now, test the `clean_classify` flag
gpr_0 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=3)
gpr_1 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=3)
gpr_2 = GaussianProcessRegressor(RBF(), normalize_y=True, n_restarts_optimizer=3)
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
palinstance = PALSklearn(X, [gpr_0, gpr_1, gpr_2], 3, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y[sample_idx])
_ = palinstance.run_one_step()
X_new = X + np.full((1, 10), 1) # pylint:disable=invalid-name
palinstance.augment_design_space(X_new)
assert palinstance.number_design_points == 200
assert palinstance.number_sampled_points == len(sample_idx)
def test_augment_design_space_bk(binh_korn_points, binh_korn_points_finer):
"""Test the augment function by using a finer sampling of the Binh-Korn function
for augmentation"""
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
(
X_binh_korn_finer, # pylint:disable=invalid-name
_,
) = binh_korn_points_finer
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
new_idx = palinstance.run_one_step()
palinstance.update_train_set(new_idx, y_binh_korn[new_idx])
number_pareto_optimal_points_old = palinstance.number_pareto_optimal_points
palinstance.augment_design_space(X_binh_korn_finer)
assert palinstance.number_discarded_points == 0
assert palinstance.number_pareto_optimal_points > number_pareto_optimal_points_old
def test_orchestration_run_one_step_batch( # pylint:disable=too-many-statements
binh_korn_points,
):
"""Test the batch sampling"""
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=10)
assert len(idx) == 10
assert len(np.unique(idx)) == 10
for index in idx:
assert index not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
# scaling up beta
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1 / 3)
palinstance.cross_val_points = 0
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=10)
for index in idx:
assert index not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
# smaller initial set
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1 / 3)
palinstance.cross_val_points = 0
sample_idx = np.array([1, 10, 20, 40, 70, 90])
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=10)
for index in idx:
assert index not in [1, 10, 20, 40, 70, 90]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
# smaller initial set and beta scale
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1 / 9)
palinstance.cross_val_points = 0
sample_idx = np.array([1, 10, 20, 40, 70, 90])
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=10)
for index in idx:
assert index not in [1, 10, 20, 40, 70, 90]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
# smaller initial set and beta scale and different kernel
gpr_0 = GaussianProcessRegressor(
Matern(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
Matern(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1 / 9)
palinstance.cross_val_points = 0
sample_idx = np.array([1, 10, 20, 40, 70, 90])
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=10)
for index in idx:
assert index not in [1, 10, 20, 40, 70, 90]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
# test using the "fixed" epsilon
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=6, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=6, random_state=10
)
palinstance = PALSklearn(
X_binh_korn,
[gpr_0, gpr_1],
2,
beta_scale=1 / 9,
ranges=np.ptp(y_binh_korn, axis=0),
)
assert palinstance.uses_fixed_epsilon
palinstance.cross_val_points = 0
sample_idx = np.array([1, 10, 20, 40, 70, 90])
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step(batch_size=1)
for index in idx:
assert index not in [1, 10, 20, 40, 70, 90]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
def test_orchestration_run_one_step_parallel(binh_korn_points):
"""Test the parallel processing"""
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
gpr_0 = GaussianProcessRegressor(
Matern(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
Matern(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1 / 9, n_jobs=2)
sample_idx = np.array([1, 10, 20, 40, 70, 90])
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
palinstance.cross_val_points = 0
idx = palinstance.run_one_step(batch_size=10)
for index in idx:
assert index not in [1, 10, 20, 40, 70, 90]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
for model in palinstance.models:
assert check_is_fitted(model) is None
def test_orchestration_run_one_step_missing_data(binh_korn_points):
"""Test that the model also works with missing observations"""
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70])
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1)
palinstance.cross_val_points = 0
# make some of the observations missing
y_binh_korn[:10, 1] = np.nan
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
idx = palinstance.run_one_step()
assert idx[0] not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70]
assert palinstance.number_sampled_points > 0
assert sum(palinstance.unclassified) > 0
assert sum(palinstance.discarded) == 0
def test_crossvalidate(binh_korn_points):
"""Test the crossvalidation routine"""
gpr_0 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
gpr_1 = GaussianProcessRegressor(
RBF(), normalize_y=True, n_restarts_optimizer=5, random_state=10
)
X_binh_korn, y_binh_korn = binh_korn_points # pylint:disable=invalid-name
sample_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 60, 70])
palinstance = PALSklearn(X_binh_korn, [gpr_0, gpr_1], 2, beta_scale=1)
palinstance.update_train_set(sample_idx, y_binh_korn[sample_idx])
original_sample_mask = palinstance.sampled
cross_val_error = palinstance._crossvalidate() # pylint:disable=protected-access
assert (palinstance.sampled_indices == sample_idx).all()
assert (palinstance.sampled == original_sample_mask).all()
assert isinstance(cross_val_error, float)
assert np.abs(cross_val_error) > 0
|
|
#!/usr/bin/env python
import sys
import json
import numpy as np
from logging import warning
IGNORE = set(['[CLS]', '[SEP]'])
def argparser():
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('file', nargs='+', metavar='JSONL',
help='BERT extract_features.py output')
return ap
def is_continuation(token):
return token.startswith('##')
def save_vector(pieces, values, vectors):
if not pieces:
raise ValueError('no pieces')
word = pieces[0]
for p in pieces[1:]:
word += p[2:]
v = np.mean(values, axis=0)
if word not in vectors:
vectors[word] = (v, 1)
else:
vectors[word] = (vectors[word][0]+v, vectors[word][1]+1)
def load_vectors(fn, vectors, options):
total = 0
with open(fn) as f:
for ln, l in enumerate(f, start=1):
data = json.loads(l)
curr_pieces, curr_values = [], []
for d in data['features']:
token = d['token']
if token in IGNORE:
continue
layers = d['layers']
values = []
for layer in layers:
values.append(np.array(layer['values']))
# values = np.mean(values, axis=0)
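# keep every requested layer, concatenated end to end into one long vector per piece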
values = np.concatenate(values, axis=0)
if is_continuation(token):
if not curr_pieces:
raise ValueError('line-initial "{}"'.format(token))
curr_pieces.append(token)
curr_values.append(values)
else:
# not continuation, i.e. new token
if curr_pieces:
save_vector(curr_pieces, curr_values, vectors)
total += 1
curr_pieces = [token]
curr_values = [values]
# process the last token of the line (if any)
if curr_pieces:
save_vector(curr_pieces, curr_values, vectors)
total += 1
print('loaded {} vectors for {} tokens from {}'.format(
total, len(vectors), fn), file=sys.stderr)
return vectors
def main(argv):
args = argparser().parse_args(argv[1:])
# maintain a running sum and count of vectors per word
vectors = {}
for fn in args.file:
load_vectors(fn, vectors, args)
# divide by count for average vector
vectors = { w: v/c for w, (v, c) in vectors.items() }
if not vectors:
warning('no vectors loaded, exiting')
return 1
tokens = len(vectors)
dim = len(next(iter(vectors.values())))
print(tokens, dim) # word2vec header line
for t, v in vectors.items():
#v = np.mean(vectors, axis=0)
v /= np.linalg.norm(v)
print(t, ' '.join('{:.4f}'.format(i) for i in v))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
# Copyright (c) 2020 Hartmut Kaiser
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# #1258: np.random.randn does not work
from phylanx import Phylanx
import numpy as np
@Phylanx
def generate():
return np.random.randn(100)
result = generate()
assert np.shape(result) == (100,), result
|
|
import numpy as np
import torchvision.models.segmentation
import torch
import torchvision.transforms as tf
Learning_Rate=1e-5
width=height=900 # image width and height
batchSize=1
#---------------------Create training image ---------------------------------------------------------
def ReadRandomImage():
FillLevel=np.random.random() # Set random fill level
Img=np.zeros([900,900,3],np.uint8) # Create black image
Img[0:int(FillLevel*900),:]=255 # Fill the image with white up to FillLevel
transformImg=tf.Compose([tf.ToPILImage(),tf.Resize((height,width)),tf.ToTensor(),tf.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
Img=transformImg(Img) # Transform to pytorch
return Img,FillLevel
#--------------Load batch of images-----------------------------------------------------
def LoadBatch(): # Load batch of images
images = torch.zeros([batchSize,3,height,width])
FillLevel = torch.zeros([batchSize])
for i in range(batchSize):
images[i],FillLevel[i]=ReadRandomImage()
return images,FillLevel
#--------------Load and set net and optimizer-------------------------------------
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # Set device GPU or CPU where the training will take place
Net = torchvision.models.resnet18(pretrained=True) # Load net
Net.fc = torch.nn.Linear(in_features=512, out_features=1, bias=True) # Change final layer to predict one value
Net = Net.to(device)
optimizer = torch.optim.Adam(params=Net.parameters(),lr=Learning_Rate) # Create adam optimizer
#----------------Train--------------------------------------------------------------------------
AverageLoss=np.zeros([50]) # Save average loss for display
for itr in range(500001): # Training loop
images,GTFillLevel=LoadBatch() # Load training batch
images=images.to(device) # Move images to the training device
GTFillLevel=GTFillLevel.to(device) # Move ground-truth fill levels to the device
PredLevel=Net(images) # make prediction
Net.zero_grad()
Loss=torch.abs(PredLevel.squeeze(1)-GTFillLevel).mean() # L1 loss; squeeze aligns the [batch,1] output with the [batch] target
Loss.backward() # Backpropagate loss
optimizer.step() # Apply gradient descent change to weight
AverageLoss[itr%50]=Loss.data.cpu().numpy() # Save loss average
print(itr,") Loss=",Loss.data.cpu().numpy(),'AverageLoss',AverageLoss.mean()) # Display loss
if itr % 200 == 0: # Save model
print("Saving Model" +str(itr) + ".torch") #Save model weight
torch.save(Net.state_dict(), str(itr) + ".torch")
|
|
# Licensed under a MIT style license - see LICENSE.rst
"""MUSE-PHANGS target sample module
"""
__authors__ = "Eric Emsellem"
__copyright__ = "(c) 2017, ESO + CRAL"
__license__ = "MIT License"
__contact__ = " <eric.emsellem@eso.org>"
# Standard modules
import os
from os.path import join as joinpath
import numpy as np
from src.pymusepipe import util_pipe as upipe
from src.pymusepipe import MusePipe
from src.pymusepipe import (PHANGS_reduc_config,
default_short_PHANGS_filter_list,
default_short_filter_list)
from src.pymusepipe.init_musepipe import InitMuseParameters
from src.pymusepipe.combine import MusePointings
from src.pymusepipe import rotate_pixtables
from .prep_recipes_pipe import dic_products_scipost
from .version import __version__ as version_pack
from astropy.table import Table
# ----------------- Galaxies and Pointings ----------------#
# Sample of galaxies
# For each galaxy, we provide the pointings numbers and the run attached to that pointing
dic_SAMPLE_example = {
"NGC628": ['P100', {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0}],
"NGC1087": ['P101', {1:1}],
}
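# Per the convention documented below: e.g. NGC1087 has pointing 1, observed in
# run P101, flagged 1 (= include that pointing in the reduction); a 0 excludes it.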
# ----------------- Galaxies and Pointings ----------------#
#=============== Useful function ==========================#
def insert_suffix(filename, suffix=""):
"""Create a new filename including the
suffix in the name
Input
-----
filename: str
suffix: str
"""
# Create new name
sfilename, extension = os.path.splitext(filename)
return "{0}_{1}{2}".format(sfilename, suffix, extension)
def update_calib_file(filename, subfolder="", folder_config=""):
"""Update the rcfile with a new root
Input
-----
filename: str
Name of the input filename
folder_config: str
Default is "". Name of folder for filename
subfolder: str
Name of subfolder to add in the path
"""
full_filename = joinpath(folder_config, filename)
if full_filename is None:
upipe.print_error("ERROR: input filename is None")
return ""
# Testing existence of filename
if not os.path.isfile(full_filename) :
upipe.print_error("ERROR: input filename {inputname} cannot be found. ".format(
inputname=full_filename))
return ""
# If it exists, open and read it
old_rc = open(full_filename)
lines = old_rc.readlines()
# Create new file
new_filename = insert_suffix(full_filename, subfolder)
new_rc = open(new_filename, 'w')
# loop on lines
for line in lines :
sline = line.split()
if sline[0] != "root":
new_rc.write(line)
continue
if not os.path.isdir(sline[1]):
upipe.print_warning("{} not an existing folder (from rcfile)".format(sline[1]))
newline = line.replace(sline[1], joinpath(sline[1], subfolder))
new_rc.write(newline)
new_rc.close()
old_rc.close()
return new_filename
#------------ End of Useful functions -------------#
####################################################
# Defining Dictionary with running functions
####################################################
class PipeDict(dict):
"""Dictionary with extra attributes
"""
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
self._initialised = False
def __setitem__(self, key, value):
"""Setting the item by using the dictionary of the new values
"""
for funcname in dir(value):
if callable(getattr(value, funcname)) and ("run" in funcname):
setattr(self, funcname, self.run_on_all_keys(funcname))
super(PipeDict, self).__setitem__(key, value)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, "
"got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def run_on_all_keys(self, funcname):
"""Runs the given function on all the keys
"""
def _function(**kwargs):
for key in self:
getattr(self[key], funcname)(**kwargs)
return _function
####################################################
# Defining classes to get samples and objects
####################################################
class MusePipeTarget(object):
def __init__(self, targetname="",
subfolder='P100', list_pointings=None):
self.targetname = targetname
self.subfolder = subfolder
self.list_pointings = list_pointings
self.pipes = PipeDict()
class MusePipeSample(object):
def __init__(self, TargetDic, rc_filename=None, cal_filename=None,
folder_config="", first_recipe=1, **kwargs) :
"""Using a given dictionary to initialise the sample
That dictionary should include the names of the targets
as keys and the subfolder plus pointings to consider
Input
-----
TargetDic: dic
Dictionary of targets. Keys are target names.
Values for each target name should be a list of 2 parameters.
- The first one is the name of the subfolder (e.g. 'P101')
- The second one is the list of pointings, itself a dictionary
with a 0 or 1 for each pointing number depending on whether
this should be included in the reduction or not.
Results can be seen in self.dic_targets dictionary.
rc_filename: str
Default to None
cal_filename: str
Default to None
PHANGS: bool
Default to False. If True, will use default configuration dictionary
from config_pipe.
"""
self.sample = TargetDic
self.targetnames = list(TargetDic.keys())
self.first_recipe = first_recipe
self.__phangs = kwargs.pop("PHANGS", False)
self.verbose = kwargs.pop("verbose", False)
# Reading configuration filenames
if rc_filename is None or cal_filename is None:
upipe.print_error("rc_filename and/or cal_filename is None. Please define both.")
return
self.cal_filename = cal_filename
self.rc_filename = rc_filename
self.folder_config = folder_config
# Initialisation of rc and cal files
self._init_calib_files()
# Initialisation of targets
self.init_pipes = kwargs.pop("init_pipes", True)
self.add_targetname = kwargs.pop("add_targetname", True)
self._init_targets()
def _init_calib_files(self):
"""Initialise the calibration files with the new
name using the subfolders
"""
# Getting the right input for rc and cal names
folder_config, rc_filename, cal_filename = self._get_calib_filenames()
# First read the root folder
init_cal_params = InitMuseParameters(folder_config=folder_config,
rc_filename=rc_filename,
cal_filename=cal_filename,
verbose=self.verbose)
self.root_path = init_cal_params.root
self._subfolders = np.unique([self.sample[targetname][0]
for targetname in self.targetnames])
for subfolder in self._subfolders:
update_calib_file(rc_filename, subfolder, folder_config=folder_config)
update_calib_file(cal_filename, subfolder, folder_config=folder_config)
def _get_calib_filenames(self, targetname=None):
"""Get calibration file names
Input
----
targetname: str
Returns
-------
folder_name: str
rcname: str
calname: str
"""
# using targetname or not
if targetname is None:
name_rc = self.rc_filename
name_cal = self.cal_filename
else:
name_rc = insert_suffix(self.rc_filename, self.targets[targetname].subfolder)
name_cal = insert_suffix(self.cal_filename, self.targets[targetname].subfolder)
folder_config = self.folder_config
# Checking the folders
folder_rc, rc_filename_target = os.path.split(joinpath(folder_config, name_rc))
folder_cal, cal_filename_target = os.path.split(joinpath(folder_config, name_cal))
if rc_filename_target=="" or cal_filename_target=="":
upipe.print_error("Missing a calibration name file")
return
if folder_rc == folder_cal:
folder_config = folder_rc
else:
rc_filename_target = joinpath(folder_rc, rc_filename_target)
cal_filename_target = joinpath(folder_cal, cal_filename_target)
folder_config = ""
return folder_config, rc_filename_target, cal_filename_target
def _init_targets(self):
"""Initialise the targets using the dictionary
Returning self.targets with the pointings to consider
"""
self.targets = {}
self.pipes = {}
self.pipes_combine = {}
self.pipes_mosaic = {}
for targetname in self.targetnames:
subfolder = self.sample[targetname][0]
lpoints = self.sample[targetname][1]
list_pointings = []
for lp in lpoints:
if lpoints[lp] == 1:
list_pointings.append(lp)
# Defining the MusePipe for that target
self.targets[targetname] = MusePipeTarget(targetname=targetname,
subfolder=subfolder,
list_pointings=list_pointings)
# Shortcut to call the musepipe instance
self.pipes[targetname] = self.targets[targetname].pipes
folder_config, rc_filename, cal_filename = self._get_calib_filenames(targetname)
self.targets[targetname].rc_filename = rc_filename
self.targets[targetname].cal_filename = cal_filename
self.targets[targetname].folder_config = folder_config
init_params_target = InitMuseParameters(rc_filename=rc_filename,
cal_filename=cal_filename,
folder_config=folder_config,
verbose=self.verbose)
self.targets[targetname].root_path = init_params_target.root
self.targets[targetname].data_path = joinpath(init_params_target.root, targetname)
self.pipes[targetname].root_path = init_params_target.root
self.pipes[targetname].data_path = joinpath(init_params_target.root, targetname)
init_comb_target = MusePointings(targetname=targetname,
list_pointings=list_pointings,
rc_filename=rc_filename,
cal_filename=cal_filename,
folder_config=folder_config,
verbose=self.verbose)
self.targets[targetname].combcubes_path = init_comb_target.paths.cubes
if self.init_pipes:
self.set_pipe_target(targetname)
def _check_pointings(self, targetname, list_pointings):
"""Check if pointing is in the list of pointings
Returns the list of pointings if ok. If not, return an empty list
Input
-----
targetname: str
name of the target
list_pointings: list
List of integer (pointings).
Returns
-------
list_pointings: list
Empty if input list of pointings is not fully in defined list.
"""
# Info of the pointings and extracting the observing run for each pointing
target_pointings = self.targets[targetname].list_pointings
# if no list_pointings we just do them all
if list_pointings is None:
list_pointings = target_pointings
else:
if any([_ not in target_pointings for _ in list_pointings]) :
upipe.print_error("ERROR: no pointing {0} for the given target".format(
list_pointings))
return []
return list_pointings
def _check_targetname(self, targetname):
"""Check if targetname is in list
Input
-----
targetname: str
Returns
-------
status: bool
True if yes, False if no.
"""
if targetname not in self.targetnames:
upipe.print_error("ERROR: no Target named {name} in the defined sample".format(
name=targetname))
return False
else:
return True
def set_pipe_target(self, targetname=None, list_pointings=None,
**kwargs):
"""Create the musepipe instance for that target and list of pointings
Input
-----
targetname: str
Name of the target
list_pointings: list
Pointing numbers. Default is None (meaning all pointings
indicated in the dictionary will be reduced)
config_args: dic
Dictionary including extra configuration parameters to pass
to MusePipe. This allows to define a global configuration.
If self.__phangs is set to True, this is overwritten with the default
PHANGS configuration parameters as provided in config_pipe.py.
"""
verbose = kwargs.pop("verbose", self.verbose)
# Check if targetname is valid
if not self._check_targetname(targetname):
return
# Galaxy name
upipe.print_info("=== Initialising MusePipe for Target {name} ===".format(name=targetname))
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# Get the filename and extension of log file
log_filename, log_fileext = os.path.splitext(kwargs.pop("log_filename",
"{0}_{1}.log".format(targetname, version_pack)))
# Reading extra arguments from config dictionary
if self.__phangs:
config_args = PHANGS_reduc_config
# Set overwrite to False to keep existing tables
config_args['overwrite_astropy_table'] = False
else:
config_args = kwargs.pop("config_args", None)
first_recipe = kwargs.pop("first_recipe", 1)
last_recipe = kwargs.pop("last_recipe", None)
# Over-writing the arguments in kwargs from config dictionary
if config_args is not None:
for attr in config_args:
if attr not in kwargs:
kwargs[attr] = config_args[attr]
# extracting the kwargs
list_kwargs = ', '.join(['{}={!r}'.format(k, v) for k, v in kwargs.items()])
# Config files
rc_filename = self.targets[targetname].rc_filename
cal_filename = self.targets[targetname].cal_filename
folder_config = self.targets[targetname].folder_config
# Loop on the pointings
for pointing in list_pointings:
upipe.print_info("Initialise Pipe for Target = {0:10s} / Pointing {1:02d} ".format(
targetname, pointing))
# New log file name with pointing included
log_filename_pointing = "{0}_P{1:02d}{2}".format(
log_filename, pointing, log_fileext)
# Setting up the names of the output files
python_command = ("mypipe = musepipe.MusePipe(targetname='{0}', "
"pointing={1}, folder_config='{2}', "
"rc_filename='{3}', " "cal_filename='{4}', "
"log_filename='{5}', verbose={6}, "
"{7})".format(targetname, pointing, folder_config,
rc_filename, cal_filename, log_filename_pointing,
verbose, list_kwargs))
# Creating the musepipe instance, using the shortcut
self.pipes[targetname][pointing] = MusePipe(
targetname=targetname, pointing=pointing,
folder_config=folder_config, rc_filename=rc_filename,
cal_filename=cal_filename, log_filename=log_filename_pointing,
first_recipe=first_recipe, last_recipe=last_recipe,
init_raw_table=False, verbose=verbose, **kwargs)
# Saving the command
self.pipes[targetname][pointing].history = python_command
# Setting back verbose to True to make sure we have a full account
self.pipes[targetname][pointing].verbose = True
upipe.print_info(python_command, pipe=self)
upipe.print_info("End of Pipe initialisation")
self.pipes[targetname]._initialised = True
def _get_path_data(self, targetname, pointing):
"""Get the path for the data
Parameters
----------
targetname: str
Name of the target
pointing: int
Number for the pointing
Returns
-------
path_data
"""
return self.pipes[targetname][pointing].paths.data
def _get_path_files(self, targetname, pointing, expotype="OBJECT"):
"""Get the path for the files of a certain expotype
Parameters
----------
targetname: str
Name of the target
pointing: int
Number for the pointing
Returns
-------
path_files
"""
return self.pipes[targetname][pointing]._get_path_files(expotype)
def reduce_all_targets(self, **kwargs):
"""Reduce all targets already initialised
Input
-----
first_recipe: int or str
One of the recipe to start with
last_recipe: int or str
One of the recipe to end with
"""
for target in self.targets:
upipe.print_info("=== Start Reduction of Target {name} ===".format(name=target))
self.reduce_target(targetname=target, **kwargs)
upipe.print_info("=== End Reduction of Target {name} ===".format(name=target))
def reduce_target_prealign(self, targetname=None, list_pointings=None, **kwargs):
"""Reduce target for all steps before pre-alignment (included)
Input
-----
targetname: str
Name of the target
list_pointings: list
Pointing numbers. Default is None (meaning all pointings
indicated in the dictionary will be reduced)
"""
self.reduce_target(targetname=targetname, list_pointings=list_pointings,
last_recipe="prep_align", **kwargs)
def reduce_target_postalign(self, targetname=None, list_pointings=None, **kwargs):
"""Reduce target for all steps after pre-alignment
Input
-----
targetname: str
Name of the target
list_pointings: list
Pointing numbers. Default is None (meaning all pointings
indicated in the dictionary will be reduced)
"""
self.reduce_target(targetname=targetname, list_pointings=list_pointings,
first_recipe="align_bypointing", **kwargs)
def run_target_scipost_perexpo(self, targetname=None, list_pointings=None,
folder_offset_table=None, offset_table_name=None,
**kwargs):
"""Build the cube per exposure using a given WCS
Args:
targetname:
list_pointings:
**kwargs:
Returns:
"""
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# WCS imposed by setting the reference
add_targetname = kwargs.pop("add_targetname", self.add_targetname)
prefix_all = kwargs.pop("prefix_all", "")
if add_targetname:
prefix_all = "{0}_{1}".format(targetname, prefix_all)
save = kwargs.pop("save", 'cube')
wcs_auto = kwargs.pop("wcs_auto", True)
if not wcs_auto:
name_wcs = kwargs.pop("name_wcs", None)
# Fetch the default folder for the WCS files which is the folder
# of the Combined cubes
# self.init_combine(targetname=targetname, folder_offset_table=folder_offset_table,
# offset_table_name=offset_table_name)
# default_comb_folder = self.pipes_combine[targetname].paths.cubes
default_comb_folder = self.targets[targetname].combcubes_path
# Now fetch the value set by the user
folder_ref_wcs = kwargs.pop("folder_ref_wcs", default_comb_folder)
if self.__phangs:
filter_list = kwargs.pop("filter_list", default_short_PHANGS_filter_list)
else:
filter_list = kwargs.pop("filter_list", default_short_filter_list)
# Running the scipost_perexpo for all pointings individually
for pointing in list_pointings:
if wcs_auto:
cube_suffix = dic_products_scipost['cube'][0]
if add_targetname:
cube_suffix = "{0}_{1}".format(targetname, cube_suffix)
name_wcs = "{0}_P{1:02d}.fits".format(cube_suffix,
np.int(pointing))
suffix = "_WCS_P{0:02d}".format(np.int(pointing))
kwargs_pointing = {'ref_wcs': name_wcs,
'suffix': suffix,
'folder_ref_wcs': folder_ref_wcs,
'sof_filename': 'scipost_wcs',
'dir_products': default_comb_folder,
'offset_table_name': offset_table_name,
'folder_offset_table': folder_offset_table,
'offset_list': True,
'filter_list': filter_list,
'prefix_all': prefix_all,
'save': save}
kwargs.update(kwargs_pointing)
self.pipes[targetname][pointing].run_scipost_perexpo(**kwargs)
def run_target_recipe(self, recipe_name, targetname=None,
list_pointings=None, **kwargs):
"""Run just one recipe on target
Input
-----
recipe_name: str
targetname: str
Name of the target
list_pointings: list
Pointing numbers. Default is None (meaning all pointings
indicated in the dictionary will be reduced)
"""
# General print out
upipe.print_info("---- Starting the Recipe {0} for Target={1} "
"----".format(recipe_name, targetname))
kwargs_recipe = {}
for key, default in zip(['fraction', 'skymethod', 'illum'],
[0.8, "model", True]):
value = kwargs.pop(key, default)
kwargs_recipe[key] = value
# some parameters which depend on the pointings for this recipe
kwargs_per_pointing = kwargs.pop("kwargs_per_pointing", {})
# Initialise the pipe if needed
self.set_pipe_target(targetname=targetname, list_pointings=list_pointings,
first_recipe=recipe_name, last_recipe=recipe_name, **kwargs)
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# Loop on the pointings
for pointing in list_pointings:
upipe.print_info("====== START - POINTING {0:2d} "
"======".format(pointing))
param_recipes = {}
if pointing in kwargs_per_pointing:
param_recipes[recipe_name] = kwargs_per_pointing[pointing]
# Initialise raw tables if not already done (takes some time)
if not self.pipes[targetname][pointing]._raw_table_initialised:
self.pipes[targetname][pointing].init_raw_table(overwrite=True)
if self.__phangs:
self.pipes[targetname][pointing].run_phangs_recipes(param_recipes=param_recipes,
**kwargs_recipe)
else:
self.pipes[targetname][pointing].run_recipes(param_recipes=param_recipes,
**kwargs_recipe)
upipe.print_info("====== END - POINTING {0:2d} ======".format(pointing))
def reduce_target(self, targetname=None, list_pointings=None, **kwargs):
"""Reduce one target for a list of pointings
Input
-----
targetname: str
Name of the target
list_pointings: list
Pointing numbers. Default is None (meaning all pointings
indicated in the dictionary will be reduced)
first_recipe: str or int [1]
last_recipe: str or int [max of all recipes]
Name or number of the first and last recipes to process
"""
# General print out
upipe.print_info("---- Starting the Data Reduction for Target={0} ----".format(
targetname))
kwargs_recipe = {}
for key, default in zip(['fraction', 'skymethod', 'illum'], [0.8, "model", True]):
item = kwargs.pop(key, default)
kwargs_recipe[key] = item
# Initialise the pipe if needed
if not self.pipes[targetname]._initialised \
or "first_recipe" in kwargs or "last_recipe" in kwargs:
self.set_pipe_target(targetname=targetname, list_pointings=list_pointings, **kwargs)
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# Loop on the pointings
for pointing in list_pointings:
upipe.print_info("====== START - POINTING {0:2d} ======".format(pointing))
# Initialise raw tables if not already done (takes some time)
if not self.pipes[targetname][pointing]._raw_table_initialised:
self.pipes[targetname][pointing].init_raw_table(overwrite=True)
if self.__phangs:
self.pipes[targetname][pointing].run_phangs_recipes(**kwargs_recipe)
else:
self.pipes[targetname][pointing].run_recipes(**kwargs_recipe)
upipe.print_info("====== END - POINTING {0:2d} ======".format(pointing))
def rotate_pixtables_target(self, targetname=None, list_pointings=None,
folder_offset_table=None, offset_table_name=None,
fakemode=False, **kwargs):
"""Rotate all pixel table of a certain targetname and pointings
"""
# General print out
upipe.print_info("---- Starting the PIXTABLE ROTATION "
"for Target={0} ----".format(targetname))
# Initialise the pipe if needed
if not self.pipes[targetname]._initialised \
or "first_recipe" in kwargs or "last_recipe" in kwargs:
self.set_pipe_target(targetname=targetname,
list_pointings=list_pointings, **kwargs)
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# add_targetname = kwargs.pop("add_targetname", self.add_targetname)
prefix = kwargs.pop("prefix", "")
# if add_targetname:
# prefix = "{}_{}".format(targetname, prefix)
if folder_offset_table is None:
folder_offset_table = self.pipes[targetname][list_pointings[0]].paths.alignment
offset_table = Table.read(joinpath(folder_offset_table, offset_table_name))
offset_table.sort(["POINTING_OBS", "IEXPO_OBS"])
# Loop on the pointings
for row in offset_table:
iexpo = row['IEXPO_OBS']
pointing = row['POINTING_OBS']
tpls = row['TPL_START']
angle = row['ROTANGLE']
upipe.print_info("Rotation ={0} Deg for Pointing={1:02d}, "
"TPLS={2} - Expo {3:02d}".format(
angle, pointing, tpls, iexpo))
folder_expos = self._get_path_files(targetname, pointing)
name_suffix = "{0}_{1:04d}".format(tpls, iexpo)
rotate_pixtables(folder=folder_expos, name_suffix=name_suffix,
list_ifu=None, angle=angle, fakemode=fakemode,
prefix=prefix, **kwargs)
def init_mosaic(self, targetname=None, list_pointings="all", **kwargs):
"""Prepare the combination of targets
Input
-----
targetname: str [None]
Name of target
list_pointings: list [or "all"=default]
List of pointings (e.g., [1,2,3])
"""
# Check if pointings are valid
list_pointings = self._check_pointings(targetname, list_pointings)
if len(list_pointings) == 0:
return
# Make a list for the masking of the cubes to take into account
list_pointing_names = ["P{0:02d}".format(np.int(pointing))
for pointing in list_pointings]
folder_ref_wcs = kwargs.pop("folder_ref_wcs", self.targets[targetname].combcubes_path)
self.pipes_mosaic[targetname] = MuseCubeMosaic(output_wcs=...,
folder_cubes=...,
prefix_cubes="DATACUBE_FINAL_WCS",
list_suffix=list_pointing_names)
def mosaic(self, targetname=None, list_pointings="all", **kwargs):
self.init_mosaic(targetname=targetname, list_pointings=list_pointings,
**kwargs)
# Doing the mosaic with mad
self.pipes_mosaic[targetname].madcombine()
# Constructing the images for that mosaic
if self.__phangs:
filter_list = kwargs.pop("filter_list", default_PHANGS_filter_list)
else:
filter_list = kwargs.pop("filter_list", default_filter_list)
def init_combine(self, targetname=None, list_pointings="all",
folder_offset_table=None, offset_table_name=None, **kwargs):
"""Prepare the combination of targets
Input
-----
targetname: str [None]
Name of target
list_pointings: list [or "all"=default]
List of pointings (e.g., [1,2,3])
offset_table_name: str
Name of Offset table
"""
log_filename = kwargs.pop("log_filename", "{0}_combine_{1}.log".format(targetname, version_pack))
self.pipes_combine[targetname] = MusePointings(targetname=targetname,
list_pointings=list_pointings,
rc_filename=self.targets[targetname].rc_filename,
cal_filename=self.targets[targetname].cal_filename,
folder_config=self.targets[targetname].folder_config,
offset_table_name=offset_table_name,
folder_offset_table=folder_offset_table,
log_filename=log_filename, **kwargs)
def combine_target(self, targetname=None, **kwargs):
"""Run the combine recipe. Shortcut for combine[targetname].run_combine()
"""
self.init_combine(targetname=targetname, **kwargs)
self.pipes_combine[targetname].run_combine()
def combine_target_pointings_withmasks(self, targetname=None,
combine=True, masks=True, perpointing_combine=True, mosaic_wcs=True,
**kwargs):
"""Run the combine for individual exposures first building up
a mask.
"""
self.init_combine(targetname=targetname, **kwargs)
self.pipes_combine[targetname].run_combine_all_single_pointings_withmasks(
combine=combine, masks=masks, perpointing_combine=perpointing_combine,
mosaic_wcs=mosaic_wcs, **kwargs)
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 10:48:57 2020
@author: Manuel Camargo
"""
import os
import subprocess
import copy
import multiprocessing
from multiprocessing import Pool
import itertools
import traceback
import numpy as np
import pandas as pd
import math
import random
from hyperopt import tpe
from hyperopt import Trials, hp, fmin, STATUS_OK, STATUS_FAIL
import utils.support as sup
from utils.support import timeit
import readers.log_splitter as ls
import readers.log_reader as lr
from support_modules.writers import xes_writer as xes
from support_modules.writers import xml_writer as xml
import analyzers.sim_evaluator as sim
import core_modules.sequences_generator.structure_miner as sm
import core_modules.sequences_generator.structure_params_miner as spm
from tqdm import tqdm
import time
class StructureOptimizer():
"""
Hyperparameter-optimizer class
"""
class Decorators(object):
@classmethod
def safe_exec(cls, method):
"""
Decorator to safe execute methods and return the state
----------
method : Any method.
Returns
-------
dict : execution status
"""
def safety_check(*args, **kw):
status = kw.get('status', method.__name__.upper())
response = {'values': [], 'status': status}
if status == STATUS_OK:
try:
response['values'] = method(*args)
except Exception as e:
print(e)
traceback.print_exc()
response['status'] = STATUS_FAIL
return response
return safety_check
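# Illustrative sketch (not part of the original code): how safe_exec chains
# stage statuses. Each decorated stage receives the previous stage's status
# via the `status` kwarg; the wrapped body only runs while that status is
# STATUS_OK, so one failing stage short-circuits every stage after it.
#
#   rsp = self._temp_path_redef(trial_stg, status=STATUS_OK)
#   # success   -> {'values': <updated settings>, 'status': STATUS_OK}
#   # exception -> {'values': [], 'status': STATUS_FAIL}
#   rsp = self._mine_structure(trial_stg, status=rsp['status'])  # skipped on failure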
def __init__(self, settings, log):
"""constructor"""
self.space = self.define_search_space(settings)
# Read inputs
self.log = log
self._split_timeline(0.8, settings['read_options']['one_timestamp'])
self.org_log = copy.deepcopy(log)
self.org_log_train = copy.deepcopy(self.log_train)
self.org_log_valdn = copy.deepcopy(self.log_valdn)
# Load settings
self.settings = settings
self.temp_output = os.path.join('output_files', sup.folder_id())
if not os.path.exists(self.temp_output):
os.makedirs(self.temp_output)
self.file_name = os.path.join(self.temp_output, sup.file_id(prefix='OP_'))
# Results file
if not os.path.exists(self.file_name):
open(self.file_name, 'w').close()
# Trials object to track progress
self.bayes_trials = Trials()
self.best_output = None
self.best_parms = dict()
self.best_similarity = 0
@staticmethod
def define_search_space(settings):
var_dim = {'alg_manag': hp.choice('alg_manag',
settings['alg_manag']),
'gate_management': hp.choice('gate_management',
settings['gate_management'])}
if settings['mining_alg'] in ['sm1', 'sm3']:
var_dim['epsilon'] = hp.uniform('epsilon',
settings['epsilon'][0],
settings['epsilon'][1])
var_dim['eta'] = hp.uniform('eta',
settings['eta'][0],
settings['eta'][1])
elif settings['mining_alg'] == 'sm2':
var_dim['concurrency'] = hp.uniform('concurrency',
settings['concurrency'][0],
settings['concurrency'][1])
csettings = copy.deepcopy(settings)
for key in var_dim.keys():
csettings.pop(key, None)
space = {**var_dim, **csettings}
return space
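# Illustrative sketch (not part of the original code): drawing one sample
# from the space built above, assuming mining_alg == 'sm3'; the printed
# values are made up.
# from hyperopt.pyll.stochastic import sample
# print(sample(space))
# # {'alg_manag': 'repair', 'gate_management': 'discovery',
# #  'epsilon': 0.42, 'eta': 0.71, 'mining_alg': 'sm3', ...}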
def execute_trials(self):
parameters = (
spm.StructureParametersMiner.mine_resources(
self.settings, self.log_train))
self.log_train = copy.deepcopy(self.org_log_train)
def exec_pipeline(trial_stg):
print('train split:',
len(pd.DataFrame(self.log_train.data).caseid.unique()),
', valdn split:',
len(pd.DataFrame(self.log_valdn).caseid.unique()),
sep=' ')
# Vars initialization
status = STATUS_OK
exec_times = dict()
sim_values = []
# Path redefinition
rsp = self._temp_path_redef(trial_stg,
status=status,
log_time=exec_times)
status = rsp['status']
trial_stg = rsp['values'] if status == STATUS_OK else trial_stg
# Structure mining
rsp = self._mine_structure(trial_stg,
status=status,
log_time=exec_times)
status = rsp['status']
# Parameters extraction
rsp = self._extract_parameters(trial_stg,
rsp['values'],
copy.deepcopy(parameters),
status=status,
log_time=exec_times)
status = rsp['status']
# Simulate model
rsp = self._simulate(trial_stg,
self.log_valdn,
status=status,
log_time=exec_times)
status = rsp['status']
sim_values = rsp['values'] if status == STATUS_OK else sim_values
# Save times
self._save_times(exec_times, trial_stg, self.temp_output)
# Optimizer results
rsp = self._define_response(trial_stg, status, sim_values)
# reinstate log
self.log = copy.deepcopy(self.org_log)
self.log_train = copy.deepcopy(self.org_log_train)
self.log_valdn = copy.deepcopy(self.org_log_valdn)
print("-- End of trial --")
return rsp
# Optimize
best = fmin(fn=exec_pipeline,
space=self.space,
algo=tpe.suggest,
max_evals=self.settings['max_eval'],
trials=self.bayes_trials,
show_progressbar=False)
# Save results
try:
results = (pd.DataFrame(self.bayes_trials.results)
.sort_values('loss', ascending=True))
self.best_output = (results[results.status=='ok']
.head(1).iloc[0].output)
self.best_parms = best
self.best_similarity = (results[results.status=='ok']
.head(1).iloc[0].loss)
except Exception as e:
print(e)
@timeit(rec_name='PATH_DEF')
@Decorators.safe_exec
def _temp_path_redef(self, settings, **kwargs) -> dict:
# Paths redefinition
settings['output'] = os.path.join(self.temp_output, sup.folder_id())
if settings['alg_manag'] == 'repair':
settings['aligninfo'] = os.path.join(
settings['output'],
'CaseTypeAlignmentResults.csv')
settings['aligntype'] = os.path.join(
settings['output'],
'AlignmentStatistics.csv')
# Output folder creation
if not os.path.exists(settings['output']):
os.makedirs(settings['output'])
os.makedirs(os.path.join(settings['output'], 'sim_data'))
# Create customized event-log for the external tools
xes.XesWriter(self.log_train, settings)
return settings
@timeit(rec_name='MINING_STRUCTURE')
@Decorators.safe_exec
def _mine_structure(self, settings, **kwargs) -> list:
structure_miner = sm.StructureMiner(settings, self.log_train)
structure_miner.execute_pipeline()
if structure_miner.is_safe:
return [structure_miner.bpmn, structure_miner.process_graph]
else:
raise RuntimeError('Mining Structure error')
@timeit(rec_name='EXTRACTING_PARAMS')
@Decorators.safe_exec
def _extract_parameters(self, settings, structure, parameters,
**kwargs) -> None:
bpmn, process_graph = structure
p_extractor = spm.StructureParametersMiner(self.log_train,
bpmn,
process_graph,
settings)
num_inst = len(self.log_valdn.caseid.unique())
# Get minimum date
start_time = (self.log_valdn
.start_timestamp
.min().strftime("%Y-%m-%dT%H:%M:%S.%f+00:00"))
p_extractor.extract_parameters(num_inst,
start_time,
parameters['resource_pool'])
if p_extractor.is_safe:
parameters = {**parameters, **p_extractor.parameters}
# print parameters in xml bimp format
xml.print_parameters(os.path.join(
settings['output'],
settings['file'].split('.')[0]+'.bpmn'),
os.path.join(settings['output'],
settings['file'].split('.')[0]+'.bpmn'),
parameters)
self.log_valdn.rename(columns={'user': 'resource'}, inplace=True)
self.log_valdn['source'] = 'log'
self.log_valdn['run_num'] = 0
self.log_valdn['role'] = 'SYSTEM'
self.log_valdn = self.log_valdn[
~self.log_valdn.task.isin(['Start', 'End'])]
else:
raise RuntimeError('Parameters extraction error')
@timeit(rec_name='SIMULATION_EVAL')
@Decorators.safe_exec
def _simulate(self, settings, data, **kwargs) -> list:
def pbar_async(p, msg):
pbar = tqdm(total=reps, desc=msg)
processed = 0
while not p.ready():
cprocessed = (reps - p._number_left)
if processed < cprocessed:
increment = cprocessed - processed
pbar.update(n=increment)
processed = cprocessed
time.sleep(1)
pbar.update(n=(reps - processed))
p.wait()
pbar.close()
reps = settings['repetitions']
cpu_count = multiprocessing.cpu_count()
w_count = reps if reps <= cpu_count else cpu_count
pool = Pool(processes=w_count)
# Simulate
args = [(settings, rep) for rep in range(reps)]
p = pool.map_async(self.execute_simulator, args)
pbar_async(p, 'simulating:')
# Read simulated logs
p = pool.map_async(self.read_stats, args)
pbar_async(p, 'reading simulated logs:')
# Evaluate
args = [(settings, data, log) for log in p.get()]
if len(self.log_valdn.caseid.unique()) > 1000:
pool.close()
results = [self.evaluate_logs(arg)
for arg in tqdm(args, 'evaluating results:')]
# Save results
sim_values = list(itertools.chain(*results))
else:
p = pool.map_async(self.evaluate_logs, args)
pbar_async(p, 'evaluating results:')
pool.close()
# Save results
sim_values = list(itertools.chain(*p.get()))
return sim_values
@staticmethod
def read_stats(args):
def read(settings, rep):
"""Reads the simulation results stats
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
m_settings = dict()
m_settings['output'] = settings['output']
m_settings['file'] = settings['file']
column_names = {'resource': 'user'}
m_settings['read_options'] = settings['read_options']
m_settings['read_options']['timeformat'] = '%Y-%m-%d %H:%M:%S.%f'
m_settings['read_options']['column_names'] = column_names
temp = lr.LogReader(os.path.join(
m_settings['output'], 'sim_data',
m_settings['file'].split('.')[0] + '_'+str(rep + 1)+'.csv'),
m_settings['read_options'],
verbose=False)
temp = pd.DataFrame(temp.data)
temp.rename(columns={'user': 'resource'}, inplace=True)
temp['role'] = temp['resource']
temp['source'] = 'simulation'
temp['run_num'] = rep + 1
temp = temp[~temp.task.isin(['Start', 'End'])]
return temp
return read(*args)
@staticmethod
def evaluate_logs(args):
def evaluate(settings, data, sim_log):
"""Evaluates one simulated log against the validation data
Args:
settings (dict): evaluation settings
data (DataFrame): validation log
sim_log (DataFrame): simulated log to compare
"""
rep = (sim_log.iloc[0].run_num)
sim_values = list()
evaluator = sim.SimilarityEvaluator(
data,
sim_log,
settings,
max_cases=1000)
evaluator.measure_distance('dl')
sim_values.append({**{'run_num': rep}, **evaluator.similarity})
return sim_values
return evaluate(*args)
@staticmethod
def execute_simulator(args):
def sim_call(settings, rep):
"""Executes BIMP Simulations.
Args:
settings (dict): Path to jar and file names
rep (int): repetition number
"""
args = ['java', '-jar', settings['bimp_path'],
os.path.join(settings['output'],
settings['file'].split('.')[0]+'.bpmn'),
'-csv',
os.path.join(settings['output'], 'sim_data',
settings['file']
.split('.')[0]+'_'+str(rep+1)+'.csv')]
subprocess.run(args, check=True, stdout=subprocess.PIPE)
sim_call(*args)
@staticmethod
def _save_times(times, settings, temp_output):
if times:
times = [{**{'output': settings['output']}, **times}]
log_file = os.path.join(temp_output, 'execution_times.csv')
if not os.path.exists(log_file):
open(log_file, 'w').close()
if os.path.getsize(log_file) > 0:
sup.create_csv_file(times, log_file, mode='a')
else:
sup.create_csv_file_header(times, log_file)
def _define_response(self, settings, status, sim_values, **kwargs) -> dict:
response = dict()
measurements = list()
data = {'alg_manag': settings['alg_manag'],
'gate_management': settings['gate_management'],
'output': settings['output']}
# Miner parms
if settings['mining_alg'] in ['sm1', 'sm3']:
data['epsilon'] = settings['epsilon']
data['eta'] = settings['eta']
elif settings['mining_alg'] == 'sm2':
data['concurrency'] = settings['concurrency']
else:
raise ValueError(settings['mining_alg'])
similarity = 0
# response['params'] = settings
response['output'] = settings['output']
if status == STATUS_OK:
similarity = np.mean([x['sim_val'] for x in sim_values])
loss = (1 - similarity)
response['loss'] = loss
response['status'] = status if loss > 0 else STATUS_FAIL
for sim_val in sim_values:
measurements.append({
**{'similarity': sim_val['sim_val'],
'sim_metric': sim_val['metric'],
'status': response['status']},
**data})
else:
response['status'] = status
measurements.append({**{'similarity': 0,
'sim_metric': 'dl',
'status': response['status']},
**data})
if os.path.getsize(self.file_name) > 0:
sup.create_csv_file(measurements, self.file_name, mode='a')
else:
sup.create_csv_file_header(measurements, self.file_name)
return response
def _split_timeline(self, size: float, one_ts: bool) -> None:
"""
Split an event log dataframe by time to peform split-validation.
prefered method time splitting removing incomplete traces.
If the testing set is smaller than the 10% of the log size
the second method is sort by traces start and split taking the whole
traces no matter if they are contained in the timeframe or not
Parameters
----------
size : float, validation percentage.
one_ts : bool, Support only one timestamp.
"""
# Split log data
splitter = ls.LogSplitter(self.log.data)
# train, valdn = splitter.split_log('random', size, one_ts)
train, valdn = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(self.log.data)
# Check size and change time splitting method if necessary
if len(valdn) < int(total_events*0.1):
train, valdn = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
valdn = pd.DataFrame(valdn)
train = pd.DataFrame(train)
# If the log is big sample train partition
train = self._sample_log(train)
# Save partitions
self.log_valdn = (valdn.sort_values(key, ascending=True)
.reset_index(drop=True))
self.log_train = copy.deepcopy(self.log)
self.log_train.set_data(train.sort_values(key, ascending=True)
.reset_index(drop=True).to_dict('records'))
@staticmethod
def _sample_log(train):
def sample_size(p_size, c_level, c_interval):
"""
p_size : population size.
c_level : confidence level.
c_interval : confidence interval.
"""
c_level_constant = {50: .67, 68: .99, 90: 1.64, 95: 1.96, 99: 2.57}
Z = 0.0
p = 0.5
e = c_interval/100.0
N = p_size
n_0 = 0.0
n = 0.0
# DEVIATIONS FOR THAT CONFIDENCE LEVEL
Z = c_level_constant[c_level]
# CALC SAMPLE SIZE
n_0 = ((Z**2) * p * (1-p)) / (e**2)
# ADJUST SAMPLE SIZE FOR FINITE POPULATION
n = n_0 / (1 + ((n_0 - 1) / float(N)) )
return int(math.ceil(n)) # THE SAMPLE SIZE
cases = list(train.caseid.unique())
if len(cases) > 1000:
sample_sz = sample_size(len(cases), 95.0, 3.0)
scases = random.sample(cases, sample_sz)
train = train[train.caseid.isin(scases)]
return train
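# Worked example (illustrative) of the sample-size computation above:
# with N = 5000 cases, 95% confidence (Z = 1.96) and a 3-point interval,
#   n_0 = (1.96**2 * 0.5 * 0.5) / 0.03**2 ~= 1067.1
#   n   = n_0 / (1 + (n_0 - 1) / 5000)   ~= 879.6  ->  880 cases are sampled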
|
|
import retro
import gym
import numpy as np
from DQ import DuelingDQNPrioritizedReplay
from matplotlib import pyplot as plt
import cv2
class SonicDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the Sonic game.
"""
# B is do nothing
# down
# def __init__(self, env):
# super(SonicDiscretizer, self).__init__(env)
# buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
# actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'],
# ['DOWN', 'B'], ['B']]
# self._actions = []
# for action in actions:
# arr = np.array([False] * 12)
# for button in action:
# arr[buttons.index(button)] = True
# self._actions.append(arr)
# self.action_space = gym.spaces.Discrete(len(self._actions))
def __init__(self, env):
super(SonicDiscretizer, self).__init__(env)
buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
actions = [['RIGHT'], ['DOWN', 'B'], ['B'], ['RIGHT', 'DOWN']]
self._actions = []
for action in actions:
arr = np.array([False] * 12)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a):
return self._actions[a].copy()
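# Illustrative sketch (not part of the original code): with the action list
# above, discrete action 0 maps to a 12-element boolean button array where
# only index 7 ("RIGHT") is pressed:
# wrapped = SonicDiscretizer(env)
# buttons = wrapped.action(0)
# # buttons -> array of 12 bools, True only at the "RIGHT" position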
# function starter.
if __name__ == "__main__":
file = open("sonicQ_reward.txt", "w")
# load SonicGame
env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
# ActionWrapper for sonicGame
env = SonicDiscretizer(env)
print(env.action_space)
# env = env.unwrapped
# initialize the argument for DeepQ network.
# N_A = 7
N_A = env.action_space.n
N_S = 1120
MEMORY_CAPACITY = 50000
TARGET_REP_ITER = 1000
MAX_EPISODES = 3000
E_GREEDY = 0.95
E_INCREMENT = 0.00001
GAMMA = 0.99
LR = 0.0001
BATCH_SIZE = 32
HIDDEN = [400, 400]
RENDER = True
RL = DuelingDQNPrioritizedReplay(n_actions=N_A, n_features=N_S,
learning_rate=LR, e_greedy=E_GREEDY,
reward_decay=GAMMA, hidden=HIDDEN,
batch_size=BATCH_SIZE, replace_target_iter=TARGET_REP_ITER,
memory_size=MEMORY_CAPACITY, e_greedy_increment=E_INCREMENT, output_graph=True)
total_steps = 0
rewardY = []
iterX = []
for iteration in range(MAX_EPISODES):
ob = env.reset()
ac = env.action_space.sample()
# use cv2 to scale the image down.
inx, iny, inc = env.observation_space.shape
inx = int(inx / 8)
iny = int(iny / 8)
n = 0
fitness_max_current = 0
fitness_current = 0
x_pos_max = 0
x_pos_end = 0
counter = 0
input_array = []
output_array = []
while True:
# if total_steps > MEMORY_CAPACITY:
# env.render()
# ---------- scale the input state ----------
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
for x in ob:
for y in x:
input_array.append(y)
current_state = np.asarray(input_array)
input_array.clear()
# get next action from the current state
action = RL.choose_action(current_state)
next_ob, reward, done, info = env.step(action)
# ---------- scale the output state ----------
next_state = cv2.resize(next_ob, (inx, iny))
next_state = cv2.cvtColor(next_state, cv2.COLOR_BGR2GRAY)
next_state = np.reshape(next_state, (inx, iny))
for x in next_state:
for y in x:
output_array.append(y)
next_state = np.asarray(output_array)
output_array.clear()
x_pos_end = info['screen_x_end']
xpos = info['x']
if xpos > x_pos_max:
fitness_current += 1
x_pos_max = xpos
if xpos == x_pos_end and xpos > 500:
fitness_current += 100000
break
if fitness_current > fitness_max_current:
fitness_max_current = fitness_current
counter = 0
else:
counter += 1
RL.store_transition(current_state, action, fitness_current, next_state)
if total_steps > MEMORY_CAPACITY:
RL.learn()
if done or counter == 250:
# iterX.append(iteration)
# rewardY.append(fitness_current)
print(iteration)
print(fitness_current)
if not file.closed: # the file is closed after episode 1000 (see below)
file.write(str(fitness_current) + '\n')
# print("-----------")
break
if iteration == 1000:
file.close() # stop recording rewards after 1000 episodes
# if iteration == 200:
# plt.plot(iterX, rewardY)
# plt.suptitle('Sonic Reward Chart')
# plt.ylabel('Sonic Reward')
# plt.xlabel('Iteration')
# plt.show()
ob = next_ob
total_steps += 1
|
|
import numpy as np
from pdb import set_trace
from tinylib import logmass_statistic
from contcnet import MultivariateGaussain
from utmLib.clses import Timer
from utmLib.ml.GBN import GBN
def predict_wrapper(model, test):
pred = []
for item in test:
unknown = np.where( np.isnan(item) )[0]
pred.append( model._predict_(item, unknown) )
return np.array(pred)
def run_exp(dataset, options):
print('Running GBN ....')
np.random.seed(options.seed)
_train, _valid, test_set = dataset
# validation set not needed separately; stack it into the training data
train = np.vstack([_train, _valid])
myclock = Timer()
g = GBN.chowliu_tree(train)
m = GBN(g).fit(train, var_thresh = 4e-3)
myclock.ring('CLGBN fit')
train_logmass = [np.log(m.mass(x)) for x in train]
test_logmass = [np.log(m.mass(x)) for x in options.gt]
print('CLGBN train logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(train_logmass) ))
print('CLGBN test logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(test_logmass) ))
myclock.ring('CLGBN logmass computation')
mg = MultivariateGaussain().fit(train)
myclock.ring('MG fit')
train_logmass = mg.mass(train, logmode = True)
test_logmass = mg.mass(options.gt, logmode = True)
print('MG train logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(train_logmass) ))
print('MG test logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(test_logmass) ))
myclock.ring('MG logmass computation')
# save the trained model (skipped)
result = []
for i, _ in enumerate(options.query_var):
# these two methods predict both the query and the missing variables
test = test_set[i].copy()
model_result = []
pred = predict_wrapper(m, test)
model_result.append( ('CLGBN', pred ))
pred_mass = [np.log(m.mass(x)) for x in pred]
if isinstance(options.missing[i], float) and options.missing[i] == 0:
print('S{}, CLGBN pred logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format(i, *logmass_statistic(pred_mass) ))
myclock.ring('CLGBN predict')
pred = predict_wrapper(mg, test)
model_result.append( ('MG', pred ) )
pred_mass = mg.mass(pred, logmode = True)
if isinstance(options.missing[i], float) and options.missing[i] == 0:
print('S{}, MG pred logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format(i, *logmass_statistic(pred_mass) ))
myclock.ring('MG predict')
result.append(model_result)
return result
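# Illustrative usage sketch (the attribute names on `options` are inferred
# from the code above, not from the library's documentation):
# from types import SimpleNamespace
# options = SimpleNamespace(seed=0, gt=groundtruth_rows,
#                           query_var=[q_vars_1, q_vars_2],
#                           missing=[0.0, 0.2])
# results = run_exp((train_rows, valid_rows, [test_1, test_2]), options)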
|
|
#! /usr/bin/env python
import argparse
import cv2
import sys
import time
import datetime
import imutils
from collections import deque
import numpy as np
import serial
ser = serial.Serial('COM18', 9600) #initializing serial communication for Zigbee
cam_device = 1
laser = (0,0)
maxlen=10
pts = deque(maxlen=10)
detected = ""
radius= 0
circle_pos = (0,0)
boundary_x=(200,220) #left and down
boundary_y=(400,180) #right and up
laser_position = ""
X = ""
Y = ""
stop_trigger=False
send_trigger=""
border_trigger = False
x_border_trig = False
y_border_trig = False
center = None
class LaserTracker(object):
def __init__(self, cam_width=640, cam_height=480, hue_min=20, hue_max=160,
sat_min=100, sat_max=255, val_min=200, val_max=256,
display_thresholds=True):
"""
* ``cam_width`` x ``cam_height`` -- This should be the size of the
image coming from the camera. Default is 640x480.
Threshold values in the HSV color space for a RED laser pointer are
determined by:
* ``hue_min``, ``hue_max`` -- Min/Max allowed Hue values
* ``sat_min``, ``sat_max`` -- Min/Max allowed Saturation values
* ``val_min``, ``val_max`` -- Min/Max allowed pixel values
If the dot from the laser pointer doesn't fall within these values, it
will be ignored.
* ``display_thresholds`` -- if True, additional windows will display
values for threshold image channels.
"""
self.cam_width = cam_width
self.cam_height = cam_height
self.hue_min = hue_min
self.hue_max = hue_max
self.sat_min = sat_min
self.sat_max = sat_max
self.val_min = val_min
self.val_max = val_max
self.display_thresholds = display_thresholds
self.capture = None # camera capture device
self.channels = {
'hue': None,
'saturation': None,
'value': None,
'laser': None,
}
def create_and_position_window(self, name, xpos, ypos):
"""Creates a named widow placing it on the screen at (xpos, ypos)."""
# Create a window
cv2.namedWindow(name)
# Resize it to the size of the camera image
cv2.resizeWindow(name, self.cam_width, self.cam_height)
# Move to (xpos,ypos) on the screen
cv2.moveWindow(name, xpos, ypos)
def setup_camera_capture(self, device_num):
"""Perform camera setup for the device number (default device = 0).
Returns a reference to the camera Capture object.
"""
try:
pass
#device = int(device_num)
#sys.stdout.write("Using Camera Device: {0}\n".format(device_num))
except (IndexError, ValueError):
# assume we want the 1st device
device_num = ""
#sys.stderr.write("Invalid Device. Using default device 0\n")
# Try to start capturing frames
self.capture = cv2.VideoCapture(device_num)
if not self.capture.isOpened():
sys.stderr.write("Faled to Open Capture device. Quitting.\n")
sys.exit(1)
# set the wanted image size from the camera
self.capture.set(
cv2.CAP_PROP_FRAME_WIDTH,
self.cam_width
)
self.capture.set(
cv2.CAP_PROP_FRAME_HEIGHT,
self.cam_height
)
return self.capture
def handle_quit(self, delay=10):
"""Quit the program if the user presses "Esc" or "q"."""
key = cv2.waitKey(delay)
c = chr(key & 255)
if c in ['q', 'Q', chr(27)]:
sys.exit(0)
def threshold_image(self, channel):
if channel == "hue":
minimum = self.hue_min
maximum = self.hue_max
elif channel == "saturation":
minimum = self.sat_min
maximum = self.sat_max
elif channel == "value":
minimum = self.val_min
maximum = self.val_max
(t, tmp) = cv2.threshold(
self.channels[channel], # src
maximum, # threshold value
0, # we don't care because of the selected type
cv2.THRESH_TOZERO_INV # threshold type
)
(t, self.channels[channel]) = cv2.threshold(
tmp, # src
minimum, # threshold value
255, # maxvalue
cv2.THRESH_BINARY # type
)
if channel == 'hue':
# only works for filtering red color because the range for the hue is split
self.channels['hue'] = cv2.bitwise_not(self.channels['hue'])
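# Net effect of the two thresholds above: a band-pass that keeps pixels with
# minimum <= value <= maximum (THRESH_TOZERO_INV zeroes everything above the
# maximum, then THRESH_BINARY keeps everything above the minimum). The hue
# channel is inverted because red wraps around the ends of the hue range.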
def detect(self, frame):
global laser
global pts
global detected
global maxlen
global radius
global circle_pos
global laser_position
global boundary_x
global boundary_y
global X, Y
global center
global ser
global stop_trigger, send_trigger
global y_border_trig
global x_border_trig
global border_trigger
#Point pt;
#pt.x = 10;
#pt.y = 8;
# resize the frame and convert it to the HSV color space
frame = imutils.resize(frame, width=600)
hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# split the video frame into color channels
h, s, v = cv2.split(hsv_img)
self.channels['hue'] = h
self.channels['saturation'] = s
self.channels['value'] = v
# Threshold ranges of HSV components; storing the results in place
self.threshold_image("hue")
self.threshold_image("saturation")
self.threshold_image("value")
# Perform an AND on HSV components to identify the laser!
self.channels['laser'] = cv2.bitwise_and(
self.channels['hue'],
self.channels['value']
)
self.channels['laser'] = cv2.bitwise_and(
self.channels['saturation'],
self.channels['laser']
)
# Merge the HSV components back together.
hsv_image = cv2.merge([self.channels['hue'],self.channels['saturation'],self.channels['value'],])
(_, cnts, _) = cv2.findContours(self.channels['laser'].copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
#
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
# print len(cnts)
c = max(cnts, key=cv2.contourArea)
(circle_pos, radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
# find the centre of the circle only if the blob has nonzero area,
# otherwise the division below would fail
if M["m00"] != 0:
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
laser = center
else: #else if the laser is too small (far away), then don't find the centre of it
laser = circle_pos
#msg = "laser too far"
# only proceed if the radius meets a minimum size
if True:#radius > 5:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(laser[0]), int(laser[1])), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
laser = center
detected = "Yes"
cv2.line(frame,(300,230),laser,(0,255,0),5)
#else:
#detected = "No"
#area = ""
#boundary_x=(left,down)
#boundary_y=(right,up)
if laser != None and boundary_x != None:
#stop_trigger = True
if x_border_trig == True or y_border_trig == True:
print 'outside border'
stop_trigger = False
ser.write( X + "-" + Y + "'\n")
elif border_trigger == False or y_border_trig == False:
if stop_trigger == False:
print 'inside border'
ser.write( "---'\n")
stop_trigger = True
if laser[1] > boundary_x[1]:
X = "D"
x_border_trig = True
cv2.line(frame,(300,230),laser,(150,0,255),5)
if laser[1] < boundary_y[1]:
X = "U"
x_border_trig = True
cv2.line(frame,(300,230),laser,(0,0,255),5)
if laser[1] > boundary_y[1] and laser[1] < boundary_x[1]: #inside square
X = "-"
x_border_trig = False
################################################################
if laser[0] > boundary_x[0]:
Y = "-"
y_border_trig = False
if laser[0] > boundary_y[0]:
Y = "R"
y_border_trig = True
cv2.line(frame,(300,230),laser,(0,0,255),5)
if laser[0] < boundary_y[0] and laser[0] < boundary_x[0]:
Y = "L"
y_border_trig = True
# else: #laser pointer outside the square
# send_trigger = "-"
# print "inside square"
# if stop_trigger == True:
# ser.write("S-S\n")
# stop_trigger = False
else:
detected = "No"
#laser_boundary= "-"
laser_position = X + ":" + Y #+ ":" + L + ":" + R
#################################################
# pts.appendleft(center)
# # loop over the set of tracked points
# for i in xrange(1, len(pts)):
# # if either of the tracked points are None, ignore
# # them
# if pts[i - 1] is None or pts[i] is None:
# continue
#
# # otherwise, compute the thickness of the line and
# # draw the connecting lines
# thickness = int(np.sqrt(maxlen / float(i + 1)) * 2.5)
# cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
#
#################################################
cv2.putText(frame, "contour area: {}".format(laser), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, "laser: {}".format(detected), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
#cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.putText(frame, "Radius: {}".format(radius), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, "{}".format(laser_position), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# update the points queue
#left#down #right#up
cv2.rectangle(frame, boundary_x, boundary_y, 150, thickness=3, lineType=3, shift=0)
cv2.imshow('mask', self.channels['laser'])
cv2.imshow('RGB_VideoFrame', frame)
################################################
return hsv_image
def display(self, img, frame):
"""Display the combined image and (optionally) all other image channels
NOTE: default color space in OpenCV is BGR.
"""
#cv2.imshow('RGB_VideoFrame', frame)
#cv2.imshow('LaserPointer', self.channels['laser'])
#if self.display_thresholds:
# cv2.imshow('Thresholded_HSV_Image', img)
# cv2.imshow('Hue', self.channels['hue'])
# cv2.imshow('Saturation', self.channels['saturation'])
# cv2.imshow('Value', self.channels['value'])
def setup_windows(self):
sys.stdout.write("Using OpenCV version: {0}\n".format(cv2.__version__))
# create output windows
self.create_and_position_window('LaserPointer', 0, 0)
self.create_and_position_window('RGB_VideoFrame',
10 + self.cam_width, 0)
if self.display_thresholds:
self.create_and_position_window('Thresholded_HSV_Image', 10, 10)
self.create_and_position_window('Hue', 20, 20)
self.create_and_position_window('Saturation', 30, 30)
self.create_and_position_window('Value', 40, 40)
def run(self):
# Set up window positions
#self.setup_windows()
# Set up the camera capture
global cam_device
self.setup_camera_capture(cam_device)
while True:
# 1. capture the current image
success, frame = self.capture.read()
if not success: # no image captured... end the processing
#sys.stderr.write("Could not read camera frame. Quitting\n")
#sys.exit(1)
#time.sleep(1)
#self.setup_windows()
self.setup_camera_capture(1)
success, frame = self.capture.read()
hsv_image = self.detect(frame)
self.display(hsv_image, frame)
self.handle_quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run the Laser Tracker')
parser.add_argument('-W', '--width',
default=640,
type=int,
help='Camera Width'
)
parser.add_argument('-H', '--height',
default=480,
type=int,
help='Camera Height'
)
parser.add_argument('-u', '--huemin',
default=20,
type=int,
help='Hue Minimum Threshold'
)
parser.add_argument('-U', '--huemax',
default=160,
type=int,
help='Hue Maximum Threshold'
)
parser.add_argument('-s', '--satmin',
default=100,
type=int,
help='Saturation Minimum Threshold'
)
parser.add_argument('-S', '--satmax',
default=255,
type=int,
help='Saturation Maximum Threshold'
)
parser.add_argument('-v', '--valmin',
default=200,
type=int,
help='Value Minimum Threshold'
)
parser.add_argument('-V', '--valmax',
default=255,
type=int,
help='Value Maximum Threshold'
)
parser.add_argument('-d', '--display',
action='store_true',
help='Display Threshold Windows'
)
params = parser.parse_args()
tracker = LaserTracker(
cam_width=params.width,
cam_height=params.height,
hue_min=params.huemin,
hue_max=params.huemax,
sat_min=params.satmin,
sat_max=params.satmax,
val_min=params.valmin,
val_max=params.valmax,
display_thresholds=False
)
tracker.run()
|
|
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import numpy as np
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, GradientBoostingClassifier, \
RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from art.classifiers.scikitlearn import ScikitlearnDecisionTreeClassifier, ScikitlearnExtraTreeClassifier, \
ScikitlearnAdaBoostClassifier, ScikitlearnBaggingClassifier, ScikitlearnExtraTreesClassifier, \
ScikitlearnGradientBoostingClassifier, ScikitlearnRandomForestClassifier, ScikitlearnLogisticRegression, \
ScikitlearnSVC
from art.classifiers import SklearnClassifier
from art.utils import load_dataset
logger = logging.getLogger('testLogger')
np.random.seed(seed=1234)
(x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
class TestScikitlearnDecisionTreeClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = DecisionTreeClassifier()
cls.classifier = ScikitlearnDecisionTreeClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnExtraTreeClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = ExtraTreeClassifier()
cls.classifier = ScikitlearnExtraTreeClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnAdaBoostClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = AdaBoostClassifier()
cls.classifier = ScikitlearnAdaBoostClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [3.07686594e-16, 2.23540978e-02, 9.77645902e-01]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnBaggingClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = BaggingClassifier()
cls.classifier = ScikitlearnBaggingClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnExtraTreesClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = ExtraTreesClassifier()
cls.classifier = ScikitlearnExtraTreesClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnGradientBoostingClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = GradientBoostingClassifier()
cls.classifier = ScikitlearnGradientBoostingClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [1.00105813e-05, 2.07276221e-05, 9.99969262e-01]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnRandomForestClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = RandomForestClassifier()
cls.classifier = ScikitlearnRandomForestClassifier(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[11:12])
y_expected = [1.0, 0.0, 0.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
class TestScikitlearnLogisticRegression(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = LogisticRegression(verbose=0, C=1, solver='newton-cg', dual=False, fit_intercept=True)
cls.classifier = ScikitlearnLogisticRegression(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.07809449, 0.36258262, 0.55932295]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
def test_class_gradient_none_1(self):
grad_predicted = self.classifier.class_gradient(x_test[0:1], label=None)
grad_expected = [[[-1.97934151, 1.36346793, -6.29719639, -2.61386204],
[-0.56940532, -0.71100581, -1.00625587, -0.68006182],
[0.64548057, 0.27053964, 1.5315429, 0.80580771]]]
for i_class in range(3):
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[0, i_class, i_shape], grad_expected[0][i_class][i_shape], 3)
def test_class_gradient_none_2(self):
grad_predicted = self.classifier.class_gradient(x_test[0:2], label=None)
grad_expected = [[[-1.97934151, 1.36346793, -6.29719639, -2.61386204],
[-0.56940532, -0.71100581, -1.00625587, -0.68006182],
[0.64548057, 0.27053964, 1.5315429, 0.80580771]],
[[-1.92147708, 1.3512013, -6.13324356, -2.53924561],
[-0.51154077, -0.72327244, -0.84230322, -0.60544527],
[0.70334512, 0.25827295, 1.69549561, 0.88042426]]]
for i_sample in range(2):
for i_class in range(3):
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[i_sample, i_class, i_shape],
grad_expected[i_sample][i_class][i_shape], 3)
def test_class_gradient_int_1(self):
grad_predicted = self.classifier.class_gradient(x_test[0:1], label=1)
grad_expected = [[[-0.56940532, -0.71100581, -1.00625587, -0.68006182]]]
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[0, 0, i_shape], grad_expected[0][0][i_shape], 3)
def test_class_gradient_int_2(self):
grad_predicted = self.classifier.class_gradient(x_test[0:2], label=1)
grad_expected = [[[-0.56940532, -0.71100581, -1.00625587, -0.68006182]],
[[-0.51154077, -0.72327244, -0.84230322, -0.60544527]]]
for i_sample in range(2):
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[i_sample, 0, i_shape], grad_expected[i_sample][0][i_shape], 3)
def test_class_gradient_list_1(self):
grad_predicted = self.classifier.class_gradient(x_test[0:1], label=[1])
grad_expected = [[[-0.56940532, -0.71100581, -1.00625587, -0.68006182]]]
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[0, 0, i_shape], grad_expected[0][0][i_shape], 3)
def test_class_gradient_list_2(self):
grad_predicted = self.classifier.class_gradient(x_test[0:2], label=[1, 2])
grad_expected = [[[-0.56940532, -0.71100581, -1.00625587, -0.68006182]],
[[0.70334512, 0.25827295, 1.69549561, 0.88042426]]]
for i_sample in range(2):
for i_shape in range(4):
self.assertAlmostEqual(grad_predicted[i_sample, 0, i_shape], grad_expected[i_sample][0][i_shape], 3)
def test_class_gradient_label_wrong_type(self):
with self.assertRaises(TypeError) as context:
_ = self.classifier.class_gradient(x_test[0:2], label=np.asarray([0, 1, 0]))
self.assertIn('Unrecognized type for argument `label` with type <class \'numpy.ndarray\'>',
str(context.exception))
def test_loss_gradient(self):
grad_predicted = self.classifier.loss_gradient(x_test[0:1], y_test[0:1])
grad_expected = [-2.5487468, 0.6524621, -7.3034525, -3.2939239]
for i in range(4):
self.assertAlmostEqual(grad_predicted[0, i], grad_expected[i], 3)
class TestScikitlearnSVCSVC(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = SVC()
cls.classifier = ScikitlearnSVC(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], 3)
def test_loss_gradient(self):
grad_predicted = self.classifier.loss_gradient(x_test[0:1], y_test[0:1])
grad_expected = [-2.7088013, 0.31372938, -7.4563603, -3.5995052]
for i in range(4):
self.assertAlmostEqual(grad_predicted[0, i], grad_expected[i], 3)
class TestScikitlearnSVCLinearSVC(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
sklearn_model = LinearSVC()
cls.classifier = ScikitlearnSVC(model=sklearn_model)
assert (type(cls.classifier) == type(SklearnClassifier(model=sklearn_model)))
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
def test_loss_gradient(self):
grad_predicted = self.classifier.loss_gradient(x_test[0:1], y_test[0:1])
grad_expected = [0.38537693, 0.5659405, -3.600912, -2.338979]
for i in range(4):
self.assertAlmostEqual(grad_predicted[0, i], grad_expected[i], 3)
class TestScikitlearnPipeline(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(seed=1234)
svc = SVC(C=1.0, kernel='rbf')
pca = PCA()
sklearn_model = Pipeline(steps=[('pca', pca), ('svc', svc)])
cls.classifier = SklearnClassifier(model=sklearn_model)
cls.classifier.fit(x=x_train, y=y_train)
def test_predict(self):
y_predicted = self.classifier.predict(x_test[0:1])
y_expected = [0.0, 0.0, 1.0]
for i in range(3):
self.assertAlmostEqual(y_predicted[0, i], y_expected[i], places=4)
def test_input_shape(self):
self.assertEqual(self.classifier.input_shape, (4,))
|
|
import numpy as np
def editDistance(s1, s2):
m=len(s1)+1
n=len(s2)+1
tbl = np.empty([m,n])
for i in xrange(m): tbl[i,0]=i
for j in xrange(n): tbl[0,j]=j
for i in xrange(1, m):
for j in xrange(1, n):
cost = 0 if s1[i-1] == s2[j-1] else 1
tbl[i,j] = min(tbl[i, j-1]+1, tbl[i-1, j]+1, tbl[i-1, j-1]+cost)
return tbl[m-1, n-1] # distance for the full strings (robust to empty inputs)
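# Worked example: editDistance("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, append g), the classic
# Wagner-Fischer dynamic-programming result.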
#test
if __name__ == "__main__":
d = -1
for i in xrange(1000000):
d=editDistance("AAAATTTTCCCCGGGGAAAANTTTTCCCCGGGG", "AAAATTTTCCCCGGGGAAAAMTTTTCCCCGGGG")
print d
|
|
# imports
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import Parameter
import numpy as np
from numpy.linalg import norm
import scipy.io as sio
import pickle
usecuda = True
usecuda = usecuda and torch.cuda.is_available()
dtype = torch.FloatTensor
if usecuda:
dtype = torch.cuda.FloatTensor
class BidirectionalTrustModel(torch.nn.Module):
# Init Method (define parameters)
def __init__(
self,
modelname,
inpsize,
obsseqlen,
taskrepsize,
capabilityRepresentationSize
):
super(BidirectionalTrustModel, self).__init__()
self.modelname = modelname # modelname
self.capabilityRepresentationSize = capabilityRepresentationSize # how many capabilities are represented
self.capabilityEdges = Variable(dtype(np.ones((self.capabilityRepresentationSize,2)) * [0.0, 1.0]), requires_grad=False) # initialized as zeros and ones
self.discretizationBins = 10 # how many bins in each dimension
self.updateProbabilityDistribution() # probability distribution tensor
self.betas = Parameter(dtype(20.0 * np.random.rand( self.capabilityRepresentationSize ))) # parameters to be optimized
self.zetas = Parameter(dtype(np.random.rand( self.capabilityRepresentationSize ))) # parameters to be optimized
# self.zetas = dtype(np.ones( self.capabilityRepresentationSize )) # or only ones
self.optimizedCapabilitiesMatrix = Parameter(dtype(np.random.rand(1, 12))) # parameters to be optimized
self.counter = 0
# Forward Method (model process)
def forward(self, inptasksobs, inptasksperf, inptaskspred, num_obs_tasks, tasksobsids, taskspredids, \
obs_task_sens_cap_seq, pred_task_sens_cap, obs_task_proc_cap_seq, pred_task_proc_cap):
# parameters
tasksPerObservationSequence = inptasksobs.shape[0] # 3 for our dataset // 2 for Soh's
observationSequencesNumber = inptasksobs.shape[1] # 49 or 63 or N for our dataset // 192 or 186 for Soh's
trustPredictionsNumber = 1 # adequate to the dataset format... // (both)
predictedTrust = Variable(dtype(np.zeros((observationSequencesNumber, trustPredictionsNumber))), requires_grad=False)
# (49, 1) for our dataset // (both)
# for each (of the 49 * 3) observations sequence prior to trust predictions
for i in range(observationSequencesNumber):
# re-initialize the capability edges
self.capabilityEdges = Variable(dtype(np.ones((self.capabilityRepresentationSize,2)) * [0.0, 1.0]), requires_grad=False)
self.updateProbabilityDistribution()
## Capabilities estimation loop
# checks each task on the observation sequence
for j in range(tasksPerObservationSequence):
self.capabilityUpdate(inptasksobs[j,i,:], inptasksperf[j,i,:], tasksobsids[j,i,0], \
obs_task_sens_cap_seq[j, i], obs_task_proc_cap_seq[j, i])
# difficulties_obs[j, i, 0])
## Trust computation loop
# computes trust for each input task... But in our dataset we consider only 1
for j in range(trustPredictionsNumber):
predictedTrust[i, j] = self.computeTrust(taskspredids[i, 0], \
pred_task_sens_cap[i, 0], pred_task_proc_cap[i, 0])
# difficulties_pred[i, 0])
trust = predictedTrust
return dtype(trust)
# Auxiliary Methods
def capabilityUpdate(self, observedTask, observedTaskPerformance, observedTaskID,
observedTaskSensingCap, observedTaskProcessingCap):
observedCapability = dtype((observedTaskSensingCap, observedTaskProcessingCap))
taskIsNonZero, taskSuccess = self.getSuccessOrFailBools(observedTaskPerformance)
capabilityEdgesChanged = False
if taskIsNonZero:
if taskSuccess:
for i in range(self.capabilityRepresentationSize):
if observedCapability[i] > self.capabilityEdges[i, 1]:
self.capabilityEdges[i, 1] = observedCapability[i]
capabilityEdgesChanged = True
elif observedCapability[i] > self.capabilityEdges[i, 0]:
self.capabilityEdges[i, 0] = observedCapability[i]
capabilityEdgesChanged = True
else:
for i in range(self.capabilityRepresentationSize):
if observedCapability[i] < self.capabilityEdges[i, 0]:
self.capabilityEdges[i, 0] = observedCapability[i]
capabilityEdgesChanged = True
elif observedCapability[i] < self.capabilityEdges[i, 1]:
self.capabilityEdges[i, 1] = observedCapability[i]
capabilityEdgesChanged = True
for i in range(self.capabilityRepresentationSize):
if self.capabilityEdges[i, 0] == self.capabilityEdges[i, 1]:
if self.capabilityEdges[i, 1] == 0.0:
self.capabilityEdges[i, 1] = 1 / self.discretizationBins
else:
self.capabilityEdges[i, 0] = self.capabilityEdges[i, 1] - 1 / self.discretizationBins
if capabilityEdgesChanged == True:
self.updateProbabilityDistribution()
return
def getSuccessOrFailBools(self, observedTaskPerformance):
if not(observedTaskPerformance[0]) and not(observedTaskPerformance[1]):
taskIsNonZero = False
taskSuccess = False
elif not(observedTaskPerformance[0]) and observedTaskPerformance[1]:
taskIsNonZero = True
taskSuccess = True
elif observedTaskPerformance[0] and not(observedTaskPerformance[1]):
taskIsNonZero = True
taskSuccess = False
else:
print("Error: performance indicators = [1, 1]")
raise SystemExit(1)
return taskIsNonZero, taskSuccess
def sigm(self, x):
return 1 / (1 + torch.exp(-x))
def computeTrust(self, inptaskspredID, predictionTaskSensingCap, predictionTaskProcessingCap):
requiredCapability = dtype((predictionTaskSensingCap, predictionTaskProcessingCap))
trust = 0.0
if self.capabilityRepresentationSize == 1:
for j in range(self.discretizationBins):
stepInDim_j = (j + 0.5) / self.discretizationBins
trust = trust + self.trustGivenCapability([stepInDim_j], requiredCapability) * self.probabilityDistribution[j]
elif self.capabilityRepresentationSize == 2:
for k in range(self.discretizationBins):
stepInDim_k = (k + 0.5) / self.discretizationBins
for j in range(self.discretizationBins):
stepInDim_j = (j + 0.5) / self.discretizationBins
trust = trust + self.trustGivenCapability([stepInDim_j, stepInDim_k],
requiredCapability) * self.probabilityDistribution[j, k]
elif self.capabilityRepresentationSize == 3:
for l in range(self.discretizationBins):
stepInDim_l = (l + 0.5) / self.discretizationBins
for k in range(self.discretizationBins):
stepInDim_k = (k + 0.5) / self.discretizationBins
for j in range(self.discretizationBins):
stepInDim_j = (j + 0.5) / self.discretizationBins
trust = trust + self.trustGivenCapability([stepInDim_j, stepInDim_k, stepInDim_l],
requiredCapability) * self.probabilityDistribution[j, k, l]
# print("capEdges: ", self.capabilityEdges)
# print("reqCap: ", requiredCapability)
# print("Trust: ", trust)
# print("------")
return trust
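# In effect (restating the code above), trust is the expectation of
# trustGivenCapability over the current belief about the capability:
#   trust = sum_c P(c) * trustGivenCapability(c, cbar)
# evaluated at the midpoints of the discretization bins in each dimension.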
def trustGivenCapability(self, capability, requiredCapability):
trust = 1.0
for i in range(self.capabilityRepresentationSize):
p_i = self.betas[i] * (requiredCapability[i] - capability[i])
d_i = ( 1 + torch.exp(p_i) ) ** ( - self.zetas[i] * self.zetas[i] )
trust = trust * d_i
return trust
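# In formula form (restating the code above), per capability dimension i:
#   d_i = (1 + exp(beta_i * (cbar_i - c_i)))**(-zeta_i**2)
# where cbar_i is the capability the prediction task requires and c_i the
# candidate capability value; trust is the product of the d_i over all
# dimensions, so it decays smoothly once cbar_i exceeds c_i.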
def updateProbabilityDistribution(self):
# Tuple to start the distribution tensor
probabilityStarter = tuple(self.discretizationBins * np.ones((self.capabilityRepresentationSize), dtype = int))
# Distribution tensors
probabilityDistribution = torch.ones(probabilityStarter, dtype = torch.int8)
# zeroProbability = torch.ones(probabilityStarter, dtype = torch.int8)
# hardcoded solution: for 1 dim
if self.capabilityRepresentationSize == 1:
for j in range(self.discretizationBins):
step = (j + 0.5) / self.discretizationBins
if step < self.capabilityEdges[0, 0]:
probabilityDistribution[j] = 0
if step > self.capabilityEdges[0, 1]:
probabilityDistribution[j] = 0
probabilityDistribution = probabilityDistribution.float()
if usecuda:
probabilityDistribution = probabilityDistribution.cuda()
probabilityDistribution = dtype(probabilityDistribution)
probabilityDistribution = probabilityDistribution / torch.sum(probabilityDistribution)
# hardcoded solution: for 2 dim
if self.capabilityRepresentationSize == 2:
for j in range(self.discretizationBins):
step = (j + 0.5) / self.discretizationBins
if step < self.capabilityEdges[0, 0]:
probabilityDistribution[j,:] = 0
if step > self.capabilityEdges[0, 1]:
probabilityDistribution[j,:] = 0
if step < self.capabilityEdges[1, 0]:
probabilityDistribution[:,j] = 0
if step > self.capabilityEdges[1, 1]:
probabilityDistribution[:,j] = 0
probabilityDistribution = probabilityDistribution.float()
if usecuda:
probabilityDistribution = probabilityDistribution.cuda()
probabilityDistribution = dtype(probabilityDistribution)
probabilityDistribution = probabilityDistribution / torch.sum(probabilityDistribution)
self.probabilityDistribution = probabilityDistribution
return
|
|
import math
import numpy
import random
import types
from itertools import izip, tee, imap
from operator import itemgetter, add
from cStringIO import StringIO
import cPickle as pickle
import tensorflow as tf
class Graph(object):
def transform_batch(self, data):
raise NotImplementedError
def train_and_loss_for_batch(self, data):
return self.train, self.loss
class DistributedGraph(object):
def __init__(self, sc, partitions, model):
self.sc = sc
self.partitions = partitions
self.params = {n:sc.broadcast(v) for n, v in self.get_initial_params(model)}
@staticmethod
def get_initial_params(builder):
with tf.Graph().as_default(), tf.Session() as session:
builder()
session.run(tf.initialize_all_variables())
return [(v.name, session.run(v)) for v in tf.all_variables()]
def initialize_all_variables(self, session):
for v in tf.all_variables():
session.run(v.assign(self.params[v.name].value))
@staticmethod
def shuffle_and_batch(dataset, partitions, batch_size, fraction=1.0):
return dataset\
.sample(False, fraction)\
.zipWithIndex()\
.map(lambda (k,v): (v % partitions, k))\
.partitionBy(partitions)\
.map(lambda (k,v): v)\
.mapPartitions(lambda items: izip(*([iter(items)] * batch_size)), preservesPartitioning=True)
@staticmethod
def get_worker_op(graph_init, params, op):
def worker_op(partition):
with tf.Graph().as_default(), tf.Session() as s:
g = graph_init()
for v in tf.all_variables():
s.run(v.assign(params[v.name].value))
for r in op(s, g, partition):
yield r
return worker_op
def evaluate(self, dataset, graph_init):
def eval_partition(s, g, items):
for b in items:
yield s.run(g.evaluate, g.transform_batch(b))
return dataset\
.mapPartitions(self.get_worker_op(graph_init, self.params, eval_partition))\
.flatMap(lambda rs: rs)\
.mean()
def train(self, dataset, graph_init, worker_epochs = 2):
# todo: add option to train until a timeout elapses instead of fixed number of epochs
# this should reduce latency from variance in batch runtime between workers
num_partitions = self.partitions
def train_on_partition(s, g, batches):
loss = 0.
# todo: only transform batch once
batches = list(batches)
num_batches = len(batches) * worker_epochs
for i in xrange(worker_epochs):
for batch in batches:
loss += s.run(g.train_and_loss_for_batch(batch), g.transform_batch(batch))[1]
if (i + 1) != worker_epochs:
random.shuffle(batches)
for v in tf.trainable_variables():
yield v.name, s.run(v/num_partitions)
yield '_loss', loss / num_batches
results = dict(dataset\
.mapPartitions(self.get_worker_op(graph_init, self.params, train_on_partition))\
.reduceByKey(add)\
.collect())
total_loss = results.pop('_loss')
for n, v in results.iteritems():
self.params[n].unpersist()
self.params[n] = self.sc.broadcast(v)
return total_loss / self.partitions
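# Illustrative usage sketch (`sc`, `MyGraph` and `dataset_rdd` are
# assumptions, not part of this module): MyGraph is a Graph subclass whose
# constructor builds the TensorFlow graph.
# dg = DistributedGraph(sc, partitions=8, model=MyGraph)
# batches = DistributedGraph.shuffle_and_batch(dataset_rdd, 8, batch_size=64)
# for epoch in xrange(10):
#     print 'epoch %d loss %f' % (epoch, dg.train(batches, MyGraph))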
|
|
'''
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
import sys
import numpy as np
import os.path
import matplotlib.pyplot as plt
def printUsage(errmsg = ""):
if len(errmsg): print("Error: " + errmsg)
print()
print("Usage: epnp_eval_plots.py </path/to/eval/files>")
print()
sys.exit()
def parseArgs():
args = sys.argv
if len(args)==1 : printUsage()
evalPath = args[1]
return evalPath
# list files of given extension in a directory
def listFiles(inpath,extension):
allfiles = os.listdir(inpath)
outfiles = list()
for s in allfiles:
filename,ext = os.path.splitext(s)
if ext==extension:
outfiles.append(filename)
return outfiles
class ParseError(Exception):
def __init__(self, msg):
self.msg = msg
def nextValidLine(file): # read line but skip empty lines and lines starting by #
while 1:
line = file.readline()
if not line: return (None,[])
line = line.rstrip()
if len(line)>0 and line[0]!='#':
words = line.split()
if len(words):
return (line,words)
def asint(txt, name):
    try: x = int(txt)
    except ValueError: raise ParseError("unable to parse %s" % name)
    return x
def asfloat(txt, name):
    try: x = float(txt)
    except ValueError: raise ParseError("unable to parse %s" % name)
    return x
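# Expected layout of an .eval file, as implied by parseEvalFile below
# (the parameter names and values here are illustrative, not normative):
#
#   epnp_benchmark accuracy
#   near 1.0 far 10.0 width 640 height 480 focal 700.0 runs 1000
#   <rows of 14 whitespace-separated floats>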
def parseEvalFile(filename):
file = open(filename,"r")
# parse file signature and benchmark type
line,words = nextValidLine(file)
if not line or len(words)!=2: raise ParseError("not an epnp benchmark file")
if words[0]!="epnp_benchmark": raise ParseError("not an epnp benchmark file")
if words[1]!="accuracy": raise ParseError("unsupported benchmark type")
line,words = nextValidLine(file)
if not line or len(words)==0: raise ParseError("unable to parse benchmark parameters")
# parse benchmark parameters (they are in a single line)
data = {}
while len(words):
data[words[0]] = asfloat(words[1],words[0])
del words[0:2]
# parse data matrix as floats
mat = []
while 1:
(line,words) = nextValidLine(file)
if not line: break
for index, value in enumerate(words):
try: words[index] = float(value)
except: raise ParseError("unable to parse a line of values")
mat.append(words)
mat = np.asarray(mat)
data['matrix'] = mat
if data['matrix'].shape[1]!=14: raise ParseError("unexpected number of values in matrix")
return data
def createTitle(data):
title = ""
if 'near' in data:
title = "Non-planar, depth in [%.1f,%.1f] m\n%dx%d px, f=%.1f px, %d runs" \
% (data['near'],data['far'],data['width'],data['height'],data['focal'],data['runs'])
elif 'plane_depth' in data:
title = "Plane at %.1f meters, angle=%.1f deg, non-planarity=%.1f cm\n%dx%d px, f=%.1f px, %d runs" \
% (data['plane_depth'],data['plane_angle'],100*data['plane_deviation'],data['width'],data['height'],data['focal'],data['runs'])
return title
def generateMeanErrorVersusNoisePlots(filename, data):
mat = data["matrix"]
title = createTitle(data)
    xlabel = r'Image noise $\sigma$ (pixels)'
selected_points = [6, 8, 12, 20, 50]
styles = ['k*-', 'ro-', 'g+-','m^-','bs-', 'cx-']
fig = plt.figure(figsize=(1,1))
plt.subplot(121)
ax = fig.gca()
for i in range(0,len(selected_points)):
idx = mat[:,0]==selected_points[i]
        if not np.any(idx): continue  # skip if no rows match this point count
noise_sigma = mat[idx,1]
rot_error_mean = mat[idx,9]
label = "%d points" % selected_points[i]
h = ax.plot(noise_sigma,rot_error_mean, styles[i], label=label)
ax.legend(loc=2)
ax.grid(color=[0.9,0.9,0.9], linestyle='-', linewidth=1,zorder=0)
ax.set_xlabel(xlabel)
ax.set_ylabel('Mean Rotation Error (degrees)')
ax.set_ylim([0,3.1])
ax.set_title(title,fontsize=10)
plt.subplot(122)
ax = fig.gca()
for i in range(0,len(selected_points)):
idx = mat[:,0]==selected_points[i]
noise_sigma = mat[idx,1]
pos_error_mean = mat[idx,11]
label = "%d points" % selected_points[i]
ax.plot(noise_sigma,pos_error_mean, styles[i], label=label)
ax.legend(loc=2)
ax.grid(color=[0.9,0.9,0.9], linestyle='-', linewidth=1,zorder=0)
ax.set_xlabel(xlabel)
ax.set_ylabel('Mean Position Error (meters)')
ax.set_ylim([0,0.3])
ax.set_title(title,fontsize=10)
fig.set_dpi(100)
fig.set_size_inches(12,4)
out_file = filename + ".png"
print("Writing", out_file)
plt.savefig(out_file, bbox_inches='tight')
#plt.show()
def generateMeanErrorVersusPointsPlots(filename, data):
mat = data["matrix"]
title = createTitle(data)
noise_sigmas = np.unique(mat[:,1])
noise_sigmas = noise_sigmas[noise_sigmas>0]
#print("Noise: ", list(noise_sigmas))
styles = ['cx-', 'bs-', 'm^-', 'g+-', 'ro-', 'k*-', 'cx:', 'bs:', 'm^:', 'g+:', 'ro:', 'k*:']
fig = plt.figure(figsize=(1,1))
plt.subplot(121)
ax = fig.gca()
for i in range(0,len(noise_sigmas)):
idx = mat[:,1]==noise_sigmas[i]
        if not np.any(idx): continue  # skip if no rows match this noise level
num_points = mat[idx,0]
rot_error_mean = mat[idx,9]
label = "$\sigma$=%.1f px" % noise_sigmas[i]
h = ax.plot(num_points,rot_error_mean, styles[i], label=label)
ax.legend(loc=1)
ax.grid(color=[0.9,0.9,0.9], linestyle='-', linewidth=1, zorder=0)
ax.set_xlabel('#points')
ax.set_xscale('log')
ax.set_ylabel('Mean Rotation Error (degrees)')
ax.set_ylim([0,3.1])
ax.set_title(title,fontsize=10)
plt.subplot(122)
ax = fig.gca()
for i in range(0,len(noise_sigmas)):
idx = mat[:,1]==noise_sigmas[i]
        if not np.any(idx): continue  # skip if no rows match this noise level
num_points = mat[idx,0]
pos_error_mean = mat[idx,11]
label = "$\sigma$=%.1f px" % noise_sigmas[i]
h = ax.plot(num_points,pos_error_mean, styles[i], label=label)
ax.legend(loc=1)
ax.grid(color=[0.9,0.9,0.9], linestyle='-', linewidth=1, zorder=0)
ax.set_xlabel('#points')
ax.set_xscale('log')
ax.set_ylabel('Mean Position Error (meters)')
ax.set_ylim([0,0.3])
ax.set_title(title,fontsize=10)
fig.set_dpi(100)
fig.set_size_inches(12,4)
out_file = filename + ".png"
print("Writing", out_file)
plt.savefig(out_file, bbox_inches='tight')
# plt.show()
if __name__ == "__main__":
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)
inpath = parseArgs()
filenames = [\
'epnp-eval-f700-nonplanar.txt',\
'epnp-eval-f700-planar-5m-fronto.txt',\
'epnp-eval-f700-planar-5m-slanted.txt',\
'epnp-eval-f700-quasiplanar-5m-slanted.txt',\
]
#ext = '.eval'
#files = listFiles(inpath,ext)
#print("%d '%s' files found in directory inpath" % (len(files), ext))
for filename in filenames:
filename = inpath + os.sep + filename
if not os.path.isfile(filename):
print("File not found:",filename)
continue
filename,ext = os.path.splitext(filename)
# parse benchmark data file
print("Parsing", filename + ext)
try: data = parseEvalFile(filename+ext)
except ParseError as err:
print("Parsing error %s: %s" % (filename,err.msg))
sys.exit()
# generate plots
generateMeanErrorVersusNoisePlots(filename + '-erravg-noise', data)
generateMeanErrorVersusPointsPlots(filename + '-erravg-numpts', data)
#break
|
|
import numpy as np
def h_fpp(H):
"""Evaluate the significance of an H score.
The H test is an extension of the Z_m^2 or Rayleigh tests for
uniformity on the circle. These tests estimate the Fourier coefficients
of the distribution and compare them with the values predicted for
a uniform distribution, but they require the user to specify the number
of harmonics to use. The H test automatically selects the number of
harmonics to use based on the data.
Arguments
---------
H : float
The H value to evaluate
Returns
-------
fpp : float
The probability of an H score this large arising from sampling a
uniform distribution.
Reference
---------
de Jager, O. C., Swanepoel, J. W. H, and Raubenheimer, B. C., "A
powerful test for weak periodic signals of unknown light curve shape
in sparse data", Astron. Astrophys. 221, 180-190, 1989.
"""
# These values are obtained by fitting to simulations.
a = 0.9999755
b = 0.39802
c = 1.210597
d = 0.45901
e = 0.0022900
if H<=23:
return a*np.exp(-b*H)
elif H<50:
return c*np.exp(-d*H+e*H**2)
    else:
        # H > 50 comes up too often to raise an exception; in this regime the
        # false positive probability is below 4e-8.
        return 4e-8
def h_test(events):
"""Apply the H test for uniformity on [0,1).
The H test is an extension of the Z_m^2 or Rayleigh tests for
uniformity on the circle. These tests estimate the Fourier coefficients
of the distribution and compare them with the values predicted for
a uniform distribution, but they require the user to specify the number
of harmonics to use. The H test automatically selects the number of
harmonics to use based on the data. The returned statistic, H, has mean
and standard deviation approximately 2.51, but its significance should
be evaluated with the routine h_fpp. This is done automatically in this
routine.
Arguments
---------
events : array-like
events should consist of an array of values to be interpreted as
values modulo 1. These events will be tested for statistically
significant deviations from uniformity.
Returns
-------
H : float
The raw score. Larger numbers indicate more non-uniformity.
M : int
The number of harmonics that give the most significant deviation
from uniformity.
fpp : float
The probability of an H score this large arising from sampling a
uniform distribution.
Reference
---------
de Jager, O. C., Swanepoel, J. W. H, and Raubenheimer, B. C., "A
powerful test for weak periodic signals of unknown light curve shape
in sparse data", Astron. Astrophys. 221, 180-190, 1989.
"""
max_harmonic = 20
ev = np.reshape(events, (-1,))
cs = np.sum(np.exp(2.j*np.pi*np.arange(1,max_harmonic+1)*ev[:,None]),axis=0)/len(ev)
Zm2 = 2*len(ev)*np.cumsum(np.abs(cs)**2)
Hcand = (Zm2 - 4*np.arange(1,max_harmonic+1) + 4)
M = np.argmax(Hcand)+1
H = Hcand[M-1]
fpp = h_fpp(H)
return (H, M, fpp)
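# Minimal usage sketch (not part of the original module): compare the H test
# on a uniform sample and on a weakly pulsed one.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    uniform_events = rng.random_sample(1000)
    # Weak pulse: 10% of events drawn from a narrow Gaussian around phase 0.5.
    pulsed_events = np.concatenate([rng.random_sample(900),
                                    rng.normal(0.5, 0.05, 100) % 1.0])
    for name, ev in [("uniform", uniform_events), ("pulsed", pulsed_events)]:
        H, M, fpp = h_test(ev)
        print("%-7s H=%6.2f M=%2d fpp=%.3g" % (name, H, M, fpp))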
|
|
import numpy as np
from numpy import nan
import pytest
from pandas._libs import groupby, lib, reduction
from pandas.core.dtypes.common import ensure_int64
from pandas import Index, isna
from pandas.core.groupby.ops import generate_bins_generic
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
def test_series_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
def test_series_bin_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
class TestBinGroupers:
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6, 9], dtype=np.int64)
for func in [lib.generate_bins_dt64, generate_bins_generic]:
bins = func(values, binner, closed="left")
assert (bins == np.array([2, 5, 6])).all()
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6, 6])).all()
for func in [lib.generate_bins_dt64, generate_bins_generic]:
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6], dtype=np.int64)
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6])).all()
msg = "Invalid length for values or for binner"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [], "right")
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values[:0], binner, "right")
msg = "Values falls before first bin"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [4], "right")
msg = "Values falls after last bin"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [-3, -1], "right")
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = getattr(groupby, "group_ohlc_%s" % dtype)
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isna(group).all():
return np.repeat(nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = nan
func(out, counts, obj[:, None], labels)
expected[0] = nan
assert_almost_equal(out, expected)
_check("float32")
_check("float64")
class TestMoments:
pass
class TestReducer:
def test_int_index(self):
from pandas.core.series import Series
arr = np.random.randn(100, 4)
result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
result = reduction.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))
expected = arr.sum(1)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(100))
result = reduction.reduce(arr, np.sum, dummy=dummy, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(4))
result = reduction.reduce(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
expected = arr.sum(1)
assert_almost_equal(result, expected)
result = reduction.reduce(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
assert_almost_equal(result, expected)
|
|
import sys
import os
import requests
import re
import urllib.request
from scipy.io import savemat, loadmat
from tqdm import tqdm
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QWidget, QPushButton, QProgressBar, QVBoxLayout, QLabel, QApplication
import time
import numpy as np
from requests_html import HTMLSession
class Auth:
def __init__(self):
        super(Auth, self).__init__()
def bearer(self):
url = "https://hellofresh.de"
session = HTMLSession()
response = session.get(url)
regex = r"\"accessToken\":\"([a-zA-Z_0-9.-]*)\""
return(re.findall(regex, response.html.html, re.MULTILINE))
class TQDM:
def __init__(self,r):
#super(TQDM,self).__init(r)
self.tq=tqdm(r)
def remaining(self):
#elapsed = self.tq.format_dict["elapsed"]
rate = self.tq.format_dict["rate"]
        remaining = (self.tq.total - self.tq.n) / rate if rate and self.tq.total else 0  # seconds remaining
remaining = time.strftime('%H:%M:%S', time.gmtime(remaining))
return remaining
class Thread(QThread):
_signal = pyqtSignal(int)
_state_msg = pyqtSignal('QString')
_max = pyqtSignal(int)
def __init__(self):
super(Thread, self).__init__()
self._isRunning = True
def __del__(self):
self.wait()
def stop(self):
self._isRunning = False
def run(self):
# run thread while flag is True
while self._isRunning:
# create new session
session = requests.Session()
# HelloFresh URL
url = "https://gw.hellofresh.com/api/recipes/search?"
            # the API returns at most 250 items per request
limit = 250
            # only keep recipes with more than minIngredientAmount ingredients
minIngredientAmount = 3
            # Should the images also be saved?
saveImg=True
buildCorMatrix=True
LOCALE=["de-DE","en-US"]
COUNTRY=["de","us"]
locale=LOCALE[0]
country=COUNTRY[0]
payload={
"offset": "0",
"limit": str(limit),
"locale": locale,
"country": country
}
self._state_msg.emit("Get fresh bearer auth token...")
            bToken = Auth().bearer()
# apply random bearer
headers = {
'Authorization': 'Bearer '+bToken[0]
}
            # find out how many recipes exist for the selected locale
response = requests.get(url, headers=headers, params=payload).json()
total = response['total']
iterations = int(response['total']/limit)
residuals = response['total']%limit
print(">> Found a total of",total,"HelloFresh recipes")
            # Because at most 250 recipes can be fetched per request,
            # iterate over all result pages
if residuals>0:
iterations=iterations+1
recipes=[]
offset = 0
_tqdmObj = TQDM(range(iterations))
for i in _tqdmObj.tq:
if not self._isRunning:
break
#self.label.setText("Download "+str(i)+" of "+str(iterations)+"recipe from server")
self._max.emit(iterations)
self._signal.emit(i)
self._state_msg.emit("Step 1/8\n\nDownload "+str(i)+" of "+str(iterations)+" packages server\n\nRemaining Time "+_tqdmObj.remaining())
payload={
"offset": str(offset),
"limit": str(limit),
"locale": locale,
"country": country
}
                offset += limit
recipes.extend(session.request("GET", url, headers=headers, params=payload).json()['items'])
# Drop recipes when
# - ingredients list is empty
# - thermomix classified
# - ingredients amount less than "minIngredientAmount"
recipesFiltred=[]
recipesOutsourced=[]
for i in range(total):
if not self._isRunning:
break
try:
if(any(recipes[i]['ingredients']) and recipes[i]['label']['handle']!='thermomix' and re.match(".*thermomix",recipes[i]['comment'])==None and len(recipes[i]['ingredients'])>minIngredientAmount):
recipesFiltred.append(recipes[i])
else:
recipesOutsourced.append(recipes[i])
except:
if(recipes[i]['label']==None and len(recipes[i]['ingredients'])>minIngredientAmount):
recipesFiltred.append(recipes[i])
else:
recipesOutsourced.append(recipes[i])
print(">> Found ",str(len(recipesFiltred)),"relevant recipes")
# Build final dict container
print(">> Build final dict container")
recipesFinal=[]
ingredients=[]
steps=[]
tags=[]
label=[]
headline=[]
for i in range(len(recipesFiltred)):
for j in range(len(recipesFiltred[i]['ingredients'])):
ingredient=str(recipesFiltred[i]['ingredients'][j]['name'])
amount=str(recipesFiltred[i]['yields'][0]['ingredients'][j]['amount'])
unit=str(recipesFiltred[i]['yields'][0]['ingredients'][j]['unit'])
ingredients.append([amount,unit,ingredient])
for k in range(len(recipesFiltred[i]['steps'])):
steps.append(recipesFiltred[i]['steps'][k]['instructionsMarkdown'])
for item in recipesFiltred[i]['tags']:
tags.append(item['name'])
try:
label.append(recipesFiltred[i]['label']['text'])
except:
label.append("")
if recipesFiltred[i]['headline'] != None:
try:
headline.append(recipesFiltred[i]['headline'])
except:
headline.append("")
else:
headline.append("")
recipesFinal.append([recipesFiltred[i]['name'],ingredients,recipesFiltred[i]['websiteUrl'],steps,"https://img.hellofresh.com/c_fit,f_auto,fl_lossy,h_1100,q_auto,w_2600/hellofresh_s3"+recipesFiltred[i]['imagePath'],tags,label,headline])
ingredients=[]
steps=[]
tags=[]
label=[]
headline=[]
recipes=[]
recipes=recipesFinal
if country=="de":
ite=2
else:
ite=1
            # Loop over the recipe variants:
# (1) standard
# (2) simple
for opt in range(ite):
if not self._isRunning:
break
if opt==0:
print(">> Start standard recipe session")
self._max.emit(ite)
self._signal.emit(0)
self._state_msg.emit("Start standard recipe session")
else:
print(">> Start simplified recipe session")
print(">> Create ingredient list")
Ingredients = []
tmp=[]
matFile=loadmat('db/de-DE-s/dict.mat')['HelloIngredients'][1:]
_tqdmObj = TQDM(range(len(recipes)))
for i in _tqdmObj.tq:
self._max.emit(len(recipes))
self._signal.emit(i)
if opt:
step="6"
else:
step="2"
self._state_msg.emit("Step "+step+"/8\n\nRearrange the recipes...\n\nRemaining Time "+_tqdmObj.remaining())
if not self._isRunning:
break
for j in range(len(recipes[i][1])):
# Should be simplified?
if(opt):
for k in range(len(matFile)):
if matFile[k][0][0].strip()==recipes[i][1][j][2]:
if matFile[k][1]:
recipes[i][1][j][2]=matFile[k][1][0].strip()
tmp.append(recipes[i][1][j][2])
else:
tmp.append(recipes[i][1][j][2])
Ingredients.append(tmp)
tmp=[]
                # Download and save the recipe images
if(saveImg and not opt):
print(">> Load and save recipe images")
_tqdmObj = TQDM(range(len(recipes)))
for i in _tqdmObj.tq:
if not self._isRunning:
break
self._max.emit(len(recipes))
self._signal.emit(i)
self._state_msg.emit("Step 3/8\n\nDownload recipe image "+str(i)+" of "+str(len(recipes))+"\n\nRemaining Time "+_tqdmObj.remaining())
try:
urllib.request.urlretrieve(recipes[i][4], "img/"+locale+"/"+str(i)+".jpg")
except:
urllib.request.urlretrieve("https://help.ifttt.com/hc/article_attachments/360041394694/no_image_card.png", "img/"+locale+"/"+str(i)+".jpg")
                # Build the ingredient/recipe correlation matrix:
                # s_CrlFct[j][k] counts the ingredient names shared by recipes
                # j and k (deduplicating the concatenated lists shrinks the
                # length by exactly the number of common names)
if(buildCorMatrix):
print(">> Build correlation matrix")
s_CrlFct = np.zeros([len(Ingredients),len(Ingredients)])
_tqdmObj = TQDM(range(len(Ingredients)))
for j in _tqdmObj.tq:
if not self._isRunning:
break
self._max.emit(len(Ingredients))
self._signal.emit(j)
if opt:
step="7"
else:
step="4"
self._state_msg.emit("Step "+step+"/8\n\nBuild correlation matrix...\n\nRemaining Time "+_tqdmObj.remaining())
for k in range(len(Ingredients)):
ar = Ingredients[j] + Ingredients[k]
s_NotUnique = len(ar)
s_Unique = len(np.unique(ar))
s_CrlFct[j][k] = abs(s_Unique - s_NotUnique)
# Build unique ingredient list
print(">> Create final unique ingredient list")
ingredientListUniqueType=[]
_tqdmObj = TQDM(range(len(Ingredients)))
for j in _tqdmObj.tq:
self._max.emit(len(Ingredients))
self._signal.emit(j)
if opt:
step="8"
else:
step="5"
self._state_msg.emit("Step "+step+"/8\n\nSave all results...")
for k in range(len(Ingredients[j])):
ingredientListUniqueType.append(Ingredients[j][k])
ingredientListUniqueType=np.unique(ingredientListUniqueType)
if self._isRunning:
if(opt):
# Path to simplified data
path="db/"+locale+"-s/"
else:
# Standard path
path="db/"+locale+"/"
if(buildCorMatrix):
# Save the correlation matrix to *.mat file
savemat(path+"cor_fac.mat", {"CorrelationFactors":s_CrlFct})
# Save the ingredient list to *.mat file
savemat(path+"ingredients.mat", {"ingredientListUniqueType":ingredientListUniqueType})
# Save the recipes to *.mat file
savemat(path+"data_pkg.mat", {"recipes":recipes})
self._isRunning = False
for i in (range(5)):
time.sleep(1)
self._state_msg.emit("Update finished\n\nApp will be restarted in "+str(4-i)+"...")
os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
class Update(QWidget):
def __init__(self):
super(Update, self).__init__()
self.setWindowTitle('H3R Updater')
self.btn = QPushButton('Start Update')
self.btn.clicked.connect(self.btnFunc)
self.pbar = QProgressBar(self,minimum=0, maximum=13-1)
self.pbar.setValue(0)
self.label = QLabel("", self)
self.resize(int(QApplication.desktop().size().width()*0.3),int(QApplication.desktop().size().height()*0.2))
#self.resize(300, 200)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.label)
self.label.setAlignment(Qt.AlignCenter)
self.label.setWordWrap(True)
self.vbox.addWidget(self.pbar)
self.vbox.addWidget(self.btn)
self.setLayout(self.vbox)
self.thread = Thread()
self.show()
def btnFunc(self):
self.thread._signal.connect(self.signal_accept)
self.thread._state_msg.connect(self.updateLabel)
self.thread._max.connect(self.updatePbarMaximum)
self.thread.start()
self.btn.setEnabled(False)
def signal_accept(self, msg):
self.pbar.setValue(int(msg))
if self.pbar.value() == 99:
self.pbar.setValue(0)
def updateLabel(self, msg):
self.label.setText(str(msg))
def updatePbarMaximum(self,num):
self.pbar.setMaximum(num)
def closeEvent(self,event):
self.thread.stop()
#os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
# t=Thread()
# t.run()
|
|
# Tools - Pandas
*The `pandas` library provides high-performance, easy-to-use data structures and data analysis tools. The main data structure is the `DataFrame`, which you can think of as an in-memory 2D table (like a spreadsheet, with column names and row labels). Many features available in Excel are available programmatically, such as creating pivot tables, computing columns based on other columns, plotting graphs, etc. You can also group rows by column value, or join tables much like in SQL. Pandas is also great at handling time series.*
Prerequisites:
* NumPy – if you are not familiar with NumPy, we recommend that you go through the [NumPy tutorial](tools_numpy.ipynb) now.
## Setup
First, let's import `pandas`. People usually import it as `pd`:
import pandas as pd
## `Series` objects
The `pandas` library contains these useful data structures:
* `Series` objects, which we will discuss now. A `Series` object is a 1D array, similar to a column in a spreadsheet (with a column name and row labels).
* `DataFrame` objects. This is a 2D table, similar to a spreadsheet (with column names and row labels).
* `Panel` objects. You can see a `Panel` as a dictionary of `DataFrame`s. These are less commonly used, so we will not discuss them here.
## Creating a `Series`
Let's start by creating our first `Series` object!
s = pd.Series([2,-1,3,5])
s
## Similar to a 1D `ndarray`
`Series` objects behave much like one-dimensional NumPy `ndarray`s, and you can often pass them as parameters to NumPy functions:
import numpy as np
np.exp(s)
Arithmetic operations on `Series` are also possible, and they apply *elementwise*, just like for `ndarray`s:
s + [1000,2000,3000,4000]
Similar to NumPy, if you add a single number to a `Series`, that number is added to all items in the `Series`. This is called *broadcasting*:
s + 1000
The same is true for all binary operations such as `*` or `/`, and even conditional operations:
s < 0
## Index labels
Each item in a `Series` object has a unique identifier called the *index label*. By default, it is simply the rank of the item in the `Series` (starting at `0`) but you can also set the index labels manually:
s2 = pd.Series([68, 83, 112, 68], index=["alice", "bob", "charles", "darwin"])
s2
You can then use the `Series` just like a `dict`:
s2["bob"]
You can still access the items by integer location, like in a regular array:
s2[1]
To make it clear when you are accessing by label or by integer location, it is recommended to always use the `loc` attribute when accessing by label, and the `iloc` attribute when accessing by integer location:
s2.loc["bob"]
s2.iloc[1]
Slicing a `Series` also slices the index labels:
s2.iloc[1:3]
This can lead to unexpected results when using the default numeric labels, so be careful:
surprise = pd.Series([1000, 1001, 1002, 1003])
surprise
surprise_slice = surprise[2:]
surprise_slice
Oh look! The first element has index label `2`. The element with index label `0` is absent from the slice:
try:
surprise_slice[0]
except KeyError as e:
print("Key error:", e)
But remember that you can access elements by integer location using the `iloc` attribute. This illustrates another reason why it's always better to use `loc` and `iloc` to access `Series` objects:
surprise_slice.iloc[0]
## Init from `dict`
You can create a `Series` object from a `dict`. The keys will be used as index labels:
weights = {"alice": 68, "bob": 83, "colin": 86, "darwin": 68}
s3 = pd.Series(weights)
s3
You can control which elements you want to include in the `Series` and in what order by explicitly specifying the desired `index`:
s4 = pd.Series(weights, index = ["colin", "alice"])
s4
## Automatic alignment
When an operation involves multiple `Series` objects, `pandas` automatically aligns items by matching index labels.
print(s2.keys())
print(s3.keys())
s2 + s3
The resulting `Series` contains the union of index labels from `s2` and `s3`. Since `"colin"` is missing from `s2` and `"charles"` is missing from `s3`, these items have a `NaN` result value (i.e. Not-a-Number, meaning *missing*).
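If you would rather drop the items that could not be aligned, calling `dropna()` on the result removes the `NaN` entries:
(s2 + s3).dropna()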
Automatic alignment is very handy when working with data that may come from various sources with varying structure and missing items. But if you forget to set the right index labels, you can have surprising results:
s5 = pd.Series([1000,1000,1000,1000])
print("s2 =", s2.values)
print("s5 =", s5.values)
s2 + s5
Pandas could not align the `Series`, since their labels do not match at all, hence the full `NaN` result.
## Init with a scalar
You can also initialize a `Series` object using a scalar and a list of index labels: all items will be set to the scalar.
meaning = pd.Series(42, ["life", "universe", "everything"])
meaning
## `Series` name
A `Series` can have a `name`:
s6 = pd.Series([83, 68], index=["bob", "alice"], name="weights")
s6
## Plotting a `Series`
Pandas makes it easy to plot `Series` data using matplotlib (for more details on matplotlib, check out the [matplotlib tutorial](tools_matplotlib.ipynb)). Just import matplotlib and call the `plot()` method:
%matplotlib inline
import matplotlib.pyplot as plt
temperatures = [4.4,5.1,6.1,6.2,6.1,6.1,5.7,5.2,4.7,4.1,3.9,3.5]
s7 = pd.Series(temperatures, name="Temperature")
s7.plot()
plt.show()
There are *many* options for plotting your data. It is not necessary to list them all here: if you need a particular type of plot (histograms, pie charts, etc.), just look for it in the excellent [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) section of pandas' documentation, and look at the example code.
## Handling time
Many datasets have timestamps, and pandas is awesome at manipulating such data:
* it can represent periods (such as 2016Q3) and frequencies (such as "monthly"),
* it can convert periods to actual timestamps, and *vice versa*,
* it can resample data and aggregate values any way you like,
* it can handle timezones.
## Time range
Let's start by creating a time series using `pd.date_range()`. This returns a `DatetimeIndex` containing one datetime per hour for 12 hours starting on October 29th 2016 at 5:30pm.
dates = pd.date_range('2016/10/29 5:30pm', periods=12, freq='H')
dates
This `DatetimeIndex` may be used as an index in a `Series`:
temp_series = pd.Series(temperatures, dates)
temp_series
Let's plot this series:
temp_series.plot(kind="bar")
plt.grid(True)
plt.show()
## Resampling
Pandas lets us resample a time series very simply. Just call the `resample()` method and specify a new frequency:
temp_series_freq_2H = temp_series.resample("2H")
temp_series_freq_2H
The resampling operation is actually a deferred operation, which is why we did not get a `Series` object, but a `DatetimeIndexResampler` object instead. To actually perform the resampling operation, we can simply call the `mean()` method: Pandas will compute the mean of every pair of consecutive hours:
temp_series_freq_2H = temp_series_freq_2H.mean()
Let's plot the result:
temp_series_freq_2H.plot(kind="bar")
plt.show()
Note how the values have automatically been aggregated into 2-hour periods. If we look at the 6-8pm period, for example, we had a value of `5.1` at 6:30pm, and `6.1` at 7:30pm. After resampling, we just have one value of `5.6`, which is the mean of `5.1` and `6.1`. Rather than computing the mean, we could have used any other aggregation function; for example, we can decide to keep the minimum value of each period:
temp_series_freq_2H = temp_series.resample("2H").min()
temp_series_freq_2H
Or, equivalently, we could use the `apply()` method instead:
temp_series_freq_2H = temp_series.resample("2H").apply(np.min)
temp_series_freq_2H
## Upsampling and interpolation
This was an example of downsampling. We can also upsample (ie. increase the frequency), but this creates holes in our data:
temp_series_freq_15min = temp_series.resample("15Min").mean()
temp_series_freq_15min.head(n=10) # `head` displays the top n values
One solution is to fill the gaps by interpolating. We just call the `interpolate()` method. The default is to use linear interpolation, but we can also select another method, such as cubic interpolation:
temp_series_freq_15min = temp_series.resample("15Min").interpolate(method="cubic")
temp_series_freq_15min.head(n=10)
temp_series.plot(label="Period: 1 hour")
temp_series_freq_15min.plot(label="Period: 15 minutes")
plt.legend()
plt.show()
## Timezones
By default datetimes are *naive*: they are not aware of timezones, so 2016-10-30 02:30 might mean October 30th 2016 at 2:30am in Paris or in New York. We can make datetimes timezone *aware* by calling the `tz_localize()` method:
temp_series_ny = temp_series.tz_localize("America/New_York")
temp_series_ny
Note that `-04:00` is now appended to all the datetimes. This means that these datetimes refer to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) minus 4 hours.
We can convert these datetimes to Paris time like this:
temp_series_paris = temp_series_ny.tz_convert("Europe/Paris")
temp_series_paris
You may have noticed that the UTC offset changes from `+02:00` to `+01:00`: this is because France switches to winter time at 3am that particular night (time goes back to 2am). Notice that 2:30am occurs twice! Let's go back to a naive representation (if you log some data hourly using local time, without storing the timezone, you might get something like this):
temp_series_paris_naive = temp_series_paris.tz_localize(None)
temp_series_paris_naive
Now `02:30` is really ambiguous. If we try to localize these naive datetimes to the Paris timezone, we get an error:
try:
temp_series_paris_naive.tz_localize("Europe/Paris")
except Exception as e:
print(type(e))
print(e)
Fortunately using the `ambiguous` argument we can tell pandas to infer the right DST (Daylight Saving Time) based on the order of the ambiguous timestamps:
temp_series_paris_naive.tz_localize("Europe/Paris", ambiguous="infer")
## Periods
The `pd.period_range()` function returns a `PeriodIndex` instead of a `DatetimeIndex`. For example, let's get all quarters in 2016 and 2017:
quarters = pd.period_range('2016Q1', periods=8, freq='Q')
quarters
Adding a number `N` to a `PeriodIndex` shifts the periods by `N` times the `PeriodIndex`'s frequency:
quarters + 3
The `asfreq()` method lets us change the frequency of the `PeriodIndex`. All periods are lengthened or shortened accordingly. For example, let's convert all the quarterly periods to monthly periods (zooming in):
quarters.asfreq("M")
By default, `asfreq()` zooms on the end of each period. We can tell it to zoom on the start of each period instead:
quarters.asfreq("M", how="start")
And we can zoom out:
quarters.asfreq("A")
Of course we can create a `Series` with a `PeriodIndex`:
quarterly_revenue = pd.Series([300, 320, 290, 390, 320, 360, 310, 410], index = quarters)
quarterly_revenue
quarterly_revenue.plot(kind="line")
plt.show()
We can convert periods to timestamps by calling `to_timestamp`. By default this will give us the first day of each period, but by setting `how` and `freq`, we can get the last hour of each period:
last_hours = quarterly_revenue.to_timestamp(how="end", freq="H")
last_hours
And back to periods by calling `to_period`:
last_hours.to_period()
Pandas also provides many other time-related functions that we recommend you check out in the [documentation](http://pandas.pydata.org/pandas-docs/stable/timeseries.html). To whet your appetite, here is one way to get the last business day of each month in 2016, at 9am:
months_2016 = pd.period_range("2016", periods=12, freq="M")
one_day_after_last_days = months_2016.asfreq("D") + 1
last_bdays = one_day_after_last_days.to_timestamp() - pd.tseries.offsets.BDay()
last_bdays.to_period("H") + 9
## `DataFrame` objects
A `DataFrame` object represents a spreadsheet, with cell values, column names and row index labels. You can define expressions to compute columns based on other columns, create pivot tables, group rows, draw graphs, etc. You can see `DataFrame`s as dictionaries of `Series`.
## Creating a `DataFrame`
You can create a `DataFrame` by passing a dictionary of `Series` objects:
people_dict = {
"weight": pd.Series([68, 83, 112], index=["alice", "bob", "charles"]),
"birthyear": pd.Series([1984, 1985, 1992], index=["bob", "alice", "charles"], name="year"),
"children": pd.Series([0, 3], index=["charles", "bob"]),
"hobby": pd.Series(["Biking", "Dancing"], index=["alice", "bob"]),
}
people = pd.DataFrame(people_dict)
people
A few things to note:
* the `Series` were automatically aligned based on their index,
* missing values are represented as `NaN`,
* `Series` names are ignored (the name `"year"` was dropped),
* `DataFrame`s are displayed nicely in Jupyter notebooks, woohoo!
You can access columns pretty much as you would expect. They are returned as `Series` objects:
people["birthyear"]
You can also get multiple columns at once:
people[["birthyear", "hobby"]]
If you pass a list of columns and/or index row labels to the `DataFrame` constructor, it will guarantee that these columns and/or rows will exist, in that order, and no other column/row will exist. For example:
d2 = pd.DataFrame(
people_dict,
columns=["birthyear", "weight", "height"],
index=["bob", "alice", "eugene"]
)
d2
Another convenient way to create a `DataFrame` is to pass all the values to the constructor as an `ndarray`, or a list of lists, and specify the column names and row index labels separately:
values = [
[1985, np.nan, "Biking", 68],
[1984, 3, "Dancing", 83],
[1992, 0, np.nan, 112]
]
d3 = pd.DataFrame(
values,
columns=["birthyear", "children", "hobby", "weight"],
index=["alice", "bob", "charles"]
)
d3
To specify missing values, you can either use `np.nan` or NumPy's masked arrays:
masked_array = np.ma.asarray(values, dtype=object)
masked_array[(0, 2), (1, 2)] = np.ma.masked
d3 = pd.DataFrame(
masked_array,
columns=["birthyear", "children", "hobby", "weight"],
index=["alice", "bob", "charles"]
)
d3
Instead of an `ndarray`, you can also pass a `DataFrame` object:
d4 = pd.DataFrame(
d3,
columns=["hobby", "children"],
index=["alice", "bob"]
)
d4
It is also possible to create a `DataFrame` with a dictionary (or list) of dictionaries (or lists):
people = pd.DataFrame({
"birthyear": {"alice":1985, "bob": 1984, "charles": 1992},
"hobby": {"alice":"Biking", "bob": "Dancing"},
"weight": {"alice":68, "bob": 83, "charles": 112},
"children": {"bob": 3, "charles": 0}
})
people
## Multi-indexing
If all column labels are tuples of the same size, they are understood as a multi-index. The same goes for row index labels. For example:
d5 = pd.DataFrame(
{
("public", "birthyear"):
{("Paris","alice"):1985, ("Paris","bob"): 1984, ("London","charles"): 1992},
("public", "hobby"):
{("Paris","alice"):"Biking", ("Paris","bob"): "Dancing"},
("private", "weight"):
{("Paris","alice"):68, ("Paris","bob"): 83, ("London","charles"): 112},
("private", "children"):
{("Paris", "alice"):np.nan, ("Paris","bob"): 3, ("London","charles"): 0}
}
)
d5
You can now get a `DataFrame` containing all the `"public"` columns very simply:
d5["public"]
d5["public", "hobby"] # Same result as d5["public"]["hobby"]
## Dropping a level
Let's look at `d5` again:
d5
There are two levels of columns, and two levels of indices. We can drop a column level by calling `droplevel()` (the same goes for indices):
d5.columns = d5.columns.droplevel(level = 0)
d5
## Transposing
You can swap columns and indices using the `T` attribute:
d6 = d5.T
d6
## Stacking and unstacking levels
Calling the `stack()` method will push the lowest column level after the lowest index:
d7 = d6.stack()
d7
Note that many `NaN` values appeared. This makes sense because many new combinations did not exist before (eg. there was no `bob` in `London`).
Calling `unstack()` will do the reverse, once again creating many `NaN` values.
d8 = d7.unstack()
d8
If we call `unstack` again, we end up with a `Series` object:
d9 = d8.unstack()
d9
The `stack()` and `unstack()` methods let you select the `level` to stack/unstack. You can even stack/unstack multiple levels at once:
d10 = d9.unstack(level = (0,1))
d10
## Most methods return modified copies
As you may have noticed, the `stack()` and `unstack()` methods do not modify the object they apply to. Instead, they work on a copy and return that copy. This is true of most methods in pandas.
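For example, we can verify that calling `stack()` on `d6` returns a new object and leaves `d6` itself untouched:
d6_backup = d6.copy()
d6.stack() # returns a new, stacked object
d6.equals(d6_backup) # True: d6 was not modified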
## Accessing rows
Let's go back to the `people` `DataFrame`:
people
The `loc` attribute lets you access rows instead of columns. The result is a `Series` object in which the `DataFrame`'s column names are mapped to row index labels:
people.loc["charles"]
You can also access rows by integer location using the `iloc` attribute:
people.iloc[2]
You can also get a slice of rows, and this returns a `DataFrame` object:
people.iloc[1:3]
Finally, you can pass a boolean array to get the matching rows:
people[np.array([True, False, True])]
This is most useful when combined with boolean expressions:
people[people["birthyear"] < 1990]
## Adding and removing columns
You can generally treat `DataFrame` objects like dictionaries of `Series`, so the following work fine:
people
people["age"] = 2018 - people["birthyear"] # adds a new column "age"
people["over 30"] = people["age"] > 30 # adds another column "over 30"
birthyears = people.pop("birthyear")
del people["children"]
people
birthyears
When you add a new column from a `Series`, it is aligned with the `DataFrame` by index label: missing rows are filled with `NaN`, and extra rows are ignored:
people["pets"] = pd.Series({"bob": 0, "charles": 5, "eugene":1}) # alice is missing, eugene is ignored
people
When adding a new column, it is added at the end (on the right) by default. You can also insert a column anywhere else using the `insert()` method:
people.insert(1, "height", [172, 181, 185])
people
## Assigning new columns
You can also create new columns by calling the `assign()` method. Note that this returns a new `DataFrame` object, the original is not modified:
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
has_pets = people["pets"] > 0
)
Note that you cannot access columns created within the same assignment:
try:
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
overweight = people["body_mass_index"] > 25
)
except KeyError as e:
print("Key error:", e)
The solution is to split this assignment in two consecutive assignments:
d6 = people.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
d6.assign(overweight = d6["body_mass_index"] > 25)
Having to create a temporary variable `d6` is not very convenient. You may want to just chain the assignment calls, but it does not work because the `people` object is not actually modified by the first assignment:
try:
(people
.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
.assign(overweight = people["body_mass_index"] > 25)
)
except KeyError as e:
print("Key error:", e)
But fear not, there is a simple solution. You can pass a function to the `assign()` method (typically a `lambda` function), and this function will be called with the `DataFrame` as a parameter:
(people
.assign(body_mass_index = lambda df: df["weight"] / (df["height"] / 100) ** 2)
.assign(overweight = lambda df: df["body_mass_index"] > 25)
)
Problem solved!
## Evaluating an expression
A great feature supported by pandas is expression evaluation. This relies on the `numexpr` library, which must be installed.
people.eval("weight / (height/100) ** 2 > 25")
Assignment expressions are also supported. Let's set `inplace=True` to directly modify the `DataFrame` rather than getting a modified copy:
people.eval("body_mass_index = weight / (height/100) ** 2", inplace=True)
people
You can use a local or global variable in an expression by prefixing it with `'@'`:
overweight_threshold = 30
people.eval("overweight = body_mass_index > @overweight_threshold", inplace=True)
people
## Querying a `DataFrame`
The `query()` method lets you filter a `DataFrame` based on a query expression:
people.query("age > 30 and pets == 0")
## Sorting a `DataFrame`
You can sort a `DataFrame` by calling its `sort_index` method. By default it sorts the rows by their index label, in ascending order, but let's reverse the order:
people.sort_index(ascending=False)
Note that `sort_index` returned a sorted *copy* of the `DataFrame`. To modify `people` directly, we can set the `inplace` argument to `True`. Also, we can sort the columns instead of the rows by setting `axis=1`:
people.sort_index(axis=1, inplace=True)
people
To sort the `DataFrame` by the values instead of the labels, we can use `sort_values` and specify the column to sort by:
people.sort_values(by="age", inplace=True)
people
## Plotting a `DataFrame`
Just like for `Series`, pandas makes it easy to draw nice graphs based on a `DataFrame`.
For example, it is trivial to create a line plot from a `DataFrame`'s data by calling its `plot` method:
people.plot(kind = "line", x = "body_mass_index", y = ["height", "weight"])
plt.show()
You can pass extra arguments supported by matplotlib's functions. For example, we can create a scatter plot and pass it a list of sizes using the `s` argument of matplotlib's `scatter()` function:
people.plot(kind = "scatter", x = "height", y = "weight", s=[40, 120, 200])
plt.show()
Again, there are way too many options to list here: the best option is to scroll through the [Visualization](http://pandas.pydata.org/pandas-docs/stable/visualization.html) page in pandas' documentation, find the plot you are interested in and look at the example code.
## Operations on `DataFrame`s
Although `DataFrame`s do not try to mimic NumPy arrays, there are a few similarities. Let's create a `DataFrame` to demonstrate this:
grades_array = np.array([[8,8,9],[10,9,9],[4, 8, 2], [9, 10, 10]])
grades = pd.DataFrame(grades_array, columns=["sep", "oct", "nov"], index=["alice","bob","charles","darwin"])
grades
You can apply NumPy mathematical functions on a `DataFrame`: the function is applied to all values:
np.sqrt(grades)
Similarly, adding a single value to a `DataFrame` will add that value to all elements in the `DataFrame`. This is called *broadcasting*:
grades + 1
Of course, the same is true for all other binary operations, including arithmetic (`*`,`/`,`**`...) and conditional (`>`, `==`...) operations:
grades >= 5
Aggregation operations, such as computing the `max`, the `sum` or the `mean` of a `DataFrame`, apply to each column, and you get back a `Series` object:
grades.mean()
The `all` method is also an aggregation operation: it checks whether all values are `True` or not. Let's see during which months all students got a grade greater than `5`:
(grades > 5).all()
Most of these functions take an optional `axis` parameter which lets you specify along which axis of the `DataFrame` you want the operation executed. The default is `axis=0`, meaning that the operation is executed vertically (on each column). You can set `axis=1` to execute the operation horizontally (on each row). For example, let's find out which students had all grades greater than `5`:
(grades > 5).all(axis = 1)
The `any` method returns `True` if any value is True. Let's see who got at least one grade 10:
(grades == 10).any(axis = 1)
If you add a `Series` object to a `DataFrame` (or execute any other binary operation), pandas attempts to broadcast the operation to all *rows* in the `DataFrame`. This only works if the `Series` has the same size as the `DataFrame`'s rows. For example, let's subtract the `mean` of the `DataFrame` (a `Series` object) from the `DataFrame`:
grades - grades.mean() # equivalent to: grades - [7.75, 8.75, 7.50]
We subtracted `7.75` from all September grades, `8.75` from October grades and `7.50` from November grades. It is equivalent to subtracting this `DataFrame`:
pd.DataFrame([[7.75, 8.75, 7.50]]*4, index=grades.index, columns=grades.columns)
If you want to subtract the global mean from every grade, here is one way to do it:
grades - grades.values.mean() # subtracts the global mean (8.00) from all grades
## Automatic alignment
Similar to `Series`, when operating on multiple `DataFrame`s, pandas automatically aligns them by row index label, but also by column names. Let's create a `DataFrame` with bonus points for each person from October to December:
bonus_array = np.array([[0,np.nan,2],[np.nan,1,0],[0, 1, 0], [3, 3, 0]])
bonus_points = pd.DataFrame(bonus_array, columns=["oct", "nov", "dec"], index=["bob","colin", "darwin", "charles"])
bonus_points
grades + bonus_points
Looks like the addition worked in some cases but way too many elements are now empty. That's because when aligning the `DataFrame`s, some columns and rows were only present on one side, and thus they were considered missing on the other side (`NaN`). Then adding `NaN` to a number results in `NaN`, hence the result.
## Handling missing data
Dealing with missing data is a frequent task when working with real life data. Pandas offers a few tools to handle missing data.
Let's try to fix the problem above. For example, we can decide that missing data should result in a zero, instead of `NaN`. We can replace all `NaN` values by any value using the `fillna()` method:
(grades + bonus_points).fillna(0)
It's a bit unfair that we're setting grades to zero in September, though. Perhaps we should decide that missing grades are missing grades, but missing bonus points should be replaced by zeros:
fixed_bonus_points = bonus_points.fillna(0)
fixed_bonus_points.insert(0, "sep", 0)
fixed_bonus_points.loc["alice"] = 0
grades + fixed_bonus_points
That's much better: although we made up some data, we have not been too unfair.
Another way to handle missing data is to interpolate. Let's look at the `bonus_points` `DataFrame` again:
bonus_points
Now let's call the `interpolate` method. By default, it interpolates vertically (`axis=0`), so let's tell it to interpolate horizontally (`axis=1`).
bonus_points.interpolate(axis=1)
Bob had 0 bonus points in October, and 2 in December. When we interpolate for November, we get the mean: 1 bonus point. Colin had 1 bonus point in November, but we do not know how many bonus points he had in September, so we cannot interpolate; this is why there is still a missing value in October after interpolation. To fix this, we can set the September bonus points to 0 before interpolation.
better_bonus_points = bonus_points.copy()
better_bonus_points.insert(0, "sep", 0)
better_bonus_points.loc["alice"] = 0
better_bonus_points = better_bonus_points.interpolate(axis=1)
better_bonus_points
Great, now we have reasonable bonus points everywhere. Let's find out the final grades:
grades + better_bonus_points
It is slightly annoying that the September column ends up on the right. This is because the `DataFrame`s we are adding do not have the exact same columns (the `grades` `DataFrame` is missing the `"dec"` column), so to make things predictable, pandas orders the final columns alphabetically. To fix this, we can simply add the missing column before adding:
grades["dec"] = np.nan
final_grades = grades + better_bonus_points
final_grades
There's not much we can do about December and Colin: it's bad enough that we are making up bonus points, but we can't reasonably make up grades (well I guess some teachers probably do). So let's call the `dropna()` method to get rid of rows that are full of `NaN`s:
final_grades_clean = final_grades.dropna(how="all")
final_grades_clean
Now let's remove columns that are full of `NaN`s by setting the `axis` argument to `1`:
final_grades_clean = final_grades_clean.dropna(axis=1, how="all")
final_grades_clean
## Aggregating with `groupby`
Similar to the SQL language, pandas lets you group rows and run calculations over each group.
First, let's add some extra data about each person so we can group them, and let's go back to the `final_grades` `DataFrame` so we can see how `NaN` values are handled:
final_grades["hobby"] = ["Biking", "Dancing", np.nan, "Dancing", "Biking"]
final_grades
Now let's group data in this `DataFrame` by hobby:
grouped_grades = final_grades.groupby("hobby")
grouped_grades
We are ready to compute the average grade per hobby:
grouped_grades.mean()
That was easy! Note that the `NaN` values have simply been skipped when computing the means.
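We are not limited to a single aggregation function: the `agg()` method lets us compute several at once (here the mean and the max of each group):
grouped_grades.agg(["mean", "max"])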
## Pivot tables
Pandas supports spreadsheet-like [pivot tables](https://en.wikipedia.org/wiki/Pivot_table) that allow quick data summarization. To illustrate this, let's create a simple `DataFrame`:
bonus_points
more_grades = final_grades_clean.stack().reset_index()
more_grades.columns = ["name", "month", "grade"]
more_grades["bonus"] = [np.nan, np.nan, np.nan, 0, np.nan, 2, 3, 3, 0, 0, 1, 0]
more_grades
Now we can call the `pd.pivot_table()` function for this `DataFrame`, asking to group by the `name` column. By default, `pivot_table()` computes the mean of each numeric column:
pd.pivot_table(more_grades, index="name")
We can change the aggregation function by setting the `aggfunc` argument, and we can also specify the list of columns whose values will be aggregated:
pd.pivot_table(more_grades, index="name", values=["grade","bonus"], aggfunc=np.max)
We can also specify the `columns` to aggregate over horizontally, and request the grand totals for each row and column by setting `margins=True`:
pd.pivot_table(more_grades, index="name", values="grade", columns="month", margins=True)
Finally, we can specify multiple index or column names, and pandas will create multi-level indices:
pd.pivot_table(more_grades, index=("name", "month"), margins=True)
## Overview functions
When dealing with a large `DataFrame`, it is useful to get a quick overview of its content. Pandas offers a few functions for this. First, let's create a large `DataFrame` with a mix of numeric values, missing values and text values. Notice how Jupyter displays only the corners of the `DataFrame`:
much_data = np.fromfunction(lambda x,y: (x+y*y)%17*11, (10000, 26))
large_df = pd.DataFrame(much_data, columns=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
large_df[large_df % 16 == 0] = np.nan
large_df.insert(3,"some_text", "Blabla")
large_df
The `head()` method returns the top 5 rows:
large_df.head()
Of course there's also a `tail()` function to view the bottom 5 rows. You can pass the number of rows you want:
large_df.tail(n=2)
The `info()` method prints out a summary of each column's contents:
large_df.info()
Finally, the `describe()` method gives a nice overview of the main aggregated values over each column:
* `count`: number of non-null (not NaN) values
* `mean`: mean of non-null values
* `std`: [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation) of non-null values
* `min`: minimum of non-null values
* `25%`, `50%`, `75%`: 25th, 50th and 75th [percentile](https://en.wikipedia.org/wiki/Percentile) of non-null values
* `max`: maximum of non-null values
large_df.describe()
## Saving & loading
Pandas can save `DataFrame`s to various backends, including file formats such as CSV, Excel, JSON, HTML and HDF5, or to a SQL database. Let's create a `DataFrame` to demonstrate this:
my_df = pd.DataFrame(
[["Biking", 68.5, 1985, np.nan], ["Dancing", 83.1, 1984, 3]],
columns=["hobby","weight","birthyear","children"],
index=["alice", "bob"]
)
my_df
## Saving
Let's save it to CSV, HTML and JSON:
my_df.to_csv("my_df.csv")
my_df.to_html("my_df.html")
my_df.to_json("my_df.json")
Done! Let's take a peek at what was saved:
for filename in ("my_df.csv", "my_df.html", "my_df.json"):
print("#", filename)
with open(filename, "rt") as f:
print(f.read())
print()
Note that the index is saved as the first column (with no name) in a CSV file, as `<th>` tags in HTML and as keys in JSON.
Saving to other formats works very similarly, but some formats require extra libraries to be installed. For example, saving to Excel requires the `openpyxl` library:
try:
my_df.to_excel("my_df.xlsx", sheet_name='People')
except ImportError as e:
print(e)
## Loading
Now let's load our CSV file back into a `DataFrame`:
my_df_loaded = pd.read_csv("my_df.csv", index_col=0)
my_df_loaded
As you might guess, there are similar `read_json`, `read_html`, `read_excel` functions as well. We can also read data straight from the Internet. For example, let's load all U.S. cities from [simplemaps.com](http://simplemaps.com/):
us_cities = None
try:
csv_url = "http://simplemaps.com/files/cities.csv"
us_cities = pd.read_csv(csv_url, index_col=0)
us_cities = us_cities.head()
except IOError as e:
print(e)
us_cities
There are more options available, in particular regarding datetime format. Check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/io.html) for more details.
## Combining `DataFrame`s
## SQL-like joins
One powerful feature of pandas is its ability to perform SQL-like joins on `DataFrame`s. Various types of joins are supported: inner joins, left/right outer joins and full joins. To illustrate this, let's start by creating a couple of simple `DataFrame`s:
city_loc = pd.DataFrame(
[
["CA", "San Francisco", 37.781334, -122.416728],
["NY", "New York", 40.705649, -74.008344],
["FL", "Miami", 25.791100, -80.320733],
["OH", "Cleveland", 41.473508, -81.739791],
["UT", "Salt Lake City", 40.755851, -111.896657]
], columns=["state", "city", "lat", "lng"])
city_loc
city_pop = pd.DataFrame(
[
[808976, "San Francisco", "California"],
[8363710, "New York", "New-York"],
[413201, "Miami", "Florida"],
[2242193, "Houston", "Texas"]
], index=[3,4,5,6], columns=["population", "city", "state"])
city_pop
Now let's join these `DataFrame`s using the `merge()` function:
pd.merge(left=city_loc, right=city_pop, on="city")
Note that both `DataFrame`s have a column named `state`, so in the result they got renamed to `state_x` and `state_y`.
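If you prefer different suffixes, you can set the `suffixes` argument:
pd.merge(left=city_loc, right=city_pop, on="city", suffixes=("_loc", "_pop"))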
Also, note that Cleveland, Salt Lake City and Houston were dropped because they don't exist in *both* `DataFrame`s. This is the equivalent of a SQL `INNER JOIN`. If you want a `FULL OUTER JOIN`, where no city gets dropped and `NaN` values are added, you must specify `how="outer"`:
all_cities = pd.merge(left=city_loc, right=city_pop, on="city", how="outer")
all_cities
Of course `LEFT OUTER JOIN` is also available by setting `how="left"`: only the cities present in the left `DataFrame` end up in the result. Similarly, with `how="right"` only cities in the right `DataFrame` appear in the result. For example:
pd.merge(left=city_loc, right=city_pop, on="city", how="right")
If the key to join on is actually in one (or both) of the `DataFrame`s' indices, you must use `left_index=True` and/or `right_index=True`. If the key column names differ, you must use `left_on` and `right_on`. For example:
city_pop2 = city_pop.copy()
city_pop2.columns = ["population", "name", "state"]
pd.merge(left=city_loc, right=city_pop2, left_on="city", right_on="name")
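Index-based joins work the same way. Here is a minimal sketch of an index-to-column join, matching a copy of `city_loc` indexed by city against `city_pop` (whose key lives in its `city` column):
city_loc_indexed = city_loc.set_index("city")
pd.merge(left=city_loc_indexed, right=city_pop, left_index=True, right_on="city")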
## Concatenation
Rather than joining `DataFrame`s, we may just want to concatenate them. That's what `concat()` is for:
result_concat = pd.concat([city_loc, city_pop])
result_concat
Note that this operation aligned the data horizontally (by columns) but not vertically (by rows). In this example, we end up with multiple rows having the same index (e.g. `3`). Pandas handles this rather gracefully:
result_concat.loc[3]
Or you can tell pandas to just ignore the index:
pd.concat([city_loc, city_pop], ignore_index=True)
Notice that when a column does not exist in a `DataFrame`, it acts as if it were filled with `NaN` values. If we set `join="inner"`, then only columns that exist in *both* `DataFrame`s are returned:
pd.concat([city_loc, city_pop], join="inner")
You can concatenate `DataFrame`s horizontally instead of vertically by setting `axis=1`:
pd.concat([city_loc, city_pop], axis=1)
In this case it really does not make much sense because the indices do not align well (e.g. Cleveland and San Francisco end up on the same row, because they shared the index label `3`). So let's reindex the `DataFrame`s by city name before concatenating:
pd.concat([city_loc.set_index("city"), city_pop.set_index("city")], axis=1)
This looks a lot like a `FULL OUTER JOIN`, except that the `state` columns were not renamed to `state_x` and `state_y`, and the `city` column is now the index.
The `append()` method is a useful shorthand for concatenating `DataFrame`s vertically:
city_loc.append(city_pop)
As always in pandas, the `append()` method does *not* actually modify `city_loc`: it works on a copy and returns the modified copy.
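Note that recent pandas versions deprecated `append()` in favor of `concat()`; if your version complains, the equivalent call is:
pd.concat([city_loc, city_pop])  # same result as city_loc.append(city_pop)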
## Categories
It is quite frequent to have values that represent categories, for example `1` for female and `2` for male, or `"A"` for Good, `"B"` for Average, `"C"` for Bad. These categorical values can be hard to read and cumbersome to handle, but fortunately pandas makes it easy. To illustrate this, let's take the `city_pop` `DataFrame` we created earlier, and add a column that represents a category:
city_eco = city_pop.copy()
city_eco["eco_code"] = [17, 17, 34, 20]
city_eco
Right now the `eco_code` column is full of apparently meaningless codes. Let's fix that. First, we will create a new categorical column based on the `eco_code`s:
city_eco["economy"] = city_eco["eco_code"].astype('category')
city_eco["economy"].cat.categories
Now we can give each category a meaningful name:
city_eco["economy"].cat.categories = ["Finance", "Energy", "Tourism"]
city_eco
Note that categorical values are sorted according to their categorical order, *not* their alphabetical order:
city_eco.sort_values(by="economy", ascending=False)
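Note that recent pandas versions also deprecated assigning to `cat.categories` directly (as we did above); `rename_categories()` is the safer equivalent, and `reorder_categories()` makes the category order explicit. A minimal sketch:
city_eco["economy"] = city_eco["economy"].cat.rename_categories(["Finance", "Energy", "Tourism"])
city_eco["economy"] = city_eco["economy"].cat.reorder_categories(["Tourism", "Energy", "Finance"], ordered=True)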
## What next?
As you probably noticed by now, pandas is quite a large library with *many* features. Although we went through the most important features, there is still a lot to discover. Probably the best way to learn more is to get your hands dirty with some real-life data. It is also a good idea to go through pandas' excellent [documentation](http://pandas.pydata.org/pandas-docs/stable/index.html), in particular the [Cookbook](http://pandas.pydata.org/pandas-docs/stable/cookbook.html).
|
|
from preprocess.generateMap import ClusterGenerator
import matplotlib.pyplot as plt
from model.climateNet import ClimateNet
from dataReader.dataset import dataset
from torch.utils.data import DataLoader, WeightedRandomSampler
import torch
import numpy as np
import pandas as pd
from sklearn import metrics
import os
if not os.path.isdir('res/'):
os.mkdir('res/')
if not os.path.isdir('fig/'):
os.mkdir('fig/')
## Select cluster process
def run(EPOCH,SUBDATA_SIZE,MODEL_NUM,SPLIT_THRES,LR):
cg = ClusterGenerator(MODEL_NUM,"data_source/train.csv")
cg.loadData()
cg.kmeans()
cg.randomSample(decay=2,size=SUBDATA_SIZE)
# cg.generateStack()
# cg.showGrid()  # Used to visualize the data clusters
# Uncomment this line if using the localized sampling:
# data_frame = cg.meanShiftData
# Uncomment the following line if using the random sampling:
data_frame = cg.sampleData
print("Training dataFrame", data_frame.shape)
train_frame = data_frame.sample(frac = SPLIT_THRES)
val_frame = data_frame.drop(train_frame.index)
net_list = []
stats_list = []
## Training process
for idx in range(MODEL_NUM):
print()
print("Working on model: ", idx)
train_dataset = dataset(train_frame, idx, mode = 'train')
val_dataset = dataset(val_frame, idx, mode='val')
ws = WeightedRandomSampler(torch.DoubleTensor(train_dataset.get_cls_label_weight()), len(train_dataset))
trainloader = DataLoader(train_dataset,
# sampler=ws,
batch_size=32,
shuffle=True
)
valloader = DataLoader(val_dataset, batch_size=32,shuffle=False)
net = ClimateNet(train_dataset.get_feature_len())
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
reg_loss_func = torch.nn.MSELoss()
# train_cls_loss_func = torch.nn.BCEWithLogitsLoss(weight=train_dataset.get_cls_weight())  # the target label is NOT one-hot encoded
# train_cls_loss_func = torch.nn.BCEWithLogitsLoss()
# val_cls_loss_func = torch.nn.BCEWithLogitsLoss()  # the target label is NOT one-hot encoded
cls_loss_func = torch.nn.BCELoss()
# print(net)
cls_train_losses = []
cls_train_accur = []
cls_val_losses = []
cls_val_accur = []
reg_train_losses = []
reg_val_losses = []
total_losses = []
best_loss = np.inf
best_acc = 0
save_weight = None
print("start traning with dataset:",len(train_dataset),len(val_dataset))
print()
for i in range(EPOCH):
epoch_train_cls_loss = 0
epoch_train_reg_loss = 0
epoch_total_train = 0
epoch_total_val = 0
epoch_valid_cls_loss = 0
epoch_valid_reg_loss = 0
epoch_total_valid = 0
for j,(x, y_reg, y_cls) in enumerate(trainloader):
epoch_total_train += x.shape[0]
#calculate output
cls_train_output, reg_train_output = net(x)
#calculate loss
cls_train_loss = cls_loss_func(cls_train_output, y_cls.reshape(-1,1))
reg_train_loss = reg_loss_func(reg_train_output, y_reg.reshape(-1,1))
#class accuracy
epoch_train_cls_loss += cls_train_loss*x.shape[0]
epoch_train_reg_loss += reg_train_loss*x.shape[0]
# loss = reg_loss_func(output,y )
total_loss = 0.5*cls_train_loss + 0.5*reg_train_loss
# total_loss = cls_train_loss
#class accuracy
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
cls_train_predicted, reg_train_predicted = net(train_dataset.get_x())
cls_train_acc = (cls_train_predicted.reshape(-1).detach().numpy().round() == train_dataset.get_y_cls().numpy()).mean()
# cls_train_acc = metrics.balanced_accuracy_score(cls_train_predicted.reshape(-1).detach().numpy().round(), train_dataset.get_y_cls().numpy())
epoch_train_cls_loss /= epoch_total_train
epoch_train_reg_loss /= epoch_total_train
for j_v,(x_v, y_v_reg, y_v_cls) in enumerate(valloader):
epoch_total_valid += x_v.shape[0]
#calculate output
cls_val_output, reg_val_output = net(x_v)
#calculate loss
cls_val_loss = cls_loss_func(cls_val_output, y_v_cls.reshape(-1,1))
reg_val_loss = reg_loss_func(reg_val_output, y_v_reg.reshape(-1,1))
#class accuracy
epoch_valid_cls_loss += cls_val_loss*x_v.shape[0]
epoch_valid_reg_loss += reg_val_loss*x_v.shape[0]
cls_val_predicted, reg_val_predicted = net(val_dataset.get_x())
cls_val_acc = (cls_val_predicted.reshape(-1).detach().numpy().round() == val_dataset.get_y_cls().numpy()).mean()
epoch_valid_cls_loss /= epoch_total_valid
epoch_valid_reg_loss /= epoch_total_valid
# if cls_val_acc > best_acc:
# print("Update save Weight at EPOCH {}, val acc: {}".format(i,best_acc))
# best_acc = cls_val_acc
# save_weight = net.state_dict()
if epoch_valid_cls_loss < best_loss:
print("Update save Weight at EPOCH {}, val loss: {}".format(i,best_loss))
best_loss = epoch_valid_cls_loss
save_weight = net.state_dict()
cls_train_losses.append(epoch_train_cls_loss.data)
cls_train_accur.append(cls_train_acc)
cls_val_losses.append(epoch_valid_cls_loss.data)
cls_val_accur.append(cls_val_acc)
reg_train_losses.append(epoch_train_reg_loss.data)
reg_val_losses.append(epoch_valid_reg_loss.data)  # epoch average, consistent with the other curves
if i%10 == 0:
print("Epoch:",i)
print("cls_train_loss: {:.5f} cls_val_loss: {:.5f} train_acc: {:.5f} val_acc: {:.5f}".format(epoch_train_cls_loss,
epoch_valid_cls_loss,
cls_train_acc,
cls_val_acc))
print("reg_train_loss:{:.5f} reg_val_loss: {:.5f}".format(epoch_train_reg_loss,
epoch_valid_reg_loss))
stats_list.append([idx,[cls_train_losses,
cls_train_accur,
cls_val_losses,
cls_val_accur,
reg_train_losses,
reg_val_losses]])
net_list.append([save_weight,[cg.centers[idx]]])
del net
del total_loss, cls_train_loss, reg_train_loss, cls_val_loss
del optimizer
## Save the loss curves for each model; comment this part out when doing a grid search
plt.clf()
for i in range(len(stats_list)):
cls_train_losses_score = stats_list[i][1][0]
cls_val_losses_score = stats_list[i][1][2]
plt.plot(cls_train_losses_score,label="train")
plt.plot(cls_val_losses_score,label="val")
# plt.plot(cls_val_losses,label='val')
plt.title('Loss vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend()
plt.savefig("fig/mode_{}_cls_losses.png".format(i))
plt.clf()
for i in range(len(stats_list)):
cls_train_accur_score = stats_list[i][1][1]
plt.plot(cls_train_accur_score,label="Net{}".format(i))
# plt.plot(cls_val_losses,label='val')
plt.title('accuracy vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend()
plt.savefig("fig/accuracy.png")
plt.clf()
for i in range(len(stats_list)):
reg_train_losses = stats_list[i][1][4]
reg_val_losses = stats_list[i][1][5]
plt.plot(reg_train_losses,label="train")
plt.plot(reg_val_losses,label="val")
# plt.plot(cls_val_losses,label='val')
plt.title('Regression loss vs Epochs')
plt.xlabel('Epochs')
plt.ylabel('loss')
plt.legend()
plt.savefig("fig/mode_{}_reg_losses.png".format(i))
plt.clf()
### Test process
test_source = "data_source/test.csv"
test_dataset = dataset(pd.read_csv(test_source),0,mode='test')
testloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)
cls_res = []
reg_res = []
distances = []
mse = []
for j_v,(x_t, y_t_reg, y_t_cls) in enumerate(testloader):
for net_num in range(len(net_list)):
net = ClimateNet(x_t.shape[1])
net.load_state_dict(net_list[net_num][0])
net.eval()
la, lo = net_list[net_num][1][0]
distances.append(np.sqrt((test_dataset.get_location()[:,0]-la)**2+(test_dataset.get_location()[:,1]-lo)**2))
#class accuracy
cls_predicted, reg_predicted = net(x_t)
cls_res.append(cls_predicted.reshape(-1).detach().numpy().round())
reg_res.append(reg_predicted.reshape(-1).detach().numpy())
# cls_val_acc = (cls_val_predicted.reshape(-1).detach().numpy().round() == val_dataset.get_y_cls().numpy()).mean()
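# Combine the per-model predictions with inverse-distance weights: every model
# scores all test points, and models whose cluster center lies closer to a given
# point contribute more to that point's final prediction.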
cls_res = np.array(cls_res) #[model, data]
reg_res = np.array(reg_res) #[model, data]
distances = np.array(distances) #[model, data]
probability = 1./(distances)
probability = probability/np.sum(probability, axis=0, keepdims=True)
res_combine = (probability * cls_res).sum(axis=0)
reg_combine = (probability * reg_res).sum(axis=0)
cls_score = (res_combine.round() == y_t_cls.reshape(-1).detach().numpy()).mean()
reg_score = ((reg_combine - y_t_reg.reshape(-1).detach().numpy())**2).mean()
# prob.append(probability)
print("Clssification Accuracy:",cls_score,"Regression Mean Square Error:",reg_score)
return(cls_score,reg_score)
if __name__ =='__main__':
EPOCH = [100,200]
SUBDATA_SIZE = [100,150,200]
MODEL_NUM = [5,10,15,20]
SPLIT_THRES = [0.7,0.8]
LR = [0.01, 0.05, 0.001, 0.005]
ind = 0
result_list = []
# for e in EPOCH:
# for s in SUBDATA_SIZE:
# for m in MODEL_NUM:
# for sp in SPLIT_THRES:
# for l in LR:
# with open('res/result.txt','a+') as f:
# print("Working on Epoch:{}, Size:{}, Model#:{}, Split%:{}, LearningRate:{}".format(e,s,m,sp,l))
# cls_score,reg_score = run(e,s,m,sp,l)
# re = np.array([cls_score,reg_score,e,s,m,sp,l])
# np.save('res/result_{}.npy'.format(ind), re)
# f.write("Epoch:{}, Size:{}, Model#:{}, Split:{}, LearningRate:{}, result: cls: {:.5f} reg: {:.5f} \n".format(e,s,m,sp,l,cls_score,reg_score))
# print("Finish...")
# ind+=1
# f.close()
run(150,200,10,0.8,0.01)
|
|
import numpy as np
from abraia import Multiple
multiple = Multiple()
def test_load_image():
img = multiple.load_image('lion.jpg')
assert isinstance(img, np.ndarray)
def test_load_metadata():
meta = multiple.load_metadata('lion.jpg')
assert meta['MIMEType'] == 'image/jpeg'
def test_save_image():
img = multiple.load_image('lion.jpg')
path = multiple.save_image('lion.png', img)
assert path == 'lion.png'
def test_load_tiff_image():
multiple.upload_file('images/AnnualCrop_1896.tiff')
img = multiple.load_image('AnnualCrop_1896.tiff')
assert isinstance(img, np.ndarray)
def test_save_tiff_image():
img = multiple.load_image('AnnualCrop_1896.tiff')
path = multiple.save_image('test.tiff', img)
assert path == 'test.tiff'
|
|
""" Test pyfive's abililty to read multidimensional datasets. """
import os
import numpy as np
from numpy.testing import assert_array_equal
import pyfive
DIRNAME = os.path.dirname(__file__)
DATASET_COMPRESSED_HDF5_FILE = os.path.join(DIRNAME, 'compressed.hdf5')
def test_compressed_dataset():
with pyfive.File(DATASET_COMPRESSED_HDF5_FILE) as hfile:
# check data
dset1 = hfile['dataset1']
assert dset1.shape == (21, 16)
assert dset1.dtype == np.dtype('u2')
assert dset1.compression == 'gzip'
assert dset1.compression_opts == 4
assert dset1.shuffle is False
assert_array_equal(dset1[:], np.arange(21*16).reshape((21, 16)))
dset2 = hfile['dataset2']
assert dset2.shape == (21, 16)
assert dset2.dtype == np.dtype('i4')
assert dset2.compression == 'gzip'
assert dset2.compression_opts == 4
assert dset2.shuffle is True
assert_array_equal(dset2[:], np.arange(21*16).reshape((21, 16)))
dset3 = hfile['dataset3']
assert dset3.shape == (21, 16)
assert dset3.dtype == np.dtype('f8')
assert dset3.compression is None
assert dset3.compression_opts is None
assert dset3.shuffle is True
assert_array_equal(dset3[:], np.arange(21*16).reshape((21, 16)))
|
|
# coding: utf-8
# In[1]:
# After conversion and audio feature extraction, rename the split channels according to the intensity values from openSMILE or IBM ASR results (the channel-to-speaker mapping is not consistently A: l, B: r in SWBD)
# In[6]:
import os
import sys
import numpy as np
from collections import defaultdict
# In[17]:
#rename all separate channel audio files and feature files based on the acoustic features (intensity?) from the csv files and the word timings
#the one with the highest volume wins
# In[18]:
def get_first_n_timings(timings_file,n=4):
f = open(timings_file)
c = 0
timings = []
for line in f:
s = line.split("\t")
start = float(s[0]); stop = float(s[1])
timings.append((start,stop))
c+=1
if c == n: break
f.close()
return timings
# In[19]:
def get_mean_intensity(filename,intervals=None):
"frameIndex; frameTime; pcm_RMSenergy_sma; pcm_LOGenergy_sma; F0final_sma; voicingFinalUnclipped_sma; F0raw_sma; pcm_intensity_sma; pcm_loudness_sma"
#print intervals
f = open(filename)
intensities = []
start = False
for line in f:
#print line
if start == False:
start = True; continue
s = line.split(";")
#print s
time = float(s[1])
#print time
if intervals[-1][1] < time:
break
for i in intervals:
if time > i[0] and time <= i[1]:
intensities.append(float(s[8])) # append pcm_loudness_sma (column 8)
break
f.close()
return np.average(intensities)
# In[24]:
rootdir = "../../../swbd"
audio_feature_files = os.listdir(rootdir+"/audio_features/")
wordtimingdir = rootdir + "/mapping_MS2SWDA/"
wordtimingfiles = os.listdir(wordtimingdir)
missed = []
pair = []
#NB this can be adjusted to rename the separate channel wav files too
for audio in sorted(audio_feature_files):
dialogue_number = audio[3:7]
print dialogue_number
if "A.csv" in audio or "B.csv" in audio: continue
#if not int(dialogue_number) in [2241, 3011]:
# continue
csv = rootdir + "/audio_features/" + audio
#wavfile = rootdir + "/wav/"+ audio.replace(".csv",".wav") #can add wavs too
pair.append((csv,None))
if len(pair)<2:
continue
assert pair[0][0].replace("_l","").replace("_r","") == pair[1][0].replace("_l","").replace("_r",""),\
pair[0][0].replace("_l","").replace("_r","")+ " " + pair[1][0].replace("_l","").replace("_r","")
#print "pair", pair
#we have the pair
#grab the mapping/timing files for A and B and check the first 3-4 words and get average intensity
#from each l/r file
#map each A and B accordingly to which file has the highest intensity for both first 5 words/
#sanity check is that one should win one, one should win the other
timing_files = filter(lambda x : dialogue_number in x, wordtimingfiles)
#print "timing", timing_files
if not len(timing_files)==2:
print dialogue_number, "missing"
missed.append(csv)
pair = []
continue
intensity_scores = defaultdict(list) #dict of ((wav/opensmilcsv)) -> [intensity at A's words, intensity at B's words]
for t in timing_files:
timings = get_first_n_timings(wordtimingdir+t,n=150)
speaker = t[4:5]
#print speaker
for p in pair:
#print "p", p
score = get_mean_intensity(p[0],intervals=timings)
intensity_scores[p[0]].append((speaker,score))
#print intensity_scores.items()
winner = {}
for p in intensity_scores.keys():
winner[p] = max(intensity_scores[p], key=lambda x: x[1])[0]
#print winner
if winner[pair[0][0]] == winner[pair[1][0]]:
missed.append(csv)
pair = []
continue
#do the renaming
for p in pair:
leftright = p[0][-6:-4]
speaker = winner[p[0]]
for filename in p:
if not filename: continue
c = "mv {} {}".format(filename,filename.replace(leftright,speaker))
print c
os.system(c)
pair= [] #reset
#h = raw_input()
#if h == "q": break
# In[25]:
print "missed"
print missed
|
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, OperatorSetIdProto
from typing import Sequence, Text, Tuple, List, Callable
from onnx import numpy_helper
import numpy as np
import struct
import onnx.version_converter
import unittest
class TestVersionConverter(unittest.TestCase):
def _converted(
self,
graph, # type: GraphProto
initial_version, # type: OperatorSetIdProto
target_version # type: int
): # type: (...) -> ModelProto
orig_model = helper.make_model(graph, producer_name='onnx-test', opset_imports=[initial_version])
# print(type(orig_model))
converted_model = onnx.version_converter.convert_version(orig_model,
target_version)
checker.check_model(converted_model)
return converted_model
# Test 1: Backwards Incompatible Conversion: Reshape: 8 -> 2
def test_backwards_incompatible(self): # type: () -> None
def test(): # type: () -> None
nodes = [helper.make_node('Add', ["W", "Z"], ["shape"]),
helper.make_node('Reshape', ["X", "shape"], ["A"]),
helper.make_node('Add', ["A", "W"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
self._converted(graph, helper.make_operatorsetid("", 8), 2)
self.assertRaises(RuntimeError, test)
# Test 2: Backwards Compatible Conversion (No Adaptations): Add: 3 -> 2
def test_backwards_compatible(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 3), 2)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 2
# Test 3: Non-Existent Op Conversion: Cos: 8 -> 6
def test_non_existent_op(self): # type: () -> None
def test(): # type: () -> None
nodes = [helper.make_node('Cos', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
self._converted(graph, helper.make_operatorsetid("", 8), 6)
self.assertRaises(RuntimeError, test)
# Test Add Adapter: 8 -> 5
def test_add_8_5(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 5
# Test Add Adapter: 5 -> 8
def test_add_5_8(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Add"
assert converted_model.opset_import[0].version == 8
# Test Add Adapter: 5 -> 8, requiring insertion of an Unsqueeze node
def test_add_5_8_with_unsqueeze(self): # type: () -> None
nodes = [helper.make_node('Add', ["X1", "X2"], ["Y"], axis=0, broadcast=1)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Unsqueeze"
assert converted_model.graph.node[1].op_type == "Add"
assert converted_model.opset_import[0].version == 8
# Test Mul Adapter: 8 -> 5
def test_mul_8_5(self): # type: () -> None
nodes = [helper.make_node('Mul', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Mul"
assert converted_model.opset_import[0].version == 5
# Test Mul Adapter: 5 -> 8
def test_mul_5_8(self): # type: () -> None
nodes = [helper.make_node('Mul', ["X1", "X2"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Mul"
assert converted_model.opset_import[0].version == 8
# Test Gemm Adapter: 1 -> 8
def test_gemm_up(self): # type: () -> None
nodes = [helper.make_node('Gemm', ["A", "B", "C"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.opset_import[0].version == 8
# Test Gemm Adapter: 8 -> 1
def test_gemm_down(self): # type: () -> None
nodes = [helper.make_node('Gemm', ["A", "B", "C"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (5, 5,)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.opset_import[0].version == 1
# Test Relu Adapter: 5 -> 7
def test_relu_5_7(self): # type: () -> None
nodes = [helper.make_node('Relu', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 7)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Relu"
assert converted_model.opset_import[0].version == 7
# Test Relu Adapter: 7 -> 5
def test_relu_7_5(self): # type: () -> None
nodes = [helper.make_node('Relu', ["X"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 7), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Relu"
assert converted_model.opset_import[0].version == 5
# Test BatchNormalization Adapter: 8 -> 5
def test_batch_normalization_8_5(self): # type: () -> None
nodes = [helper.make_node('BatchNormalization', ["X", "scale", "B",
"mean", "var"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("var", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == 5
# Test BatchNormalization Adapter: 5 -> 8
def test_batch_normalization_5_8(self): # type: () -> None
nodes = [helper.make_node('BatchNormalization', ["X", "scale", "B",
"mean", "var"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("var", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == 8
# Test Concat Adapter: 3 -> 5
def test_concat_3_5(self): # type: () -> None
nodes = [helper.make_node('Concat', ["X1", "X2", "X3",
"X4", "X5"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X3", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X4", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X5", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 3), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Concat"
assert converted_model.opset_import[0].version == 5
# Test Concat Adapter: 5 -> 3
def test_concat_5_3(self): # type: () -> None
nodes = [helper.make_node('Concat', ["X1", "X2", "X3",
"X4", "X5"], ["Y"], axis=0)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X1", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X2", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X3", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X4", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X5", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 3)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Concat"
assert converted_model.opset_import[0].version == 3
# Test Reshape Adapter: 6 -> 4
def test_reshape_6_4(self): # type: () -> None
nodes = [helper.make_node('Constant', [], ["shape"],
value=helper.make_tensor("", TensorProto.INT64, [1],
[5])),
helper.make_node('Reshape', ["X", "shape"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 6), 4)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Reshape"
assert converted_model.opset_import[0].version == 4
# Test Reshape Adapter: 4 -> 6
def test_reshape_4_6(self): # type: () -> None
nodes = [helper.make_node('Reshape', ["X"], ["Y"], shape=[5])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 4), 6)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.node[1].op_type == "Reshape"
assert converted_model.opset_import[0].version == 6
# Test Sum Adapter: 7 -> 8
def test_sum_7_8(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 7), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 8
# Test Sum Adapter: 5 -> 7
def test_sum_5_7(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 5), 7)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 7
# Test Sum Adapter: 8 -> 5
def test_sum_8_5(self): # type: () -> None
nodes = [helper.make_node('Sum', ["data_0", "data_1", "data_2",
"data_3", "data_4"], ["sum"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_2", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_3", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("data_4", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("sum", TensorProto.FLOAT, (5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 5)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Sum"
assert converted_model.opset_import[0].version == 5
# Test AveragePool Adapter: 1 -> 8
def test_averagepool_up(self): # type: () -> None
nodes = [helper.make_node('AveragePool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5, 5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "AveragePool"
assert converted_model.opset_import[0].version == 8
# Test AveragePool Adapter: 8 -> 1
def test_averagepool_down(self): # type: () -> None
nodes = [helper.make_node('AveragePool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5, 5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "AveragePool"
assert converted_model.opset_import[0].version == 1
# Test Dropout Adapter: 1 -> 8
def test_dropout_up(self): # type: () -> None
nodes = [helper.make_node('Dropout', ["data"], ["output"], is_test=1)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("output", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Dropout"
assert converted_model.opset_import[0].version == 8
# Test Dropout Adapter: 8 -> 1
def test_dropout_down(self): # type: () -> None
nodes = [helper.make_node('Dropout', ["data"], ["output"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("data", TensorProto.FLOAT, (5, 5,))],
[helper.make_tensor_value_info("output", TensorProto.FLOAT, (5, 5,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "Dropout"
assert converted_model.opset_import[0].version == 1
# Test Max Adapter: 7 -> 8
def test_max_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (2, 3, 4)
nodes = [onnx.helper.make_node(
"Max",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_max",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Max"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Min Adapter: 7 -> 8
def test_min_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (2, 3, 4)
nodes = [onnx.helper.make_node(
"Min",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_min",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Min"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Mean Adapter: 7 -> 8
def test_mean_7_8(self): # type: () -> None
from_opset = 7
to_opset = 8
data_type = TensorProto.FLOAT
data_shape = (3,)
nodes = [onnx.helper.make_node(
"Mean",
inputs=["X"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_mean",
[onnx.helper.make_tensor_value_info("X", data_type, data_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, data_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Mean"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test MaxPool Adapter: 1 -> 8
def test_maxpool_up(self): # type: () -> None
nodes = [helper.make_node('MaxPool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5, 5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 1), 8)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "MaxPool"
assert converted_model.opset_import[0].version == 8
# Test Upsample Adapter: 6 -> 7
def test_upsample_6_7(self): # type: () -> None
from_opset = 6
to_opset = 7
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Upsample",
inputs=["X"],
outputs=["Y"],
mode="nearest",
width_scale=3.0,
height_scale=2.0
)]
graph = helper.make_graph(
nodes,
"test_upsample_6_7",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert len(converted_model.graph.node) == 1
assert converted_model.graph.node[0].op_type == "Upsample"
attribute_names = [attr.name for attr in converted_model.graph.node[0].attribute]
assert 'scales' in attribute_names
assert 'width_scale' not in attribute_names
assert 'height_scale' not in attribute_names
assert converted_model.opset_import[0].version == to_opset
# Test MaxPool Adapter: 8 -> 1
def test_maxpool_down(self): # type: () -> None
nodes = [helper.make_node('MaxPool', ["X"], ["Y"], kernel_shape=[1, 1])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5, 5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 8), 1)
# Assert equality of graph and converted_model
assert converted_model.graph.node[0].op_type == "MaxPool"
assert converted_model.opset_import[0].version == 1
# Test BatchNormalization Adapter: 8 -> 9
def test_batch_normalization_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [helper.make_node(
'BatchNormalization',
inputs=["x", "s", "bias", "mean", "var"],
outputs=["y"]
)]
input_shape = (1, 2, 1, 3)
x = helper.make_tensor_value_info("x", data_type, input_shape)
scale = helper.make_tensor_value_info("s", data_type, [input_shape[1]])
B = helper.make_tensor_value_info("bias", data_type, [input_shape[1]])
mean = helper.make_tensor_value_info("mean", data_type, [input_shape[1]])
var = helper.make_tensor_value_info("var", data_type, [input_shape[1]])
y = helper.make_tensor_value_info("y", data_type, input_shape)
graph = helper.make_graph(
nodes,
"test_batchnormalization_8_9",
[x, scale, B, mean, var],
[y]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == to_opset
# Test BatchNormalization Adapter: 9 -> 8
def test_batchnormalization_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
'BatchNormalization',
inputs=['X', 'scale', 'B', 'mean', 'var'],
outputs=['Y'],
)]
input_shape = (2, 3, 4, 5)
x = onnx.helper.make_tensor_value_info("X", data_type, input_shape)
scale = onnx.helper.make_tensor_value_info("scale", data_type, [input_shape[1]])
B = onnx.helper.make_tensor_value_info("B", data_type, [input_shape[1]])
mean = onnx.helper.make_tensor_value_info("mean", data_type, [input_shape[1]])
var = onnx.helper.make_tensor_value_info("var", data_type, [input_shape[1]])
y = onnx.helper.make_tensor_value_info("Y", data_type, input_shape)
graph = onnx.helper.make_graph(
nodes, "test_batchnormalization", [x, scale, B, mean, var], [y]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "BatchNormalization"
assert converted_model.opset_import[0].version == to_opset
# Test Constant Adapter: 8 -> 9
def test_constant_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
output_shape = [2, 3, 4]
output_value = np.arange(24)
nodes = [helper.make_node(
"Constant",
inputs=[],
outputs=["Y"],
value=helper.make_tensor("", data_type, output_shape, output_value))]
graph = helper.make_graph(
nodes,
"test_constant",
[],
[onnx.helper.make_tensor_value_info("Y", data_type, output_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Constant Adapter: 9 -> 8
def test_constant_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
output_shape = [2, 3, 4]
output_value = np.arange(24)
nodes = [helper.make_node(
"Constant",
inputs=[],
outputs=["Y"],
value=helper.make_tensor("", data_type, output_shape, output_value))]
graph = helper.make_graph(
nodes,
"test_constant",
[],
[onnx.helper.make_tensor_value_info("Y", data_type, output_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Flatten Adapter: 8 -> 9
def test_flatten_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Flatten",
inputs=["X"],
outputs=["Y"],
axis=1
)]
graph = helper.make_graph(
nodes,
"test_flatten",
[onnx.helper.make_tensor_value_info("X", data_type, [2, 3, 4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [2, 12])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Flatten"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Flatten Adapter: 9 -> 8
def test_flatten_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Flatten",
inputs=["X"],
outputs=["Y"],
axis=1
)]
graph = helper.make_graph(
nodes,
"test_flatten",
[onnx.helper.make_tensor_value_info("X", data_type, [2, 3, 4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [2, 12])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[1].op_type == "Flatten"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test PRelu Adapter: 8 -> 9
def test_prelu_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"PRelu",
inputs=["X", "Slope"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_prelu",
[onnx.helper.make_tensor_value_info("X", data_type, input_shape),
onnx.helper.make_tensor_value_info("Slope", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "PRelu"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test PRelu Adapter: 9 -> 8
def test_prelu_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"PRelu",
inputs=["X", "Slope"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_prelu",
[onnx.helper.make_tensor_value_info("X", data_type, input_shape),
onnx.helper.make_tensor_value_info("Slope", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", data_type, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "PRelu"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Greater Adapter: 8 -> 9
def test_greater_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Greater",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_greater",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Greater"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Greater Adapter: 9 -> 8
def test_greater_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Greater",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_greater",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "Greater"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Less Adapter: 8 -> 9
def test_less_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Less",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_less",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Less"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test Less Adapter: 9 -> 8
def test_less_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Less",
inputs=["X1", "X2"],
outputs=["Y"]
)]
input_shape = [2, 3, 4]
graph = helper.make_graph(
nodes,
"test_less",
[onnx.helper.make_tensor_value_info("X1", data_type, input_shape),
onnx.helper.make_tensor_value_info("X2", data_type, input_shape)],
[onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, input_shape)])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "Less"
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
assert converted_model.opset_import[0].version == to_opset
# Test MatMul Adapter: 8 -> 9
def test_matmul_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"MatMul",
inputs=["X1", "X2"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_matmul",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "MatMul"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test MatMul Adapter: 9 -> 8
def test_matmul_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"MatMul",
inputs=["X1", "X2"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_matmul",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[2].op_type == "MatMul"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Gemm Adapter: 8 -> 9
def test_gemm_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Gemm",
inputs=["X1", "X2", "X3"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_gemm",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3]),
onnx.helper.make_tensor_value_info("X3", data_type, [3, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Gemm"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Gemm Adapter: 9 -> 8
def test_gemm_9_8(self): # type: () -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.UINT64
nodes = [onnx.helper.make_node(
"Gemm",
inputs=["X1", "X2", "X3"],
outputs=["Y"]
)]
graph = helper.make_graph(
nodes,
"test_gemm",
[onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
onnx.helper.make_tensor_value_info("X2", data_type, [4, 3]),
onnx.helper.make_tensor_value_info("X3", data_type, [3, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[3].op_type == "Gemm"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
assert converted_model.opset_import[0].version == to_opset
# Test Upsample Adapter: 8 -> 9
def test_upsample_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Upsample",
inputs=["X"],
outputs=["Y"],
mode="nearest",
scales=[1.0, 1.0, 2.0, 3.0],
)]
graph = helper.make_graph(
nodes,
"test_upsample_8_9",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert len(converted_model.graph.node) == 2
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.node[1].op_type == "Upsample"
assert len(converted_model.graph.node[1].attribute) == 1
assert converted_model.graph.node[1].attribute[0].name == "mode"
assert converted_model.opset_import[0].version == to_opset
# Test Helper for Upsample Adapter: 9 -> 8
def helper_upsample_with_initializer(self, raw_scale=False): # type: (bool) -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
nodes = [onnx.helper.make_node(
"Upsample",
inputs=["X", "Scales"],
outputs=["Y"],
mode="nearest"
)]
scale_value = [1.0, 1.0, 2.0, 3.0]
scale_tensor = onnx.helper.make_tensor("Scales", onnx.TensorProto.FLOAT, [4], bytes(struct.pack("4f", *scale_value)) if raw_scale else scale_value, raw_scale)
graph = helper.make_graph(
nodes,
"test_upsample",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2]),
onnx.helper.make_tensor_value_info("Scales", data_type, [4])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])],
[scale_tensor])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Upsample"
assert len(converted_model.graph.initializer) == 0
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "scales"
assert converted_model.opset_import[0].version == to_opset
# Test Helper for Upsample Adapter: 9 -> 8
def helper_upsample_with_constant(self, raw_scale=False): # type: (bool) -> None
from_opset = 9
to_opset = 8
data_type = TensorProto.FLOAT
scale_value = [1.0, 1.0, 2.0, 3.0]
scale_tensor = onnx.helper.make_tensor("const_value", onnx.TensorProto.FLOAT, [4], bytes(struct.pack("4f", *scale_value)) if raw_scale else scale_value, raw_scale)
nodes = [
onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['Constant_Output'],
value=scale_tensor),
onnx.helper.make_node(
"Upsample",
inputs=["X", "Constant_Output"],
outputs=["Y"],
mode="nearest")]
graph = helper.make_graph(
nodes,
"test_upsample",
[onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
[onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])],
value_info=[onnx.helper.make_tensor_value_info("Constant_Output", data_type, [4])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert len(converted_model.graph.node) == 1
assert converted_model.graph.node[0].op_type == "Upsample"
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "scales"
assert converted_model.opset_import[0].version == to_opset
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_constant_node_9_8(self): # type: () -> None
self.helper_upsample_with_constant(raw_scale=False)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_initializer_9_8(self): # type: () -> None
self.helper_upsample_with_initializer(raw_scale=False)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_raw_initializer_9_8(self): # type: () -> None
self.helper_upsample_with_initializer(raw_scale=True)
# Test Upsample Adapter: 9 -> 8
def test_upsample_with_raw_constant_node_9_8(self): # type: () -> None
self.helper_upsample_with_constant(raw_scale=True)
# Test Scan Adapter: 8 -> 9
def test_scan_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type = TensorProto.FLOAT
node1 = onnx.helper.make_node("Add", inputs=["sum_in", "next"], outputs=["sum_out"],)
node2 = onnx.helper.make_node("Identity", inputs=["sum_out"], outputs=["scan_out"],)
g = onnx.helper.make_graph(
[node1, node2],
"scan_body",
[onnx.helper.make_tensor_value_info("sum_in", data_type, [2]),
onnx.helper.make_tensor_value_info("next", data_type, [2])],
[onnx.helper.make_tensor_value_info("sum_out", data_type, [2]),
onnx.helper.make_tensor_value_info("scan_out", data_type, [2])]
)
no_sequence_lens = '' # optional input, not supplied
nodes = [onnx.helper.make_node(
"Scan",
inputs=[no_sequence_lens, "initial", "x"],
outputs=["y", "z"],
body=g,
num_scan_inputs=1,
)]
initial = onnx.helper.make_tensor_value_info("initial", data_type, [1, 2])
x = onnx.helper.make_tensor_value_info("x", data_type, [1, 3, 2])
y = onnx.helper.make_tensor_value_info("y", data_type, [1, 2])
z = onnx.helper.make_tensor_value_info("z", data_type, [1, 3, 2])
graph = onnx.helper.make_graph(
nodes, "test_scan_8_9", [initial, x], [y, z]
)
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Scan"
assert converted_model.opset_import[0].version == to_opset
# Test Cast Adapter: 8 -> 9
def test_cast_8_9(self): # type: () -> None
from_opset = 8
to_opset = 9
data_type_from = TensorProto.FLOAT
data_type_to = TensorProto.UINT32
nodes = [onnx.helper.make_node(
"Cast",
inputs=["X"],
outputs=["Y"],
to=TensorProto.UINT32
)]
graph = helper.make_graph(
nodes,
"test_cast",
[onnx.helper.make_tensor_value_info("X", data_type_from, [2, 3])],
[onnx.helper.make_tensor_value_info("Y", data_type_to, [2, 3])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "Cast"
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type_to
assert converted_model.opset_import[0].version == to_opset
# Test Split Adapter: 13 -> 12
def test_split_13_12(self): # type: () -> None
nodes = [helper.make_node('Constant', [], ["split"],
value=helper.make_tensor("", TensorProto.INT64, [2],
[2, 3])),
helper.make_node('Split', ["X", "split"], ["Y1", "Y2"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y1", TensorProto.FLOAT, (2,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (3,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 13), 12)
# Check converted node types and target opset version
assert converted_model.graph.node[0].op_type == "Split"
assert converted_model.opset_import[0].version == 12
# Test Split Adapter: 12 -> 13
def test_split_12_13(self): # type: () -> None
nodes = [helper.make_node('Split', ["X"], ["Y1", "Y2"], split=[2, 3])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y1", TensorProto.FLOAT, (2,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (3,))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 12), 13)
# Check converted node types and target opset version
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.node[1].op_type == "Split"
assert converted_model.opset_import[0].version == 13
# Test AxesInputToAttribute Adapter: 13 -> 12
def test_axes_input_to_attr_13_12(self): # type: () -> None
nodes = [helper.make_node('Constant', [], ["axes"],
value=helper.make_tensor("", TensorProto.INT64, [1],
[0])),
helper.make_node('ReduceSum', ["X", "axes"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 13), 12)
# Check converted node types and target opset version
assert converted_model.graph.node[0].op_type == "ReduceSum"
assert converted_model.opset_import[0].version == 12
# Test AxesAttributeToInput Adapter: 12 -> 13
def test_axes_attr_to_input_12_13(self): # type: () -> None
nodes = [helper.make_node('ReduceSum', ["X"], ["Y"], axes=[0])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 12), 13)
# Check converted node types and target opset version
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.opset_import[0].version == 13
# Test Slice Adapter: 9 -> 10
def test_slice_9_10(self): # type: () -> None
nodes = [helper.make_node('Slice', ["X"], ["Y"],
axes=[0, 1],
starts=[0, 0],
ends=[3, 10])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (20, 10, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 10, 5))])
converted_model = self._converted(graph, helper.make_operatorsetid(
"", 9), 10)
assert converted_model.graph.node[0].op_type == "Constant"
assert converted_model.graph.node[1].op_type == "Constant"
assert converted_model.graph.node[2].op_type == "Constant"
assert converted_model.graph.node[3].op_type == "Slice"
assert converted_model.opset_import[0].version == 10
assert len(converted_model.graph.node[3].input) == 4
assert len(converted_model.graph.node[3].attribute) == 0
# Test RNN Adapter: 13 -> 14
def test_rnn_13_14(self): # type: () -> None
from_opset = 13
to_opset = 14
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'RNN',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size
)]
graph = helper.make_graph(
nodes,
"test_rnn",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 2 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "RNN"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "layout"
# Test GRU Adapter: 13 -> 14
def test_gru_13_14(self): # type: () -> None
from_opset = 13
to_opset = 14
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'GRU',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size
)]
graph = helper.make_graph(
nodes,
"test_gru",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, 3 * hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, 3 * hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 6 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "GRU"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "layout"
# Test LSTM Adapter: 13 -> 14
def test_lstm_13_14(self): # type: () -> None
from_opset = 13
to_opset = 14
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'LSTM',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size
)]
graph = helper.make_graph(
nodes,
"test_lstm",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, 4 * hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, 4 * hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 8 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "LSTM"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 2
assert converted_model.graph.node[0].attribute[1].name == "layout"
# Test RNN Adapter: 14 -> 13
def test_rnn_14_13(self): # type: () -> None
from_opset = 14
to_opset = 13
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'RNN',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size,
layout=0,
)]
graph = helper.make_graph(
nodes,
"test_rnn",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 2 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "RNN"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 1
# Test GRU Adapter: 14 -> 13
def test_gru_14_13(self): # type: () -> None
from_opset = 14
to_opset = 13
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'GRU',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size,
layout=0,
)]
graph = helper.make_graph(
nodes,
"test_gru",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, 3 * hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, 3 * hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 6 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "GRU"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 1
# Test LSTM Adapter: 14 -> 13
def test_lstm_14_13(self): # type: () -> None
from_opset = 14
to_opset = 13
data_type = TensorProto.FLOAT
seq_length = 1
batch_size = 2
input_size = 3
num_directions = 1
hidden_size = 5
nodes = [onnx.helper.make_node(
'LSTM',
inputs=['X', 'W', 'R'],
outputs=['', 'Y_h'],
hidden_size=hidden_size,
layout=0,
)]
graph = helper.make_graph(
nodes,
"test_lstm",
[onnx.helper.make_tensor_value_info("X", data_type, [seq_length, batch_size, input_size]),
onnx.helper.make_tensor_value_info("W", data_type, [num_directions, 4 * hidden_size, input_size]),
onnx.helper.make_tensor_value_info("R", data_type, [num_directions, 4 * hidden_size, hidden_size]),
onnx.helper.make_tensor_value_info("B", data_type, [num_directions, 8 * hidden_size])],
[onnx.helper.make_tensor_value_info("Y_h", data_type, [num_directions, batch_size, hidden_size])])
converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)
assert converted_model.graph.node[0].op_type == "LSTM"
assert converted_model.opset_import[0].version == to_opset
assert len(converted_model.graph.node[0].attribute) == 1
# Test that subgraphs are converted
def test_if_subgraph_10_11(self): # type: () -> None
from_opset = 10
to_opset = 11
data_type = TensorProto.FLOAT
data_shape = [2]
subg1_node = [onnx.helper.make_node(
'Clip',
inputs=['sub_in'],
outputs=['sub_out'],
min=2.0,
max=3.0
)]
subg1_input = [
onnx.helper.make_tensor_value_info('sub_in', data_type, data_shape)
]
subg1_output = [
onnx.helper.make_tensor_value_info('sub_out', data_type, data_shape)
]
subg1 = helper.make_graph(subg1_node, "then_g", subg1_input, subg1_output)
subg2_node = [onnx.helper.make_node(
'Clip',
inputs=['sub_in'],
outputs=['sub_out'],
min=2.0,
max=3.0
)]
subg2_input = [
onnx.helper.make_tensor_value_info('sub_in', data_type, data_shape)
]
subg2_output = [
onnx.helper.make_tensor_value_info('sub_out', data_type, data_shape)
]
subg2 = helper.make_graph(subg2_node, "then_g", subg2_input, subg2_output)
node = [onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['out'],
then_branch=subg1,
else_branch=subg2
)]
input = [
onnx.helper.make_tensor_value_info('cond', TensorProto.BOOL, [])
]
output = [
onnx.helper.make_tensor_value_info('out', data_type, data_shape)
]
init = [helper.make_tensor('sub_in', data_type, data_shape, [4.0, 5.0])]
graph = helper.make_graph(node, 'test_subgraphs', input, output, init)
converted = self._converted(graph, helper.make_operatorsetid('', from_opset), to_opset)
assert converted.graph.node[0].op_type == 'If'
assert converted.opset_import[0].version == to_opset
assert converted.graph.node[0].attribute[0].g.node[2].op_type == 'Clip'
assert len(converted.graph.node[0].attribute[0].g.node[2].attribute) == 0
assert converted.graph.node[0].attribute[1].g.node[2].op_type == 'Clip'
assert len(converted.graph.node[0].attribute[1].g.node[2].attribute) == 0
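# For reference, a hedged sketch of what the `_converted` helper used by
# these tests presumably does: wrap the graph in a ModelProto carrying the
# source opset and run the public ONNX version converter. Only the onnx API
# calls are confirmed; the wrapper itself is inferred from its call sites.
def _converted_sketch(self, graph, source_opset_id, target_opset):
    from onnx import version_converter
    model = onnx.helper.make_model(graph, opset_imports=[source_opset_id])
    return version_converter.convert_version(model, target_opset)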
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
import mmap
import os
import struct
import sys
from collections import OrderedDict
import numpy as np
class SigprocFile:
"""
Simple functions for reading sigproc filterbank files from python. Not all possible features are implemented.
Original Source from Paul Demorest's [pysigproc.py](https://github.com/demorest/pysigproc/blob/master/pysigproc.py).
Args:
fp (str): file name
copy_hdr (bool): copy header from another SigprocFile class object
Attributes:
rawdatafile (str): Raw data file
source_name (str): Source Name
machine_id (int) : Machine ID
barycentric (int): If 1 the data is barycentered
pulsarcentric (int): If 1 the data is in the pulsar's frame of reference
src_raj (float): RA of the source (HHMMSS.SS)
src_dej (float): Dec of the source (DDMMSS.SS)
az_start (float): Telescope Azimuth (degrees)
za_start (float): Telescope Zenith Angle (degrees)
fch1 (float): Frequency of first channel (MHz)
foff (float): Channel bandwidth (MHz)
nchans (int): Number of channels
nbeams (int): Number of beams in the receiver.
ibeam (int): Beam number
nbits (int): Number of bits the data are recorded in.
tstart (float): Start MJD of the data
tsamp (float): Sampling interval (seconds)
nifs (int): Number of IFs in the data.
"""
# List of types
_type = OrderedDict()
_type["rawdatafile"] = "string"
_type["source_name"] = "string"
_type["machine_id"] = "int"
_type["barycentric"] = "int"
_type["pulsarcentric"] = "int"
_type["telescope_id"] = "int"
_type["src_raj"] = "double"
_type["src_dej"] = "double"
_type["az_start"] = "double"
_type["za_start"] = "double"
_type["data_type"] = "int"
_type["fch1"] = "double"
_type["foff"] = "double"
_type["nchans"] = "int"
_type["nbeams"] = "int"
_type["ibeam"] = "int"
_type["nbits"] = "int"
_type["tstart"] = "double"
_type["tsamp"] = "double"
_type["nifs"] = "int"
def __init__(self, fname):
# init all items to None
for k in list(self._type.keys()):
setattr(self, k, None)
if os.path.isfile(fname) and os.stat(fname).st_size != 0:
self.fp = open(fname, 'rb')
self.read_header()
self._mmdata = mmap.mmap(
self.fp.fileno(), 0, mmap.MAP_PRIVATE, mmap.PROT_READ
)
@classmethod
def new_file(cls, fname, header):
"""
Create a new filterbank file
:param str fname: path to file
:param dict header: sigproc header
:return: SigprocFile
"""
# init the file
sigprocfile = SigprocFile(fname)
# create the header
for k, v in header.items():
# key should be in list of known keys
assert k in sigprocfile._type.keys()
# set the value as attribute
setattr(sigprocfile, k, v)
# write the header
with open(fname, 'wb') as fp:
sigprocfile.filterbank_header(fout=fp)
# try to close the file in case it already existed and was opened
try:
sigprocfile.fp.close()
except AttributeError:
pass
return SigprocFile(fname)
@staticmethod
def send_string(val, f=sys.stdout):
"""
Encode and write a string.
Args:
val: value to encode
f: file object to write the value into
"""
val = val.encode()
f.write(struct.pack("i", len(val)))
f.write(val)
def send_num(self, name, val, f=sys.stdout):
"""
Encode a number
Args:
name: name to encode
val: value to encode
f: file object to write the value into
"""
self.send_string(name, f)
f.write(struct.pack(self._type[name][0], val))
def send(self, name, f=sys.stdout):
"""
Encode a header key and write its value.
Args:
name: name to encode
f: file object to encode the value into
"""
if not hasattr(self, name):
return
if getattr(self, name) is None:
return
if self._type[name] == "string":
self.send_string(name, f)
self.send_string(getattr(self, name), f)
else:
self.send_num(name, getattr(self, name), f)
def filterbank_header(self, fout=sys.stdout):
"""
Write the filterbank header
Args:
fout: output file object
"""
self.send_string("HEADER_START", f=fout)
for k in list(self._type.keys()):
self.send(k, fout)
self.send_string("HEADER_END", f=fout)
@staticmethod
def get_string(fp):
"""
Read the next sigproc-format string in the file.
Args:
fp: file object to read stuff from.
"""
nchar = struct.unpack("i", fp.read(4))[0]
if nchar > 80 or nchar < 1:
return None, 0
out = fp.read(nchar)
return out, nchar + 4
def read_header(self):
"""
Read the header
"""
self.hdrbytes = 0
(s, n) = self.get_string(self.fp)
if s != b"HEADER_START":
self.hdrbytes = 0
return None
self.hdrbytes += n
while True:
(s, n) = self.get_string(self.fp)
s = s.decode()
self.hdrbytes += n
if s == "HEADER_END":
return
if self._type[s] == "string":
(v, n) = self.get_string(self.fp)
self.hdrbytes += n
setattr(self, s, v)
else:
datatype = self._type[s][0]
datasize = struct.calcsize(datatype)
val = struct.unpack(datatype, self.fp.read(datasize))[0]
setattr(self, s, val)
self.hdrbytes += datasize
@property
def dtype(self):
"""
Returns: dtype of the data
"""
if self.nbits == 8:
return np.uint8
elif self.nbits == 16:
return np.uint16
elif self.nbits == 32:
return np.float32
else:
raise RuntimeError("nbits=%d not supported" % self.nbits)
@property
def bytes_per_spectrum(self):
"""
Returns: bytes per spectrum
"""
return self.nbits * self.nchans * self.nifs // 8
def nspectra(self):
"""
Returns: Number of spectra in the file
"""
return (self._mmdata.size() - self.hdrbytes) / self.bytes_per_spectrum
def native_nspectra(self):
"""
Native number of spectra in the file. This will be made a property so that it can't be overwritten.
Returns: Number of spectra in the file
"""
return (self._mmdata.size() - self.hdrbytes) / self.bytes_per_spectrum
def get_data(self, nstart, nsamp):
"""
Return nsamp time slices starting at nstart.
Args:
nstart (int): Starting spectra number to start reading from.
nsamp (int): Number of spectra to read.
Returns:
np.ndarray: data.
"""
bstart = int(nstart) * self.bytes_per_spectrum
nbytes = int(nsamp) * self.bytes_per_spectrum
b0 = self.hdrbytes + bstart
b1 = b0 + nbytes
data = np.frombuffer(
self._mmdata[int(b0): int(b1)], dtype=self.dtype
).reshape((-1, self.nifs, self.nchans))
return data[:, 0, :]
def unpack(self, nstart, nsamp):
"""
Unpack nsamp time slices starting at nstart to 32-bit floats.
Args:
nstart (int): Starting spectra number to start reading from.
nsamp (int): Number of spectra to read.
Returns:
np.ndarray: Data
"""
if self.nbits >= 8:
return self.get_data(nstart, nsamp).astype(np.float32)
bstart = int(nstart) * self.bytes_per_spectrum
nbytes = int(nsamp) * self.bytes_per_spectrum
b0 = int(self.hdrbytes + bstart)
b1 = int(b0 + nbytes)
# reshape with the frequency axis reduced by the packing factor
fac = 8 // self.nbits  # number of samples packed into each byte
d = np.frombuffer(self._mmdata[b0:b1], dtype=np.uint8).reshape(
    (nsamp, self.nifs, self.nchans // fac)
)
unpacked = np.empty((nsamp, self.nifs, self.nchans), dtype=np.float32)
for i in range(fac):
mask = 2 ** (self.nbits * i) * (2 ** self.nbits - 1)
unpacked[..., i::fac] = (d & mask) / 2 ** (i * self.nbits)
return unpacked
def native_tsamp(self):
"""
This will be made a property so that it can't be overwritten.
Returns:
Native sampling time of the filterbank.
"""
return self.tsamp
def native_foff(self):
"""
This will be made a property so that it can't be overwritten.
Returns:
Native channel bandwidth of the filterbank.
"""
return self.foff
def native_nchans(self):
"""
This will be made a property so that it can't be overwritten.
Returns:
Native number of channels in the filterbank.
"""
return self.nchans
def write_header(self, filename):
"""
Write the filterbank header
Args:
filename (str): name of the filterbank file
"""
with open(filename, "wb") as f:
self.filterbank_header(fout=f)
return None
@staticmethod
def append_spectra(spectra, filename):
"""
Append spectra to the end of the file
Args:
spectra (np.ndarray) : np array of the data to be dumped into the filterbank file
filename (str): name of the filterbank file
"""
with open(filename, "ab") as f:
f.seek(0, os.SEEK_END)
f.write(spectra.flatten().astype(spectra.dtype))
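if __name__ == '__main__':
    # Minimal usage sketch (the file name is an assumption; any sigproc
    # filterbank with 8-, 16- or 32-bit samples will do): read the header,
    # then pull the first 128 spectra out of the memory map.
    sf = SigprocFile('example.fil')
    print(sf.nchans, sf.nbits, sf.tsamp)
    print(int(sf.nspectra()), 'spectra in file')
    block = sf.get_data(nstart=0, nsamp=128)  # shape (128, nchans)
    print(block.shape, block.dtype)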
|
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import shift_dim
class NormReLU(nn.Module):
def __init__(self, channels, relu=True, affine=True):
super().__init__()
self.relu = relu
self.norm = nn.BatchNorm3d(channels, affine=affine)
def forward(self, x):
x_float = x.float()
x_float = self.norm(x_float)
x = x_float.type_as(x)
if self.relu:
x = F.relu(x, inplace=True)
return x
class ResidualBlock(nn.Module):
def __init__(self, in_channels, filters, stride, use_projection=False):
super().__init__()
if use_projection:
self.proj_conv = nn.Conv3d(in_channels, filters, kernel_size=1,
stride=stride, bias=False)
self.proj_bnr = NormReLU(filters, relu=False)
self.conv1 = nn.Conv3d(in_channels, filters, kernel_size=3,
stride=stride, bias=False, padding=1)
self.bnr1 = NormReLU(filters)
self.conv2 = nn.Conv3d(filters, filters, kernel_size=3,
stride=1, bias=False, padding=1)
self.bnr2 = NormReLU(filters)
self.use_projection = use_projection
def forward(self, x):
shortcut = x
if self.use_projection:
shortcut = self.proj_bnr(self.proj_conv(x))
x = self.bnr1(self.conv1(x))
x = self.bnr2(self.conv2(x))
return F.relu(x + shortcut, inplace=True)
class BlockGroup(nn.Module):
def __init__(self, in_channels, filters, blocks, stride):
super().__init__()
self.start_block = ResidualBlock(in_channels, filters, stride, use_projection=True)
in_channels = filters
self.blocks = []
for _ in range(1, blocks):
self.blocks.append(ResidualBlock(in_channels, filters, 1))
self.blocks = nn.Sequential(*self.blocks)
def forward(self, x):
x = self.start_block(x)
x = self.blocks(x)
return x
class ResNet(nn.Module):
def __init__(self, in_channels, layers, width_multiplier,
stride, resnet_dim=240, cifar_stem=True):
super().__init__()
self.width_multiplier = width_multiplier
self.resnet_dim = resnet_dim
assert all([int(math.log2(d)) == math.log2(d) for d in stride]), stride
n_times_downsample = np.array([int(math.log2(d)) for d in stride])
if cifar_stem:
self.stem = nn.Sequential(
nn.Conv3d(in_channels, 64 * width_multiplier,
kernel_size=3, padding=1, bias=False),
NormReLU(64 * width_multiplier)
)
else:
stride = tuple([2 if d > 0 else 1 for d in n_times_downsample])
n_times_downsample -= 1 # conv
n_times_downsample[-2:] = n_times_downsample[-2:] - 1 # pooling
self.stem = nn.Sequential(
nn.Conv3d(in_channels, 64 * width_multiplier,
kernel_size=7, stride=stride, bias=False,
padding=3),
NormReLU(64 * width_multiplier),
nn.MaxPool3d(kernel_size=3, stride=(1, 2, 2), padding=1)
)
self.group1 = BlockGroup(64 * width_multiplier, 64 * width_multiplier,
blocks=layers[0], stride=1)
stride = tuple([2 if d > 0 else 1 for d in n_times_downsample])
n_times_downsample -= 1
self.group2 = BlockGroup(64 * width_multiplier, 128 * width_multiplier,
blocks=layers[1], stride=stride)
stride = tuple([2 if d > 0 else 1 for d in n_times_downsample])
n_times_downsample -= 1
self.group3 = BlockGroup(128 * width_multiplier, 256 * width_multiplier,
blocks=layers[2], stride=stride)
stride = tuple([2 if d > 0 else 1 for d in n_times_downsample])
n_times_downsample -= 1
self.group4 = BlockGroup(256 * width_multiplier, resnet_dim,
blocks=layers[3], stride=stride)
assert all([d <= 0 for d in n_times_downsample]), f'final downsample {n_times_downsample}'
def forward(self, x):
x = self.stem(x)
x = self.group1(x)
x = self.group2(x)
x = self.group3(x)
x = self.group4(x)
x = shift_dim(x, 1, -1)
return x
def resnet34(width_multiplier, stride, cifar_stem=True, resnet_dim=240):
return ResNet(3, [3, 4, 6, 3], width_multiplier,
stride, cifar_stem=cifar_stem, resnet_dim=resnet_dim)
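def _resnet_smoke_test():
    # Hedged smoke test (all shapes are illustrative): a batch of 2 RGB
    # clips, 16 frames of 64x64, built with 2x temporal and 4x spatial
    # downsampling. Assumes the module is imported as part of its package
    # so the relative `.utils` import resolves.
    net = resnet34(width_multiplier=1, stride=(2, 4, 4))
    x = torch.randn(2, 3, 16, 64, 64)
    out = net(x)  # shift_dim moves channels last: (2, 8, 16, 16, 240)
    print(out.shape)
    return out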
|
|
import random
import os
import pickle
import librosa as lb
import numpy as np
import musdb
import yaml
# ignore warning about unsafe loaders in pyYAML 5.1 (used in musdb)
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
yaml.warnings({'YAMLLoadWarning': False})
def musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr,
frame_length):
"""
This function splits all MUSDB tracks into frames of a given length, downsamples them to a given sampling rate,
converts them to mono and saves each frame as a .npy file. It randomly splits the training partition into a
training set (80 tracks) and a validation set (20 tracks).
"""
path_to_save_train_set = os.path.join(path_to_save_data, 'train')
path_to_save_val_set = os.path.join(path_to_save_data, 'val')
path_to_save_test_set = os.path.join(path_to_save_data, 'test')
if not os.path.exists(path_to_save_data):
os.makedirs(path_to_save_data)
if not os.path.exists(path_to_save_train_set):
os.makedirs(path_to_save_train_set)
if not os.path.exists(path_to_save_val_set):
os.makedirs(path_to_save_val_set)
if not os.path.exists(path_to_save_test_set):
os.makedirs(path_to_save_test_set)
# load the musdb train and test partition with the parser musdb (https://github.com/sigsep/sigsep-mus-db)
musdb_corpus = musdb.DB(root_dir=path_to_musdb)
training_tracks = musdb_corpus.load_mus_tracks(subsets=['train'])
test_tracks = musdb_corpus.load_mus_tracks(subsets=['test'])
# randomly select 20 tracks from the training partition that will be the validation set
all_idx = list(np.arange(0, 100))
random.seed(1)
val_idx = random.sample(population=all_idx, k=20) # track indices of validation set tracks
train_idx = [idx for idx in all_idx if idx not in val_idx] # track indices of training set tracks
# process and save training set
train_file_list = []
for idx in train_idx:
track = training_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_train_set, file_name), frames[:, n])
train_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_train_set, "train_file_list.pickle"), "wb")
pickle.dump(train_file_list, pickle_out)
pickle_out.close()
# process and save validation set
val_file_list = []
for idx in val_idx:
track = training_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_val_set, file_name), frames[:, n])
val_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_val_set, "val_file_list.pickle"), "wb")
pickle.dump(val_file_list, pickle_out)
pickle_out.close()
# process and save test set
test_file_list = []
for idx in range(50):
track = test_tracks[idx]
track_name = track.name.split('-')
track_name = track_name[0][0:6] + "_" + track_name[1][1:6]
track_name = track_name.replace(" ", "_")
track_audio = track.targets['accompaniment'].audio
track_audio_mono = lb.to_mono(track_audio.T)
track_audio_mono_resampled = lb.core.resample(track_audio_mono, track.rate, target_sr)
frames = lb.util.frame(y=track_audio_mono_resampled, frame_length=frame_length, hop_length=frame_length)
number_of_frames = frames.shape[1]
for n in range(number_of_frames):
file_name = track_name + '_{}.npy'.format(n)
np.save(os.path.join(path_to_save_test_set, file_name), frames[:, n])
test_file_list.append(file_name)
pickle_out = open(os.path.join(path_to_save_test_set, "test_file_list.pickle"), "wb")
pickle.dump(test_file_list, pickle_out)
pickle_out.close()
if __name__ == '__main__':
path_to_musdb = '../Datasets/MUSDB18'
path_to_save_data = '../Datasets/MUSDB_accompaniments'
target_sr = 16000
frame_length = 131584
musdb_pre_processing(path_to_musdb, path_to_save_data, target_sr=target_sr, frame_length=frame_length)
|
|
import numpy as np
# This is for Scotland
bands = [12500.0, 14549.0, 24944.0, 43430.0, 150000.0, 1000000.0]
rates = [ 0.0, 19.0, 20.0, 21.0, 41.0, 46.0]
def gross_to_net(gross_income, bands, rates):
gross_income = float(gross_income)
chunks = []
for i in range(len(bands)):
if i == 0:
chunk = bands[i]
else:
chunk = bands[i] - bands[i-1]
chunks.append(chunk)
my_chunks = []
current_taxable_gross = 0.0
previous_taxable_gross = 0.0  # avoids a NameError when gross_income falls inside the first band
for chunk in chunks:
current_taxable_gross += chunk
if current_taxable_gross <= gross_income:
my_chunks.append(chunk)
previous_taxable_gross = current_taxable_gross
else:
my_current_chunk = max(gross_income - previous_taxable_gross, 0.0)
my_chunks.append(my_current_chunk)
previous_taxable_gross = current_taxable_gross
income_taxes = np.dot(my_chunks, np.divide(rates, 100.0))
print(chunks)
print(my_chunks)
print(sum(my_chunks))
print(income_taxes)
income = gross_income - income_taxes
print(income)
weekly_earnings = gross_income / 52.0
print(weekly_earnings)
national_insurance_weekly = 0
if weekly_earnings <= 166.0:
national_insurance_weekly = 0.0
elif weekly_earnings <= 962.0:
national_insurance_weekly = (weekly_earnings - 166.0) * 0.12
elif weekly_earnings > 962.0:
national_insurance_weekly = (962.0 - 166.0) * 0.12 + (weekly_earnings - 962.0) * 0.02
print(national_insurance_weekly)
national_insurance = 52.0 * national_insurance_weekly
print(national_insurance)
net_income = income - national_insurance
print(net_income)
return net_income
# This excludes national insurance because it is not paid after retirement
def net_to_gross(net_income, bands, rates):
net_income = float(net_income)
chunks = []
for i in range(len(bands)):
if i == 0:
chunk = bands[i]
else:
chunk = bands[i] - bands[i-1]
chunks.append(chunk)
# print(chunks)
net_chunks = []
for i, chunk in enumerate(chunks):
net_chunk = chunk * (1.0 - rates[i] / 100.0)
net_chunks.append(net_chunk)
# print(net_chunks)
gross_income = 0
current_net = 0
previous_net = 0
for net_chunk, chunk in zip(net_chunks, chunks):
current_net += net_chunk
if net_income >= current_net:
# ratio = (net_income - previous_net) / net_chunk
gross_income += chunk
previous_net = current_net
else:
ratio = (net_income - previous_net) / net_chunk
gross_income += chunk * ratio
break
# print(gross_income)
return gross_income
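if __name__ == '__main__':
    # Usage sketch (figures are illustrative, not tax advice): take-home pay
    # for a gross salary, and the gross needed for a target net income.
    # Note that net_to_gross deliberately ignores national insurance.
    print(gross_to_net(40000.0, bands, rates))
    print(net_to_gross(30000.0, bands, rates))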
|
|
# -*- coding: utf-8 -*-
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import types as python_types
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine.base_layer import InputSpec
from ..engine.base_layer import Layer
from ..utils.generic_utils import func_dump
from ..utils.generic_utils import func_load
from ..utils.generic_utils import deserialize_keras_object
from ..utils.generic_utils import has_arg
from ..legacy import interfaces
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
If all features for a given sample timestep are equal to `mask_value`,
then the sample timestep will be masked (skipped) in all downstream layers
(as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
# Example
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer.
You want to mask sample #0 at timestep #3, and sample #2 at timestep #5,
because you lack features for these sample timesteps. You can do:
- set `x[0, 3, :] = 0.` and `x[2, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
# Arguments
mask_value: Either None or mask value to skip
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
output_mask = K.any(K.not_equal(inputs, self.mask_value), axis=-1)
return output_mask
def call(self, inputs):
boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
axis=-1, keepdims=True)
return inputs * K.cast(boolean_mask, K.dtype(inputs))
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
class Dropout(Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
# Arguments
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
# References
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
http://www.jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
"""
@interfaces.legacy_dropout_support
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(Dropout, self).__init__(**kwargs)
self.rate = min(1., max(0., rate))
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return self.noise_shape
symbolic_shape = K.shape(inputs)
noise_shape = [symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self.noise_shape)]
return tuple(noise_shape)
def call(self, inputs, training=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(inputs)
def dropped_inputs():
return K.dropout(inputs, self.rate, noise_shape,
seed=self.seed)
return K.in_train_phase(dropped_inputs, inputs,
training=training)
return inputs
def get_config(self):
config = {'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
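# Usage sketch for the `noise_shape` argument documented above (sizes are
# illustrative): share one dropout mask across every timestep of a
# (batch, timesteps, features) input by fixing the timestep axis to 1.
def _dropout_noise_shape_example():
    from keras.models import Sequential
    model = Sequential()
    model.add(Dropout(0.5, noise_shape=(None, 1, 64), input_shape=(10, 64)))
    return model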
class GuidedDropout(Layer):
"""Applies a guided Dropout to the input, where the output activations are set
to 0 given by the weights of the layer.
# TODO: Test this layer
# Arguments:
weights_shape: (num_dropout_matrices, num_features)
weights: Initial weights
"""
def __init__(self, weights_shape, weights=None, **kwargs):
self.weights_shape = weights_shape
self.initial_weights = [weights]
self.init = initializers.get('uniform')
super(GuidedDropout, self).__init__(**kwargs)
def build(self, input_shape):
self.W = self.init(self.weights_shape,
name='{}_W'.format(self.name))
self.trainable_weights = [self.W]
# initialize weights
if (self.initial_weights[0] is not None):
self.set_weights(self.initial_weights)
self.trainable = False
def call(self, inputs, mask=None):
modulated_input = inputs[0]
modulator_input = inputs[1]
modulated_output = modulated_input * self.W[K.argmax(modulator_input, axis=1), :]
return modulated_output
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = {'weights_shape': self.weights_shape}
base_config = super(GuidedDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
# Arguments
rate: float between 0 and 1. Fraction of the input units to drop.
# Input shape
3D tensor with shape:
`(samples, timesteps, channels)`
# Output shape
Same as input
# References
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
"""
@interfaces.legacy_spatialdropout1d_support
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
# Arguments
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: `'channels_first'` or `'channels_last'`.
In `'channels_first'` mode, the channels dimension
(the depth) is at index 1,
in `'channels_last'` mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `'channels_last'`.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if `data_format='channels_first'`
or 4D tensor with shape:
`(samples, rows, cols, channels)` if `data_format='channels_last'`.
# Output shape
Same as input
# References
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
"""
@interfaces.legacy_spatialdropoutNd_support
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
self.data_format = K.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1)
else:
noise_shape = (input_shape[0], 1, 1, input_shape[3])
return noise_shape
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
# Arguments
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: `'channels_first'` or `'channels_last'`.
In `'channels_first'` mode, the channels dimension (the depth)
is at index 1, in `'channels_last'` mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `'channels_last'`.
# Input shape
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if `data_format='channels_first'`
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if `data_format='channels_last'`.
# Output shape
Same as input
# References
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
"""
@interfaces.legacy_spatialdropoutNd_support
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
self.data_format = K.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)
else:
noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])
return noise_shape
class Activation(Layer):
"""Applies an activation function to an output.
# Arguments
activation: name of activation function to use
(see: [activations](../activations.md)),
or alternatively, a Theano or TensorFlow operation.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
class Reshape(Layer):
"""Reshapes an output to a certain shape.
# Arguments
target_shape: target shape. Tuple of integers.
Does not include the batch axis.
# Input shape
Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
# Output shape
`(batch_size,) + target_shape`
# Example
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Finds and replaces a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
# Arguments
input_shape: original shape of array being reshaped
output_shape: target shape of the array, with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
# Returns
The new output shape with a `-1` replaced with its computed value.
# Raises
ValueError: if `input_shape` and `output_shape` do not match.
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return tuple(output_shape)
def compute_output_shape(self, input_shape):
if None in input_shape[1:]:
# input shape (partially) unknown? replace -1's with None's
return ((input_shape[0],) +
tuple(s if s != -1 else None for s in self.target_shape))
else:
# input shape known? then we can compute the output shape
return (input_shape[0],) + self._fix_unknown_dimension(
input_shape[1:], self.target_shape)
def call(self, inputs):
return K.reshape(inputs, (K.shape(inputs)[0],) + self.target_shape)
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
# Example
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
# Arguments
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tuple(output_shape)
def call(self, inputs):
return K.permute_dimensions(inputs, (0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class PermuteGeneral(Layer):
"""Permutes the dimensions of the input according to a given pattern.
This is just like the layer Permute, but DOES INCLUDE the batch dimension.
# Arguments
dims: Tuple of integers. Permutation pattern, INCLUDING the
samples dimension. Indexing starts at 0.
For instance, `(1, 0, 2)` permutes the batch and first dimension of the input.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(PermuteGeneral, self).__init__(**kwargs)
self.dims = tuple(dims)
self.supports_masking = True
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
output_shape[i] = input_shape[dim]
return tuple(output_shape)
def call(self, inputs, mask=None):
return K.permute_dimensions(inputs, self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(PermuteGeneral, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
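# Usage sketch for PermuteGeneral (shapes are illustrative): unlike Permute,
# the pattern covers the batch axis, so `(1, 0, 2)` swaps batch and time.
def _permute_general_example():
    from keras.models import Sequential
    model = Sequential()
    model.add(PermuteGeneral((1, 0, 2), input_shape=(10, 64)))
    return model  # model.output_shape == (10, None, 64)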
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
# Arguments
data_format: A string,
one of `'channels_last'` (default) or `'channels_first'`.
The ordering of the dimensions in the inputs.
The purpose of this argument is to preserve weight
ordering when switching a model from one data format
to another.
`'channels_last'` corresponds to inputs with shape
`(batch, ..., channels)` while `'channels_first'` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `'channels_last'`.
# Example
```python
model = Sequential()
model.add(Conv2D(64, (3, 3),
input_shape=(3, 32, 32), padding='same',))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, data_format=None, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=3)
self.data_format = K.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
if not all(input_shape[1:]):
raise ValueError('The shape of the input to "Flatten" '
'is not fully defined '
'(got ' + str(input_shape[1:]) + '). '
'Make sure to pass a complete "input_shape" '
'or "batch_input_shape" argument to the first '
'layer in your model.')
return (input_shape[0], np.prod(input_shape[1:]))
def call(self, inputs):
if self.data_format == 'channels_first':
# Ensure works for any dim
permutation = [0]
permutation.extend([i for i in
range(2, K.ndim(inputs))])
permutation.append(1)
inputs = K.permute_dimensions(inputs, permutation)
return K.batch_flatten(inputs)
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(Flatten, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RepeatVector(Layer):
"""Repeats the input n times.
# Example
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
# Arguments
n: integer, repetition factor.
# Input shape
2D tensor of shape `(num_samples, features)`.
# Output shape
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.n, input_shape[1])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RepeatMatrix(Layer):
"""Repeats the input n times.
Applies the same procedure as RepeatVector() but for inputs of any dimensions.
The new dimension will be introduced in the position defined by the user.
# Arguments
n: integer, repetition factor.
dim: integer, dimension along which the input will be repeated (default = 1)
# Input shape
R-dimensional tensor of shape `(nb_samples, dim1, dim2, ..., dimR-1)`.
# Output shape
R+1-dimensional tensor of shape `(nb_samples, n, dim2, dim3, ..., dimR)` if dim==1.
"""
def __init__(self, n, dim=1, **kwargs):
self.supports_masking = True
self.n = n
self.dim = dim
super(RepeatMatrix, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape[:self.dim]) + [self.n] + list(input_shape[self.dim:])
return tuple(output_shape)
def compute_mask(self, input, input_mask=None):
return input_mask
def call(self, inputs, mask=None):
return K.repeatRdim(inputs, self.n, axis=self.dim)
def get_config(self):
config = {'n': self.n,
'dim': self.dim}
base_config = super(RepeatMatrix, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
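# Usage sketch for RepeatMatrix (assumes the custom `K.repeatRdim` backend
# op shipped with this fork is available): insert a new axis of length 5 at
# position 2, turning a (batch, 8, 16) input into (batch, 8, 5, 16).
def _repeat_matrix_example():
    from keras.models import Sequential
    model = Sequential()
    model.add(RepeatMatrix(5, dim=2, input_shape=(8, 16)))
    return model  # model.output_shape == (None, 8, 5, 16)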
class Lambda(Layer):
"""Wraps arbitrary expression as a `Layer` object.
# Examples
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
def antirectifier_output_shape(input_shape):
shape = list(input_shape)
assert len(shape) == 2 # only valid for 2D tensors
shape[-1] *= 2
return tuple(shape)
model.add(Lambda(antirectifier,
output_shape=antirectifier_output_shape))
```
```python
# add a layer that returns the hadamard product
# and sum of it from two input tensors
def hadamard_product_sum(tensors):
out1 = tensors[0] * tensors[1]
out2 = K.sum(out1, axis=-1)
return [out1, out2]
def hadamard_product_sum_output_shape(input_shapes):
shape1 = list(input_shapes[0])
shape2 = list(input_shapes[1])
assert shape1 == shape2 # else hadamard product isn't possible
return [tuple(shape1), tuple(shape2[:-1])]
x1 = Dense(32)(input_1)
x2 = Dense(32)(input_2)
layer = Lambda(hadamard_product_sum, hadamard_product_sum_output_shape)
x_hadamard, x_sum = layer([x1, x2])
```
# Arguments
function: The function to be evaluated.
Takes input tensor or list of tensors as first argument.
output_shape: Expected output shape from function.
Only relevant when using Theano.
Can be a tuple or function.
If a tuple, it only specifies the first dimension onward;
sample dimension is assumed either the same as the input:
`output_shape = (input_shape[0], ) + output_shape`
or, the input is `None` and
the sample dimension is also `None`:
`output_shape = (None, ) + output_shape`
If a function, it specifies the entire shape as a function of the
input shape: `output_shape = f(input_shape)`
mask: Either None (indicating no masking) or a Tensor indicating the
input mask for Embedding.
arguments: optional dictionary of keyword arguments to be passed
to the function.
supports_masking: whether it supports masking or not
# Input shape
Arbitrary. Use the keyword argument input_shape
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Specified by `output_shape` argument
(or auto-inferred when using TensorFlow or CNTK).
"""
@interfaces.legacy_lambda_support
def __init__(self, function, output_shape=None,
mask=None, arguments=None,
supports_masking=True, **kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self._input_dtypes = None
self.arguments = arguments if arguments else {}
self.supports_masking = supports_masking
if mask is not None:
self.supports_masking = True
self.mask = mask
if output_shape is None:
self._output_shape = None
elif isinstance(output_shape, (tuple, list)):
self._output_shape = tuple(output_shape)
else:
if not callable(output_shape):
raise TypeError('In Lambda, `output_shape` '
'must be a list, a tuple, or a function.')
self._output_shape = output_shape
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# With TensorFlow or CNTK, we can infer the output shape directly:
if K.backend() in ('tensorflow', 'cntk'):
if isinstance(input_shape, list):
xs = [K.placeholder(shape=shape, dtype=dtype)
for shape, dtype in zip(input_shape, self._input_dtypes)]
x = self.call(xs)
else:
x = K.placeholder(shape=input_shape, dtype=self._input_dtypes)
x = self.call(x)
if isinstance(x, list):
return [K.int_shape(x_elem) for x_elem in x]
else:
return K.int_shape(x)
# Otherwise, we default to the input shape.
warnings.warn('`output_shape` argument not specified for layer {} '
'and cannot be automatically inferred '
'with the Theano backend. '
'Defaulting to output shape `{}` '
'(same as input shape). '
'If the expected output shape is different, '
'specify it via the `output_shape` argument.'
.format(self.name, input_shape))
return input_shape
elif isinstance(self._output_shape, (tuple, list)):
if isinstance(input_shape, list):
num_samples = input_shape[0][0]
else:
num_samples = input_shape[0] if input_shape else None
return (num_samples,) + tuple(self._output_shape)
else:
shape = self._output_shape(input_shape)
if not isinstance(shape, (list, tuple)):
raise ValueError('`output_shape` function must return a tuple or '
'a list of tuples.')
if isinstance(shape, list):
if isinstance(shape[0], int) or shape[0] is None:
shape = tuple(shape)
return shape
def call(self, inputs, mask=None):
arguments = self.arguments
if has_arg(self.function, 'mask'):
arguments['mask'] = mask
if isinstance(inputs, list):
self._input_dtypes = [K.dtype(x) for x in inputs]
else:
self._input_dtypes = K.dtype(inputs)
return self.function(inputs, **arguments)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
if isinstance(self.function, python_types.LambdaType):
function = func_dump(self.function)
function_type = 'lambda'
else:
function = self.function.__name__
function_type = 'function'
if isinstance(self._output_shape, python_types.LambdaType):
output_shape = func_dump(self._output_shape)
output_shape_type = 'lambda'
elif callable(self._output_shape):
output_shape = self._output_shape.__name__
output_shape_type = 'function'
else:
output_shape = self._output_shape
output_shape_type = 'raw'
config = {'function': function,
'function_type': function_type,
'output_shape': output_shape,
'output_shape_type': output_shape_type,
'arguments': self.arguments}
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
globs = globals()
if custom_objects:
globs = dict(list(globs.items()) + list(custom_objects.items()))
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
function = deserialize_keras_object(
config['function'],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = func_load(config['function'], globs=globs)
else:
raise TypeError('Unknown function type:', function_type)
output_shape_type = config.pop('output_shape_type')
if output_shape_type == 'function':
# Simple lookup in custom objects
output_shape = deserialize_keras_object(
config['output_shape'],
custom_objects=custom_objects,
printable_module_name='output_shape function in Lambda layer')
elif output_shape_type == 'lambda':
# Unsafe deserialization from bytecode
output_shape = func_load(config['output_shape'], globs=globs)
else:
output_shape = config['output_shape']
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
config['function'] = function
config['output_shape'] = output_shape
return cls(**config)
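# A minimal sketch (not part of the original API) illustrating the
# `output_shape`-as-a-function contract documented above. The antirectifier
# transform is just an illustrative choice; `Input` and `Model` are assumed
# to come from the same Keras distribution as this module.
def _lambda_output_shape_demo():
    from keras.layers import Input
    from keras.models import Model

    def antirectifier(x):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        return K.concatenate([K.relu(x), K.relu(-x)], axis=1)

    def antirectifier_output_shape(input_shape):
        shape = list(input_shape)
        shape[-1] *= 2  # concatenation doubles the feature axis
        return tuple(shape)

    inp = Input(shape=(16,))
    out = Lambda(antirectifier, output_shape=antirectifier_output_shape)(inp)
    return Model(inp, out)  # output shape: (None, 32)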
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
# Example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
@interfaces.legacy_dense_support
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Dense, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
output = K.dot(inputs, self.kernel)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format='channels_last')
if self.activation is not None:
output = self.activation(output)
return output
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
# Arguments
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(**kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)
def get_config(self):
config = {'l1': self.l1,
'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
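# A hedged usage sketch: penalizing large hidden activations by stacking
# ActivityRegularization after a Dense layer. `Sequential` is assumed to come
# from the same Keras distribution as this module.
def _activity_regularization_demo():
    from keras.models import Sequential
    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(32,)))
    # adds l2 * sum(h ** 2) over the hidden activations h to the loss
    model.add(ActivityRegularization(l1=0., l2=1e-4))
    model.add(Dense(1))
    return model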
class PositionLayer(Layer):
"""
    Returns the positions of the input sequence as a range.
    It receives an input with shape (B, N) and returns a tensor with the same
shape but with the content:
[arange(N), arange(N), ..., arange(N)]
# Arguments
maxlen: Integer, maximum length allowed (N)
# Input shape
Arbitrary. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, maxlen=500, **kwargs):
self.supports_masking = True
self.positions = K.arange(0, stop=maxlen)
super(PositionLayer, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return self.positions[:K.shape(inputs)[1]]
def compute_mask(self, input_shape, input_mask=None):
return input_mask
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(PositionLayer, self).get_config()
return dict(list(base_config.items()))
class RemoveThirdDimension(Layer):
"""
Removes the third dimension of a 3D tensor.
# Input shape
3D. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
# Output shape
Same shape as input but without the last dimension.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(RemoveThirdDimension, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return inputs[:, :, 0]
def compute_mask(self, input_shape, input_mask=None):
return input_mask
def compute_output_shape(self, input_shape):
return input_shape[:-1]
def get_config(self):
base_config = super(RemoveThirdDimension, self).get_config()
return dict(list(base_config.items()))
class MaskedMean(Layer):
"""
This layer is called after an Embedding layer.
    It zeroes out the masked positions and averages the embeddings over the
    time axis. The mask is discarded.
    # Input shape
    3D tensor with shape `(batch_size, timesteps, embedding_dim)`.
    # Output shape
    2D tensor with shape `(batch_size, embedding_dim)`.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(MaskedMean, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return K.mean(K.cast(mask[:, :, None], K.dtype(inputs)) * inputs, axis=1)
def compute_mask(self, input_shape, input_mask=None):
return None
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[2])
def get_config(self):
base_config = super(MaskedMean, self).get_config()
return dict(list(base_config.items()))
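# A hedged sketch of the intended use: averaging masked embeddings into a
# single vector per sample. `Input` and `Embedding` are assumed to come from
# the same Keras distribution as this module; the vocabulary size is arbitrary.
def _masked_mean_demo():
    from keras.layers import Input, Embedding
    inp = Input(shape=(None,), dtype='int32')
    emb = Embedding(1000, 64, mask_zero=True)(inp)   # (batch, time, 64), masked
    avg = MaskedMean()(emb)                          # (batch, 64), mask discarded
    return avg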
class ApplyMask(Layer):
"""
    Multiplies a tensor by an explicitly provided mask.
    It receives a list `[inputs, mask]`, zeroes out the masked positions of
    `inputs`, and propagates `mask` as the output mask.
    # Input shape
    List of two tensors: the data tensor and its mask.
    # Output shape
    Same shape as the first input.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(ApplyMask, self).__init__(**kwargs)
def call(self, inputs, mask=None):
mask = inputs[1]
inputs = inputs[0]
return K.cast(mask[:, :, None], K.dtype(inputs)) * inputs
def compute_mask(self, inputs, mask=None):
return inputs[1]
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
base_config = super(ApplyMask, self).get_config()
return dict(list(base_config.items()))
class MaskLayer(Layer):
"""Applies to the input its mask. The mask is kept.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(MaskLayer, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return K.cast(mask[:, :, None], K.dtype(inputs)) * inputs
def compute_mask(self, input_shape, input_mask=None):
return input_mask
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(MaskLayer, self).get_config()
return dict(list(base_config.items()))
class MaskAndRemoveMask(Layer):
"""Applies to the input its mask. The mask is removed.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(MaskAndRemoveMask, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return K.cast(mask[:, :, None], K.dtype(inputs)) * inputs
def compute_mask(self, input_shape, input_mask=None):
return None
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(MaskAndRemoveMask, self).get_config()
return dict(list(base_config.items()))
class RemoveMask(Layer):
"""Removes the mask of the Layer.
"""
def __init__(self, **kwargs):
super(RemoveMask, self).__init__(**kwargs)
def compute_mask(self, input, input_mask=None):
return None
def get_config(self):
base_config = super(RemoveMask, self).get_config()
return dict(list(base_config.items()))
class GetMask(Layer):
"""Gets the mask of a layer.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
    # Output shape
    The shape of the input's mask.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(GetMask, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return mask
def compute_mask(self, input_shape, input_mask=None):
return None
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(GetMask, self).get_config()
return dict(list(base_config.items()))
class FlatMask(Layer):
"""
    Flattens an n-dimensional mask to an (n-1)-dimensional one.
    # Arguments
        axis: Axis over which the mask is reduced (via `K.any`).
# Input shape
Arbitrary. Use the keyword argument `input_shape`
when using this layer as the first layer in a model.
# Output shape
Same shape as input.
"""
def __init__(self, axis=2, **kwargs):
self.supports_masking = True
self.axis = axis
super(FlatMask, self).__init__(**kwargs)
def call(self, inputs, mask=None):
return inputs
def compute_mask(self, input_shape, input_mask=None):
return K.any(input_mask, self.axis)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(FlatMask, self).get_config()
return dict(list(base_config.items()))
class WeightedSum(Layer):
""" Applies a weighted sum over a set of vectors input[0] and their respective weights input[1].
First, the weights are tiled for matching the length of the input vectors on dim=1.
Second, an element-wise multiplication is applied over the inputs.
Third, the output tensor is summed over the defined set of dimensions if
the input parameter sum_dims is provided.
# Arguments
sum_dims: dimensions on which the final summation will be applied after the respective multiplication
# Input shape
List with two tensors:
input[0]: vectors
input[1]: weights
Both tensors must have a matching number of dimensions and lengths, except
dim=1, which must be 1 for the set of weights.
# Output shape
Vector with the same number of dimensions and length as input[0] but having removed the dimensions
specified in sum_dims (if any).
"""
def __init__(self, sum_dims=[], **kwargs):
assert isinstance(sum_dims, list)
self.sum_dims = sorted(sum_dims)[::-1]
self.supports_masking = True
super(WeightedSum, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
assert len(input_shape) == 2
def call(self, inputs, mask=None):
# get input values and weights
values = inputs[0]
weights = inputs[1]
# tile weights before summing
        weights = K.repeatRdim(weights, K.shape(values)[1], axis=1)
# inputs = K.dot(values, weights)
inputs = values * weights
for d in self.sum_dims:
inputs = K.sum(inputs, axis=d)
return inputs
def compute_output_shape(self, input_shape):
out_dim = []
num_dim = len(input_shape[0])
for d in range(num_dim):
if d not in self.sum_dims:
out_dim.append(max(input_shape[0][d], input_shape[1][d]))
return tuple(out_dim)
def compute_mask(self, input, input_mask=None):
        if input_mask is None or all(m is None for m in input_mask):
            return None
        else:
            not_None_masks = [m for m in input_mask if m is not None]
            if len(not_None_masks) == 1:
                out_mask = not_None_masks[0]
            else:
                out_mask = not_None_masks[0] * not_None_masks[1]
            return out_mask
def get_config(self):
config = {'sum_dims': self.sum_dims}
base_config = super(WeightedSum, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class WeightedMerge(Layer):
""" Applies a weighted merge over a set of tensors.
This layer learns a set of lambda weights for applying a weighted sum
for merging the input tensors.
# Arguments
mode: merge mode used. Possible values are 'sum' (default) or 'mul'.
init: Initialization function.
lambdas_regularizer: Regularizers for the weights.
weights: Initial weights for each element to merge.
# Input shape
List of tensors of any dimensions but with the same shape.
# Output shape
Tensor with the same number of dimensions as the input tensors.
"""
def __init__(self, mode='sum',
init='glorot_uniform',
lambdas_regularizer=None,
weights=None, **kwargs):
# self.out_shape = out_shape
self._valid_modes = ['sum', 'mul']
if mode not in self._valid_modes:
raise NotImplementedError(
"Merge mode of type '" + mode + "' is not valid. Valid modes are: " + str(self._valid_modes))
self.mode = mode
self.init = initializers.get(init)
self.lambdas_regularizer = regularizers.get(lambdas_regularizer)
self.initial_weights = weights
self.supports_masking = True
super(WeightedMerge, self).__init__(**kwargs)
def build(self, input_shape):
        # Convert input_shape to a list
if not isinstance(input_shape, list):
input_shape = [input_shape]
s = input_shape[0]
# Check all input_shape are compatible
for i in range(1, len(input_shape)):
for s1, s2 in zip(input_shape[i], s):
assert s1 == s2 or s1 is None or s2 is None, 'The shapes of some input tensors do not match ' \
'(' + str(input_shape[i]) + ' vs ' + str(s) + ').'
# Initialize weights
self.lambdas = self.init((len(input_shape),), name='{}_lambdas'.format(self.name))
# Set weights to trainable
self.trainable_weights = [self.lambdas]
# List of regularizers
self.regularizers = []
# Add lambdas regularizers (if necessary)
if self.lambdas_regularizer:
self.lambdas_regularizer.set_param(self.lambdas)
self.regularizers.append(self.lambdas_regularizer)
# Set initial weights (if necessary)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def call(self, inputs, mask=None):
if not isinstance(inputs, list):
inputs = [inputs]
# merge inputs after weighting by the learned lambda weights
s = inputs[0] * self.lambdas[0]
for i in range(1, len(inputs)):
if self.mode == 'sum':
s += inputs[i] * self.lambdas[i]
elif self.mode == 'mul':
s *= inputs[i] * self.lambdas[i]
return s
def compute_output_shape(self, input_shape):
# return tuple(list(input_shape[0][:2]) + self.out_shape)
if not isinstance(input_shape, list):
input_shape = [input_shape]
return tuple(input_shape[0])
def compute_mask(self, input, input_mask=None):
if not isinstance(input_mask, list):
input_mask = [input_mask]
        if all(m is None for m in input_mask):
            return None
else:
return input_mask[0]
def get_config(self):
config = {'mode': self.mode,
'kernel_initializer': self.init.__name__,
'lambdas_regularizer': self.lambdas_regularizer.get_config() if self.lambdas_regularizer else None}
base_config = super(WeightedMerge, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SetSubtensor(Layer):
"""This layer performs a set_subtensor operation over two layers
# Arguments
indices: list of strings specifying the indexation over the two input layers
# Input shape
List with two tensors:
input[0]: Tensor to overwrite
input[1]: Tensor that overwrites
# Output shape
    K.set_subtensor(input[0][indices[0]], input[1][indices[1]])
# Supports masking: The mask of the first input layer
"""
def __init__(self, indices, **kwargs):
self.supports_masking = True
self.indices = indices
super(SetSubtensor, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
assert len(input_shape) == 2
def call(self, inputs, mask=None):
return K.set_subtensor(eval('inputs[0]' + self.indices[0]), eval('inputs[1]' + self.indices[1]))
def compute_mask(self, input_shape, input_mask=None):
return input_mask[0]
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = {'indices': self.indices}
base_config = super(SetSubtensor, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ZeroesLayer(Layer):
"""Given any input, produces an output input_dim zeroes
# Example
```python
# as first
# Arguments
output_dim: int > 0.
input_dim: dimensionality of the input (integer). This argument
(or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
# Input shape
nD tensor with shape: `(nb_samples, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(nb_samples, input_dim)`.
# Output shape
    nD tensor with shape: `(nb_samples, ..., output_dim)`.
    For instance, for a 2D input with shape `(nb_samples, input_dim)`,
    the output would have shape `(nb_samples, output_dim)`.
"""
def __init__(self, output_dim, input_dim=None, **kwargs):
self.output_dim = output_dim
self.input_dim = input_dim
super(ZeroesLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) >= 2
self.built = True
def call(self, inputs, mask=None):
initial_state = K.zeros_like(inputs) # (samples, input_dim)
initial_state = K.sum(initial_state, axis=(1,)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.output_dim])
return initial_state
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[-1] = self.output_dim
return tuple(output_shape)
def get_config(self):
        config = {'output_dim': self.output_dim,
                  'input_dim': self.input_dim}
base_config = super(ZeroesLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_lr_multipliers(self, W_learning_rate_multiplier, b_learning_rate_multiplier):
self.W_learning_rate_multiplier = W_learning_rate_multiplier
self.b_learning_rate_multiplier = b_learning_rate_multiplier
self.learning_rate_multipliers = [self.W_learning_rate_multiplier,
self.b_learning_rate_multiplier]
class EqualDimensions(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
# Arguments
dim_ordering: 'th' or 'tf'.
In 'th' mode, the channels dimension (the depth)
is at index 1, in 'tf' mode is it at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "tf".
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
4D tensor with shape:
`(samples, channels, rows+1, cols+1)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows+1, cols+1, channels)` if dim_ordering='tf'.
"""
def __init__(self,
dim_ordering='default',
**kwargs):
super(EqualDimensions, self).__init__(**kwargs)
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf, th}.')
self.dim_ordering = dim_ordering
def compute_output_shape(self, input_shape):
assert len(input_shape[0]) == len(input_shape[1])
out_dims = [input_shape[1][0], input_shape[1][1], input_shape[0][2], input_shape[0][3]]
return tuple(out_dims)
def call(self, inputs, mask=None):
return K.equal_dimensions(inputs[0], inputs[1])
def get_config(self):
config = {}
base_config = super(EqualDimensions, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Concat(Layer):
"""Concatenates multiple inputs along the specified axis. Inputs should have the same
shape except for the dimension specified in axis, which can have different sizes.
# Arguments
axis: int
Axis which inputs are joined over
cropping: None or [crop]
            Cropping for each input axis. Cropping is always disabled for the concatenation axis.
dim_ordering: 'th' or 'tf'.
In 'th' mode, the channels dimension (the depth)
is at index 1, in 'tf' mode is it at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "tf".
"""
def __init__(self, axis=1,
cropping=None, dim_ordering='default',
**kwargs):
super(Concat, self).__init__(**kwargs)
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf, th}.')
self.dim_ordering = dim_ordering
self.axis = axis
if cropping is not None:
# If cropping is enabled, don't crop on the selected axis
cropping = list(cropping)
cropping[axis] = None
self.cropping = cropping
def compute_output_shape(self, input_shape):
input_shapes = autocrop_array_shapes(input_shape, self.cropping)
# Infer the output shape by grabbing, for each axis, the first
# input size that is not `None` (if there is any)
output_shape = [next((s for s in sizes if s is not None), None)
for sizes in zip(*input_shapes)]
def match(shape1, shape2):
axis = self.axis if self.axis >= 0 else len(shape1) + self.axis
return (len(shape1) == len(shape2) and
all(i == axis or s1 is None or s2 is None or s1 == s2
for i, (s1, s2) in enumerate(zip(shape1, shape2))))
# Check for compatibility with inferred output shape
if not all(match(shape, output_shape) for shape in input_shapes):
raise ValueError("Mismatch: input shapes must be the same except "
"in the concatenation axis")
# Infer output shape on concatenation axis and return
sizes = [input_shape[self.axis] for input_shape in input_shapes]
concat_size = None if any(s is None for s in sizes) else sum(sizes)
output_shape[self.axis] = concat_size
return tuple(output_shape)
def call(self, inputs, mask=None):
inputs = autocrop(inputs, self.cropping)
return K.concatenate(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis, 'cropping': self.cropping}
base_config = super(Concat, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
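# A hedged sketch: concatenating two feature tensors whose time axes differ,
# center-cropping the longer one so the shapes match off the concat axis.
# `Input` is assumed to come from the same Keras distribution as this module.
def _concat_demo():
    from keras.layers import Input
    a = Input(shape=(10, 32))
    b = Input(shape=(12, 32))
    # axis 1 is cropped to min(10, 12) = 10; axis 2 is the concatenation axis
    c = Concat(axis=2, cropping=[None, 'center', None])([a, b])
    return c  # shape: (batch, 10, 64)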
class PositionwiseFeedForwardDense(Layer):
"""Fully connected feed-forward netwok, applied to each position.
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(inputs) = inputs`).
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the context.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
@interfaces.legacy_dense_support
def __init__(self, units,
activation='relu',
dropout=0.,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(PositionwiseFeedForwardDense, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.dropout = dropout
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel1 = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel1',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias1 = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias1',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
            self.bias1 = None
self.kernel2 = self.add_weight(shape=(self.units, input_dim),
initializer=self.kernel_initializer,
name='kernel2',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias2 = self.add_weight(shape=(input_dim,),
initializer=self.bias_initializer,
name='bias2',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
            self.bias2 = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
if self.dropout > 0:
self.dropout_layer = Dropout(self.dropout)
self.built = True
def call(self, inputs, mask=None):
intermediate_output = K.dot(inputs, self.kernel1)
if self.use_bias:
intermediate_output = K.bias_add(intermediate_output, self.bias1)
if self.activation is not None:
intermediate_output = self.activation(intermediate_output)
if self.dropout > 0:
intermediate_output = self.dropout_layer(intermediate_output)
output = K.dot(intermediate_output, self.kernel2)
if self.use_bias:
output = K.bias_add(output, self.bias2)
return output
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
return input_shape
def compute_mask(self, input_shape, input_mask=None):
return input_mask
def get_config(self):
config = {
'units': self.units,
'dropout': self.dropout,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(PositionwiseFeedForwardDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
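# A hedged usage sketch: the position-wise feed-forward block from the
# Transformer, applied independently at every timestep of a 3D tensor.
# `Input` is assumed to come from the same Keras distribution as this module.
def _positionwise_ffn_demo():
    from keras.layers import Input
    x = Input(shape=(None, 512))
    y = PositionwiseFeedForwardDense(2048, dropout=0.1)(x)
    return y  # shape: (batch, time, 512), same as the input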
class Slice(Layer):
"""Slices the input with the provided min-max indices.
# Arguments
        min_idx: Integer indicating the start index of the slice.
        max_idx: Integer indicating the end index of the slice.
    # Input shape
        2D tensor
    # Output shape
        2D tensor with shape `(batch_size, max_idx - min_idx)`.
"""
def __init__(self, min_idx, max_idx, **kwargs):
self.supports_masking = True
super(Slice, self).__init__(**kwargs)
self.min_idx = min_idx
self.max_idx = max_idx
def call(self, inputs, mask=None):
return inputs[:, self.min_idx:self.max_idx]
def compute_mask(self, input_shape, input_mask=None):
if input_mask is not None:
return input_mask[:, self.min_idx:self.max_idx]
else:
return None
def compute_output_shape(self, input_shape):
return (input_shape[0], self.max_idx - self.min_idx)
def get_config(self):
config = {
'min_idx': self.min_idx,
'max_idx': self.max_idx
}
base_config = super(Slice, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
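# A hedged usage sketch: keeping columns 4..7 of a (batch, 16) tensor.
# `Input` is assumed to come from the same Keras distribution as this module.
def _slice_demo():
    from keras.layers import Input
    x = Input(shape=(16,))
    y = Slice(4, 8)(x)
    return y  # shape: (batch, 4)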
def autocrop(inputs, cropping):
"""Crops the given input arrays.
Cropping takes a sequence of inputs and crops them per-axis in order to
ensure that their sizes are consistent so that they can be combined
in an element-wise fashion. If cropping is enabled for a specific axis,
the minimum size in that axis of all inputs is computed, and all
inputs are cropped to that size.
The per-axis cropping modes are:
`None`: this axis is not cropped, inputs are unchanged in this axis
`'lower'`: inputs are cropped choosing the lower portion in this axis
(`a[:crop_size, ...]`)
`'upper'`: inputs are cropped choosing the upper portion in this axis
(`a[-crop_size:, ...]`)
`'center'`: inputs are cropped choosing the central portion in this axis
            (`a[offset:offset+crop_size, ...]` where
            `offset = (a.shape[0]-crop_size)//2`)
# Arguments
inputs: list of expressions
The input arrays in the form of a list of Theano expressions
cropping: list of cropping modes
Cropping modes, one for each axis. If length of `cropping` is less
than the number of axes in the inputs, it is padded with `None`.
If `cropping` is None, `input` is returned as is.
# Returns
list of expressions. Each expression is the cropped version of the corresponding input
"""
if cropping is None:
# No cropping in any dimension
return inputs
else:
# Get the number of dimensions
ndim = K.ndim(inputs[0])
# Check for consistent number of dimensions
if not all(K.ndim(input) == ndim for input in inputs):
raise ValueError("Not all inputs are of the same ",
"dimensionality. Got {0} inputs of "
"dimensionalities {1}.".format(
len(inputs), [K.ndim(input) for input in inputs]))
# Get the shape of each input
shapes = [K.shape(input) for input in inputs]
        # Stack the symbolic shapes into a single tensor expression
        shapes_tensor = K.stack(shapes)
# Min along axis 0 to get the minimum size in each dimension
min_shape = K.min(shapes_tensor, axis=0)
# Nested list of slices; each list in `slices` corresponds to
# an input and contains a slice for each dimension
slices_by_input = [[] for i in range(len(inputs))]
# If there are more dimensions than cropping entries, pad
# the cropping
cropping = list(cropping)
if ndim > len(cropping):
cropping = list(cropping) + [None] * (ndim - len(cropping))
# For each dimension
for dim, cr in enumerate(cropping):
if cr is None:
# Don't crop this dimension
slice_all = slice(None)
for slices in slices_by_input:
slices.append(slice_all)
else:
# We crop all inputs in the dimension `dim` so that they
# are the minimum found in this dimension from all inputs
sz = min_shape[dim]
if cr == 'lower':
# Choose the first `sz` elements
slc_lower = slice(None, sz)
for slices in slices_by_input:
slices.append(slc_lower)
elif cr == 'upper':
# Choose the last `sz` elements
slc_upper = slice(-sz, None)
for slices in slices_by_input:
slices.append(slc_upper)
elif cr == 'center':
# Choose `sz` elements from the center
for sh, slices in zip(shapes, slices_by_input):
offset = (sh[dim] - sz) // 2
slices.append(slice(offset, offset + sz))
else:
raise ValueError(
'Unknown crop mode \'{0}\''.format(cr))
        return [input[tuple(slices)] for input, slices in
                zip(inputs, slices_by_input)]
def autocrop_array_shapes(input_shapes, cropping):
"""Computes the shapes of the given arrays after auto-cropping is applied.
For more information on cropping, see the :func:`autocrop` function
documentation.
# Arguments
input_shapes: the shapes of input arrays prior to cropping in
the form of a list of tuples
cropping: a list of cropping modes, one for each axis. If length of
`cropping` is less than the number of axes in the inputs, it is
padded with `None`. If `cropping` is None, `input_shapes` is returned
as is. For more information on their values and operation, see the
:func:`autocrop` documentation.
# Returns
shapes of the given arrays after auto-cropping is applied.
"""
if cropping is None:
return input_shapes
else:
# Check for consistent number of dimensions
ndim = len(input_shapes[0])
if not all(len(sh) == ndim for sh in input_shapes):
raise ValueError("Not all inputs are of the same "
"dimensionality. Got {0} inputs of "
"dimensionalities {1}.".format(len(input_shapes), [len(sh) for sh in input_shapes]))
result = []
# If there are more dimensions than cropping entries, pad
# the cropping
cropping = list(cropping)
if ndim > len(cropping):
cropping = list(cropping) + [None] * (ndim - len(cropping))
for sh, cr in zip(zip(*input_shapes), cropping):
if cr is None:
result.append(sh)
elif cr in {'lower', 'center', 'upper'}:
min_sh = None if any(inputs is None for inputs in sh) else min(sh)
result.append([min_sh] * len(sh))
else:
raise ValueError('Unknown crop mode \'{0}\''.format(cr))
return [tuple(sh) for sh in zip(*result)]
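# A concrete illustration of the cropping modes on static shapes (a sketch;
# the shapes are arbitrary). With 'lower' cropping on axis 1, both inputs are
# reduced to the minimum size found on that axis.
def _autocrop_shapes_demo():
    shapes = autocrop_array_shapes([(None, 5, 8), (None, 7, 8)], [None, 'lower'])
    assert shapes == [(None, 5, 8), (None, 5, 8)]
    return shapes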
|
|
from . import utils
from .base import BaseModel
import numpy as np
class CholerM1(BaseModel):
"""
The "M1" four parameter model described in Choler et al. 2010
"""
def __init__(self, parameters={}):
BaseModel.__init__(self)
self.all_required_parameters = {'a1': (0, 100), 'a2': (0, 100),
'a3': (0, 100),
'L': (1,30)}
self._organize_parameters(parameters)
self._required_predictors = {'precip': 'per_timestep',
'evap' : 'per_timestep',
'Wcap' : 'per_site'}
self.state_variables = ['V','W','Dt']
self.set_internal_method(method='numpy')
def set_internal_method(self, method = 'numpy'):
if method == 'cython':
raise NotImplementedError('cython method not implemented for this model')
elif method == 'numpy':
self._apply_model = self._apply_model_numpy
else:
raise ValueError('Unknown internal method: ' + method)
def _apply_model_numpy(self,
# Site specific drivers
precip, # precip, Daily vector
evap, # potential ET, Daily vector
Wcap, # field capacity, single value/site
# Model parameters
a1,
a2,
a3,
L,
                            # Constraints on vegetation.
Vmin = 0.001, # Needs to be small non-zero value
Vmax = 1., # 100% cause GCC is scaled 0-1
# Note in the original Choler 2010 paper, Vmax is a site
# specific value set to the maximum value observed at a site.
                            # This is not feasible for extrapolation though.
# Initial conditions
W_initial = 0,
Wstart = 0,
V_initial = 0.001,
                            # Normally just the V (vegetation cover) should be returned,
# but for diagnostics use 'all' to get V, W, and Dt
return_vars = 'V'
):
"""
"""
L = int(L) # must be a whole number, any floats will be truncated.
# Initialize everything
# Primary state variables
W = np.empty_like(precip).astype('float32')
W[:] = W_initial
V = np.empty_like(precip).astype('float32')
V[:] = V_initial
# Derived variables
Dt = np.zeros_like(precip).astype('float32')
# Site level vars such as lagged plant-water and
# temp responses
        # In the Choler 2010 paper this term is We. It's named Dt here
        # to match the other models.
Dtl = np.empty_like(Wcap)
Dtl1 = np.empty_like(Wcap)
n_timesteps = precip.shape[0] - 1
for i in range(1,n_timesteps):
# if we are near the start of the timeseries then initialize
# soil/plant water to something reasonable
# Condition (iii)
if i - L - 1 < 0:
Dt[i] = np.maximum(0, W[i] - a3)
Dtl[:] = Wstart
Dtl1[:] = Wstart
else:
Dt[i] = np.maximum(0, W[i] - a3)
Dtl = Dt[i-L]
Dtl1 = Dt[i-L-1]
# Soil water
W[i+1] = W[i] + precip[i] - a1 * (W[i]/Wcap) * evap[i]
# Condition (ii)
W[i+1] = np.maximum(0, np.minimum(Wcap, W[i+1]))
# Primary veg growth equation
            V[i+1] = V[i] + a2 * (Dtl - Dtl1)
            # Condition (iv)
# Constrain veg to 0-1
V[i+1] = np.maximum(Vmin, np.minimum(Vmax, V[i+1]))
if return_vars == 'V':
return V
elif return_vars == 'all':
return {'V':V, 'W':W, 'Dt':Dt}
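# A hedged usage sketch with synthetic drivers (all values are arbitrary, and
# BaseModel is assumed to accept an empty parameter dict). `Wcap` is passed as
# a one-element per-site array, matching the 'per_site' predictor declared above.
def _choler_m1_demo():
    rng = np.random.default_rng(0)                            # numpy >= 1.17
    precip = rng.gamma(0.3, 5.0, size=365).astype('float32')  # daily precip
    evap = np.full(365, 2.0, dtype='float32')                 # daily potential ET
    model = CholerM1()
    return model._apply_model_numpy(precip, evap, np.array([30.0]),
                                    a1=1.0, a2=0.5, a3=2.0, L=10)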
class CholerM1A(CholerM1):
"""
The "M1A" model described in Choler et al. 2010
This fixes the a3 parameter to 0
"""
def __init__(self, parameters={}):
CholerM1.__init__(self)
self.all_required_parameters = {'a1': (0, 100), 'a2': (0, 100),
'a3': 0,
'L': (1,30)}
self._organize_parameters(parameters)
class CholerM1B(CholerM1):
"""
The "M1B" model described in Choler et al. 2010
Parameterizing the full model essentially. Described here
for completeness.
"""
def __init__(self, parameters={}):
CholerM1.__init__(self)
self._organize_parameters(parameters)
class CholerM2(BaseModel):
"""
The "M2" model described in Choler et al. 2010
"""
def __init__(self, parameters={}):
BaseModel.__init__(self)
self.all_required_parameters = {'b1': (0, 100), 'b2': (0, 100),
'b3': (0, 100), 'b4': (0, 100),
'b5': (0,100)}
self._organize_parameters(parameters)
self._required_predictors = {'precip': 'per_timestep',
'evap' : 'per_timestep',
'Wcap' : 'per_site'}
self.state_variables = ['V','W','Dt']
self.set_internal_method(method='numpy')
def set_internal_method(self, method = 'numpy'):
if method == 'cython':
raise NotImplementedError('cython method not implemented for this model')
elif method == 'numpy':
self._apply_model = self._apply_model_numpy
else:
raise ValueError('Unknown internal method: ' + method)
def _apply_model_numpy(self,
# Site specific drivers
precip, # precip, Daily vector
evap, # potential ET, Daily vector
Wcap, # field capacity, single value/site
# Model parameters
b1,
b2,
b3,
b4,
b5,
                            # Constraints on vegetation.
Vmin = 0.001, # Needs to be small non-zero value
Vmax = 1., # 100% cause GCC is scaled 0-1
# Note in the original Choler 2010 paper, Vmax is a site
# specific value set to the maximum value observed at a site.
                            # This is not feasible for extrapolation though.
# Initial conditions
W_initial = 0,
Wstart = 0,
V_initial = 0.001,
                            # Normally just the V (vegetation cover) should be returned,
# but for diagnostics use 'all' to get V, W, and Dt
return_vars = 'V'
):
"""
"""
# Initialize everything
# Primary state variables
W = np.empty_like(precip).astype('float32')
W[:] = W_initial
V = np.empty_like(precip).astype('float32')
V[:] = V_initial
# Derived variables
Dt = np.zeros_like(precip).astype('float32')
n_timesteps = precip.shape[0] - 1
for i in range(1,n_timesteps):
# plant available water
# condition (iii)
Dt[i] = np.maximum(0, W[i] - b5)
# Soil water
W[i+1] = W[i] + precip[i] - b1 * (1 - V[i]) * (W[i]/Wcap) * evap[i] - b2 * V[i] * Dt[i]
# condition (ii)
W[i+1] = np.maximum(0, np.minimum(Wcap, W[i+1]))
# Primary veg growth equation
V[i+1] = b3 * (Dt[i]/(Wcap - b5)) * V[i] * (1-(V[i]/Vmax)) - (b4 * V[i])
            # Condition (iv)
# Constrain veg to 0-1
V[i+1] = np.maximum(Vmin, np.minimum(Vmax, V[i+1]))
if return_vars == 'V':
return V
elif return_vars == 'all':
return {'V':V, 'W':W, 'Dt':Dt}
class CholerM2A(CholerM2):
"""
The "M2A" model described in Choler et al. 2010
Here the b5 parameter is fixed at 0, essentially making plant available
water equal to total soil water.
"""
def __init__(self, parameters={}):
CholerM2.__init__(self)
self.all_required_parameters = {'b1': (0, 100), 'b2': (0, 100),
'b3': (0, 100), 'b4': (0, 100),
'b5': 0}
self._organize_parameters(parameters)
class CholerM2B(CholerM2):
"""
The "M2B" model described in Choler et al. 2010
Parameterizing the full model essentially. Described here
for completeness.
"""
def __init__(self, parameters={}):
CholerM2.__init__(self)
self._organize_parameters(parameters)
|
|
from sklearn.decomposition import NMF
from sklearn.metrics import silhouette_score
import numpy as np
class NMFClustering():
"""
"""
def __init__(self, n_clusters=2):
""" """
self.n_clusters = n_clusters
self.nmf = NMF(n_components=n_clusters)
def fit(self, X):
"""
"""
self.nmf.fit(X)
def predict(self, X):
"""
"""
return np.argmax(self.nmf.transform(X), axis=1)
def fit_predict(self, X):
"""
"""
self.fit(X)
return self.predict(X)
def set_params(self, **kwargs):
"""
"""
for param in kwargs:
setattr(self, param, kwargs[param])
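# A hedged usage sketch on synthetic non-negative data (NMF requires X >= 0).
def _nmf_clustering_demo():
    X = np.abs(np.random.RandomState(0).randn(100, 20))
    labels = NMFClustering(n_clusters=3).fit_predict(X)
    return labels  # one integer component index per sample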
def _silhouette_analysis(algo, matrix, clusters, **kwargs):
"""
"""
max_score = 0
max_n_clusters = None
n_samples = len(matrix)
for n_clusters in clusters:
algo.set_params(n_clusters=n_clusters)
        labels = algo.fit_predict(matrix)
        if len(set(labels)) == 1:
            continue
        score = silhouette_score(matrix, labels)
if score >= max_score:
max_score = score
max_n_clusters = n_clusters
if n_samples - 1 <= n_clusters:
break
algo.set_params(n_clusters=max_n_clusters)
return max_n_clusters
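# A hedged sketch of model selection with the helper above: pick the number
# of NMF components that maximizes the silhouette score on synthetic data.
def _silhouette_demo():
    X = np.abs(np.random.RandomState(0).randn(100, 20))
    best_k = _silhouette_analysis(NMFClustering(), X, clusters=range(2, 8))
    return best_k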
def _silhouette_DBSCAN_analysis(algo, matrix, scale_list, scale='eps'):
"""
"""
max_score = None
max_eps = None
number_of_clusters = None
number_of_samples = len(matrix)
max_scale = 30
while True:
for eps in scale_list:
params = {scale: eps}
algo.set_params(**params)
clusters = algo.fit_predict(matrix)
outliers = clusters == -1
number_of_clusters = len(set(clusters))
if number_of_clusters <= 1:
continue
score = silhouette_score(matrix, clusters)
            # Weight the score by the fraction of non-outlier samples
            score = score * float(np.sum(~outliers)) / number_of_samples
            if max_score is None or score >= max_score:
                max_score = score
                max_eps = eps
if max_eps is not None:
break
else:
if scale_list.max() > max_scale:
break
scale_list = scale_list + scale_list.max()
if max_eps is None:
max_eps = scale_list[-1]
algo.set_params(**{scale:max_eps})
return max_eps
def _mixture_bic_analysis(algo, matrix, clusters, **kwargs):
"""
"""
min_score = None
max_n_clusters = None
n_samples = len(matrix)
assert(algo.bic)
for n_clusters in clusters:
algo.set_params(n_components=n_clusters)
algo.fit(matrix)
score = algo.bic(matrix)
if min_score is None:
min_score = score
if score <= min_score:
min_score = score
max_n_clusters = n_clusters
if n_samples - 1 <= n_clusters:
break
algo.set_params(n_components=max_n_clusters)
return max_n_clusters
|
|
import numpy as np
from tensorflow.keras import backend as K
import tensorflow.keras as keras
import tensorflow as tf
import json
###definitions of classes that will be used to define a current Network State
class state:
def __init__(self,gs=10,param=10):
self.gs=gs
self.param=param
def __str__(self):
return str(self.gs)+"*"+str(self.param)
def __repr__(self):
return str(self)
class grap:
A=None
X=None
s=None
def __init__(self,s):
self.A=None
self.X=None
self.s=s
def __str__(self):
return str(self.s)+"("+str(self.A.shape)+"*"+str(self.X.shape)+")"
def __repr__(self):
return str(self)
class setting:
def __init__(self,**kwargs):
for key in kwargs:
setattr(self,key,kwargs[key])
def __str__(self):
dic={}
for key in dir(self):
dic[key]=str(getattr(self,key))
        return json.dumps(dic, indent=2, sort_keys=True)  # yes, I literally import json just for this
def __repr__(self):
return str(self)
### global attributes that probably will never be changed
flag=0
self_interaction=True
cut=0.5
c_const=1000.0
### function to create the default setting object
def getm():
m=setting()
m.usei=False
m.decompress="classic"
m.trivial_ladder_n=1
m.trivial_decompress_activation="linear"
    m.trivial_decompress_init_kernel=tf.keras.initializers.TruncatedNormal()
    m.trivial_decompress_init_bias=tf.keras.initializers.TruncatedNormal()
m.sortindex=-1
m.prenorm=False
    m.graph_init_self=tf.keras.initializers.TruncatedNormal()
    m.graph_init_neig=tf.keras.initializers.TruncatedNormal()
m.agraph_init_self=m.graph_init_self
m.agraph_init_neig=m.graph_init_neig
m.edges=3#particle net like
m.edgeactivation="relu"
m.edgeactivationfinal=m.edgeactivation
m.edgeusebias=False
m.edgeconcat=False
m.gq_activation="relu"
    m.gq_init_kernel=tf.keras.initializers.TruncatedNormal()
    m.gq_init_bias=tf.keras.initializers.Zeros()
m.gq_usebias=False
m.gq_batchnorm=False
m.shallcomplex=not True
m.complexsteps=3
m.gqa_activation=m.gq_activation
m.gqa_init_kernel=m.gq_init_kernel
m.gqa_init_bias=m.gq_init_bias
m.gqa_usebias=m.gq_usebias
m.gqa_batchnorm=m.gq_batchnorm
m.shallacomplex=m.shallcomplex
m.complexasteps=m.complexsteps
m.shallredense=False
m.redenseladder=[8,6]
m.redenseactivation="relu"
m.redenseinit=tf.keras.initializers.Identity()
m.compression_init=tf.keras.initializers.Identity()
m.mdense_activation="relu"
m.mdense_init_kernel=tf.keras.initializers.Identity()
m.mdense_init_bias=tf.keras.initializers.Zeros()
m.mdense_usebias=True
m.mdense_batchnorm=False
return m
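# A hedged usage sketch: build the default settings and override a field.
# `setting.__str__` dumps every attribute as JSON, which is handy for logging.
def _getm_demo():
    m = getm()
    m.edges = 5  # override the number of edge convolutions
    print(m)     # JSON dump of all current settings
    return m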
|
|
import numpy as np
import cv2
from PIL import Image
import pytesseract
def plateDetection(plate):
gray_img = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray_img, 110, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if contours:
        contour_area = [cv2.contourArea(c) for c in contours]
        max_cntr_index = np.argmax(contour_area)
        max_cnt = contours[max_cntr_index]
max_cntArea = contour_area[max_cntr_index]
x, y, w, h = cv2.boundingRect(max_cnt)
if not ratioCheck(max_cntArea, w, h):
return plate, None
final_img = thresh[y:y+h, x:x+w]
return final_img, [x, y, w, h]
else:
return plate, None
def ratioCheck(area, width, height):
ratio = float(width) / float(height)
if ratio < 1:
ratio = 1 / ratio
if (area < 1063.62 or area > 73862.5) or (ratio < 3 or ratio > 6):
return False
return True
def isMaxWhite(plate):
avg = np.mean(plate)
if(avg >= 115):
return True
else:
return False
def sizingPlate(rect):
(x, y), (width, height), rect_angle = rect
if(width > height):
angle = -rect_angle
else:
angle = 90 + rect_angle
if angle > 15:
return False
if height == 0 or width == 0:
return False
area = height*width
if not ratioCheck(area, width, height):
return False
else:
return True
img = cv2.imread(input("Enter image name with extension: "))
print("Identified image...",)
cv2.imshow("input", img)
if cv2.waitKey(0) & 0xff == ord('q'):
pass
img2 = cv2.GaussianBlur(img, (3, 3), 0)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img2 = cv2.Sobel(img2, cv2.CV_8U, 1, 0, ksize=3)
_, img2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))
morph_img_threshold = img2.copy()
cv2.morphologyEx(src=img2, op=cv2.MORPH_CLOSE,
kernel=element, dst=morph_img_threshold)
contours, hierarchy = cv2.findContours(
    morph_img_threshold, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img2, contours, -1, (0, 255, 0), 1)
for i, cnt in enumerate(contours):
min_rect = cv2.minAreaRect(cnt)
if sizingPlate(min_rect):
x, y, w, h = cv2.boundingRect(cnt)
plate_img = img[y:y+h, x:x+w]
print("Identifying Number Plate...")
cv2.imshow("Number Plate", plate_img)
if cv2.waitKey(0) & 0xff == ord('q'):
pass
if(isMaxWhite(plate_img)):
clean_plate, rect = plateDetection(plate_img)
if rect:
x1, y1, w1, h1 = rect
x, y, w, h = x+x1, y+y1, w1, h1
plate_im = Image.fromarray(clean_plate)
text = pytesseract.image_to_string(plate_im, lang='eng')
print("Number Plate Detected : ", text)
|
|
from time import sleep
import gaussianfft as grf
import unittest
import numpy as np
from multiprocessing import Process, Queue, set_start_method
def create_realization():
# Returns a 100 x 100 realization of a random field
v = grf.variogram('exponential', 100.0, 50.0)
s = grf.simulate(v, 100, 10.0, 100, 10.0)
return np.array(s).reshape((100, 100), order='F')
def run_simulation_process(q, dt):
# Sleep to enable different automatic seeding for different runs
sleep(dt)
s = create_realization()
q.put((grf.seed(), s))
class TestMultiprocessSeeding(unittest.TestCase):
def test_mp(self):
""" Two random fields are generated in two different processes using the multiprocessing module. The purpose
of this test is to show how automatic seed generation works for sub processes. """
# Set start method if multiprocessing since the default is different on
# Windows and Linux
set_start_method('spawn')
queue = Queue()
# Set an arbitrary starting seed. This will never be used since simulation
# is done in child processes
t0_seed = 123
grf.seed(t0_seed)
p1 = Process(target=run_simulation_process, args=(queue, 0))
# Sleep >1 second to ensure a different seed for the second process
p2 = Process(target=run_simulation_process, args=(queue, 1.1))
p1.start()
p2.start()
p1.join(2)
p2.join(2)
seed_1, sim_1 = queue.get()
seed_2, sim_2 = queue.get()
# Repeat simulation 1, no parallelism
grf.seed(seed_1)
seq_sim_1 = create_realization()
# Repeat simulation 2, no parallelism
grf.seed(seed_2)
seq_sim_2 = create_realization()
# None of the three seeds that have been used should be equal
self.assertNotEqual(seed_1, seed_2)
self.assertNotEqual(seed_1, t0_seed)
self.assertNotEqual(seed_2, t0_seed)
# Check that realizations are reproduced correctly
        self.assertTrue(np.allclose(seq_sim_1, sim_1))
        self.assertTrue(np.allclose(seq_sim_2, sim_2))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import random
PATTERN_LENGTH = 7
class Link(object):
def __init__(self, graph, src, tar):
self.graph = graph
self.src = src
self.tar = tar
assert (graph.path_len[src][tar] == 0)
graph.path_len[src][tar] = 1
def output(self):
print("Link {} -> {}".format(self.src, self.tar))
class Graph(object):
def __init__(self, rand, spec):
self.rand = rand
self.spec = spec
self.num_patterns = spec.val("NUM_PATTERNS")
def reset(self):
# Links are represented by objects, but nodes are represented by indices.
# Keep in mind that the nodes in this graph do not (necessarily) correspond to transformer nodes.
# Allocate the pattern set.
self.patterns = [[0. for _ in range(PATTERN_LENGTH)] for _ in range(self.num_patterns)]
# Define the node patterns.
for i, p in enumerate(self.patterns):
for j in range(PATTERN_LENGTH):
p[j] = self.rand.random() * 2. - 1.
# Allocate the matrix for tracking path lengths. Zero indicates absence of a path.
self.path_len = [[0 for src in range(self.num_patterns)] for tar in range(self.num_patterns)]
# Initialize the graph with one node, and no links.
self.num_nodes = 1
self.links = []
def add_node(self):
# Add one node, linked to exactly one existing node in the graph, to build a polytree (singly-connected DAG).
new_node = self.num_nodes
# As a connection point, choose a random node that's already in the graph.
cnct_node = self.rand.randint(0, self.num_nodes-1)
# Choose a random direction for the connecting link.
if self.rand.randint(0, 1) == 1:
# Run the link from the new node to the connection node in the graph,
link = Link(self, new_node, cnct_node)
# and update path lengths to other nodes in the graph.
for node in range(self.num_nodes):
if (node != cnct_node) and (self.path_len[cnct_node][node] > 0):
self.path_len[new_node][node] = self.path_len[cnct_node][node] + 1
else:
# Run the link from the connection node in the graph to the new node,
link = Link(self, cnct_node, new_node)
# and update path lengths to other nodes in the graph.
for node in range(self.num_nodes):
if (node != cnct_node) and (self.path_len[node][cnct_node] > 0):
self.path_len[node][new_node] = self.path_len[node][cnct_node] + 1
self.links.append(link)
self.num_nodes += 1
def mean_path_ratio(self):
num_paths = 0
num_node_pairs = 0
for s in range(self.num_patterns):
for t in range(self.num_patterns):
if s != t:
if self.path_len[s][t] > 0:
num_paths += 1
num_node_pairs += 1
return num_paths / num_node_pairs
def mean_path_len(self):
path_len_sum = 0
num_paths = 0
for s in range(self.num_patterns):
for t in range(self.num_patterns):
if s != t:
if self.path_len[s][t] > 0:
path_len_sum += self.path_len[s][t]
num_paths += 1
return path_len_sum / num_paths
def max_path_len(self):
max_len = 0
for s in range(self.num_patterns):
for t in range(self.num_patterns):
if s != t:
if self.path_len[s][t] > max_len:
max_len = self.path_len[s][t]
return max_len
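# A hedged sketch of building a polytree and inspecting its path statistics.
# The `spec` object is assumed to expose val("NUM_PATTERNS"), as used above;
# _DemoSpec is a stand-in for illustration only.
def _graph_demo():
    class _DemoSpec(object):
        def val(self, key):
            return {"NUM_PATTERNS": 8}[key]
    g = Graph(random.Random(0), _DemoSpec())
    g.reset()
    while g.num_nodes < g.num_patterns:
        g.add_node()
    return g.mean_path_ratio(), g.mean_path_len(), g.max_path_len()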
class Pathfinding_Env(object):
def __init__(self, seed=None, spec=None):
if seed:
self.seed = seed
else:
self.seed = 257
self.rand = random.Random(self.seed)
if not spec:
            print('Expected spec argument for Pathfinding_Env. Exiting.')
exit(1)
self.spec = spec
self.num_patterns = spec.val("NUM_PATTERNS")
self.use_display = False
self.reward = 0
self.done = False
self.correct_output = 0 # This initial value doesn't matter.
self.action_space = 2
self.observation_space = 2 * PATTERN_LENGTH + 1
self.observation = np.zeros(self.observation_space)
#self.test_graphs()
self.graph = Graph(self.rand, spec)
self.reset_online_test_sums()
self.cumulative_counts = [[0., 0.] for pattern in range(self.num_patterns-1)]
self.total_reward = 0.
self.total_steps = 0
def test_graphs(self):
num_graphs = 10000
sum_ratios = 0.
sum_lens = 0.
sum_max_lens = 0.
for i in range(num_graphs):
self.rand = random.Random(self.seed + i)
            self.graph = Graph(self.rand, self.spec)
self.graph.reset()
while self.graph.num_nodes < self.num_patterns:
self.graph.add_node()
# print(self.graph.patterns)
# print(self.graph.path_len)
# for link in self.graph.links:
# link.output()
# print()
sum_ratios += self.graph.mean_path_ratio()
sum_lens += self.graph.mean_path_len()
sum_max_lens += self.graph.max_path_len()
print('\n{:8.4f} % of paths are reachable'.format(100 * sum_ratios / num_graphs))
print('{:8.4f} mean path length'.format(sum_lens / num_graphs))
print('{:8.4f} max path length'.format(sum_max_lens / num_graphs))
exit(0)
def assemble_current_observation(self):
if self.quiz_agent_on_next_step:
# Decide whether the correct answer should be 0 or 1.
self.correct_output = self.rand.randint(0, 1)
# Find a random A-B key-lock pair that satisfies the answer.
while True:
node_A = self.rand.randint(0, self.graph.num_nodes - 1)
node_B = self.rand.randint(0, self.graph.num_nodes - 1)
if node_A == node_B:
continue
if (self.graph.path_len[node_A][node_B] > 0) == (self.correct_output == 1):
break
# Show patterns A and B to the agent.
self.observation = self.graph.patterns[node_A] + self.graph.patterns[node_B] + [1.]
else:
# Add one node to the graph.
if self.graph.num_nodes < self.num_patterns:
self.graph.add_node()
# Reveal the latest key-lock pattern pair to the agent.
link = self.graph.links[-1]
self.observation = self.graph.patterns[link.src] + self.graph.patterns[link.tar] + [0.]
self.quiz_agent_on_next_step = not self.quiz_agent_on_next_step
return self.observation
def reset(self, repeat=False, episode_id = None):
self.graph.reset()
self.quiz_agent_on_next_step = False
return self.assemble_current_observation()
def translate_key_to_action(self, key):
action = -1
if key == 'Up':
action = 1
elif key == 'Right':
action = 0
else:
print(("Key not found"))
return action
def step(self, action):
self.reward = 0
self.max_reward = 0.
self.done = False
if not self.quiz_agent_on_next_step:
quiz_id = self.graph.num_nodes - 2
if action == self.correct_output:
self.reward = 1.
self.cumulative_counts[quiz_id][1] += 1.
else:
self.cumulative_counts[quiz_id][0] += 1.
self.max_reward += 1.
if self.graph.num_nodes == self.num_patterns:
self.done = True
self.update_online_test_sums(self.reward, self.done)
ret = self.assemble_current_observation(), self.reward, self.done
return ret
def log_settings(self, summary_file):
return
# Online test support.
# In online testing, each training step is also used for testing the agent.
# This is permissible only in the infinite data case (like games and simulations), where separate train and test sets are not required.
# The environment must define its own (real-valued) online test metric, which may be as simple as accumulated reward.
# To smooth out the reported test results, online testing is divided into contiguous reporting periods of many time steps.
def reset_online_test_sums(self):
# Called only by the environment itself.
self.step_sum = 0
self.reward_sum = 0.
self.num_episodes = 0
self.need_to_reset_sums = False
def update_online_test_sums(self, reward, done):
# Called only by the environment itself.
if self.need_to_reset_sums:
# Another thread recently called reduce_online_test_sums(), so the previous counts are stale.
self.reset_online_test_sums()
# If another thread happens to call reduce_online_test_sums near this point in time,
# one sample from this agent might get dropped. But that's a small price to avoid locking.
self.step_sum += 1
self.reward_sum += reward
self.total_steps += 1
self.total_reward += reward
if done:
self.num_episodes += 1
def report_online_test_metric(self):
# Called by the reporting manager only.
# Calculate the final metric for this test period.
self.reward_percentage = 200.0 * self.reward_sum / self.step_sum # Reward available on every other step.
# Assemble the tuple to be returned.
# 1. The number of steps in the period. (This will be a bit different for each running thread.)
# 2. The actual metric value (must be negated if lower is better).
# 3. A string containing the formatted metric.
# 4. A string containing the metric's units for display.
#ret = (self.step_sum, self.num_episodes, self.reward_percentage, "{:7.3f}".format(self.reward_percentage), "reward percentage", False)
metrics = []
metrics.append((self.reward_percentage, "{:7.3f}".format(self.reward_percentage), "% reward"))
ret = (self.step_sum, self.num_episodes, self.reward_percentage, metrics, False)
# Reset the global sums.
self.reset_online_test_sums()
return ret
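# --- Hedged usage sketch (not part of the original module) ---
# A minimal random-policy driver. It assumes PATTERN_LENGTH, np and random
# are defined earlier in this module, and that the spec object only has to
# answer val("NUM_PATTERNS"); the _Spec stub below is hypothetical.
if __name__ == "__main__":
    class _Spec(object):
        def val(self, name):
            return {"NUM_PATTERNS": 8}[name]
    env = Pathfinding_Env(seed=1, spec=_Spec())
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done = env.step(env.rand.randint(0, 1))
    print("episode reward:", env.total_reward)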
|
|
import base64
import cv2
import numpy as np
import tensorflow as tf
from starmart.input import Input, ImageInput
from starmart.result import Result, CompositeResult, NamedResult, ImageResult, ClassificationResult, Classification, \
Failure
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
model = None
def pre_start() -> None:
global model
model = tf.keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
def infer(data: Input) -> Result:
if not isinstance(data, ImageInput):
return Failure('Invalid input type')
    # decoding base64 image
    nparr = np.frombuffer(base64.b64decode(data.data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# preprocessing image
img = cv2.resize(img, (224, 224))
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
# classifying the image
predictions = model.predict(img)
# decoding model results (with built in tensorflow function)
decoded_predictions = decode_predictions(predictions, top=10)[0]
# parsing the results in order to match the output format
classifications = []
for prediction in decoded_predictions:
classifications.append(Classification(label=prediction[1],
confidence=prediction[2].item()))
return ClassificationResult(classifications)
def input_format() -> Input:
return ImageInput(None)
def output_format() -> Result:
return CompositeResult([
NamedResult('image', ImageResult(None)),
NamedResult('classification_result', ClassificationResult(None))
])
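# --- Hedged local smoke test (not part of the original module) ---
# Exercises pre_start/infer without the starmart runtime. It assumes that
# ImageInput takes the base64 payload as its single constructor argument,
# mirroring the ImageInput(None) call in input_format(); the image path is
# hypothetical.
# if __name__ == '__main__':
#     with open('sample.jpg', 'rb') as fh:
#         payload = base64.b64encode(fh.read()).decode('ascii')
#     pre_start()
#     print(infer(ImageInput(payload)))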
|
|
"""
:mod:`operalib.kernels` implements some Operator-Valued Kernel
models.
"""
# Author: Romain Brault <romain.brault@telecom-paristech.fr> with help from
# the scikit-learn community.
# License: MIT
from numpy import dot, diag, sqrt
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.kernel_approximation import RBFSampler, SkewedChi2Sampler
from scipy.sparse.linalg import LinearOperator
from scipy.linalg import svd
class DotProductKernel(object):
r"""
Dot product Operator-Valued Kernel of the form:
.. math::
x, y \mapsto K(x, y) = \mu \langle x, y \rangle 1_p + (1-\mu) \langle
x, y \rangle^2 I_p
Attributes
----------
    mu : {float}
        Tradeoff between shared and independent components.
    p : {integer}
        Dimension of the targets (n_targets).
References
----------
See also
--------
DotProductKernelMap
Dot Product Kernel Map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DotProductKernel(mu=.2, p=5)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<500x500 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, mu, p):
"""Initialize the Dot product Operator-Valued Kernel.
Parameters
----------
mu : {float}
            Tradeoff between shared and independent components.
p : {integer}
dimension of the targets (n_targets).
"""
self.mu = mu
self.p = p
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DotProductKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DotProductKernelMap
return DotProductKernelMap(X, self.mu, self.p)
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DotProductKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class DecomposableKernel(object):
r"""
Decomposable Operator-Valued Kernel of the form:
.. math::
X, Y \mapsto K(X, Y) = k_s(X, Y) A
where A is a symmetric positive semidefinite operator acting on the
outputs.
Attributes
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associate to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}
Additional parameters (keyword arguments) for kernel function passed as
callable object.
References
----------
See also
--------
DecomposableKernelMap
Decomposable Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 10)
>>> K = ovk.DecomposableKernel(np.eye(2))
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, A, scalar_kernel=rbf_kernel, scalar_kernel_params=None):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
A : {array, LinearOperator}, shape = [n_targets, n_targets]
Linear operator acting on the outputs
scalar_kernel : {callable}
Callable which associate to the training points X the Gram matrix.
scalar_kernel_params : {mapping of string to any}, optional
Additional parameters (keyword arguments) for kernel function
passed as callable object.
"""
self.A = A
self.scalar_kernel = scalar_kernel
self.scalar_kernel_params = scalar_kernel_params
self.p = A.shape[0]
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import DecomposableKernelMap
return DecomposableKernelMap(X, self.A,
self.scalar_kernel,
self.scalar_kernel_params)
def get_orff_map(self, X, D=100, eps=1e-5, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
u, s, v = svd(self.A, full_matrices=False, compute_uv=True)
self.B_ = dot(diag(sqrt(s[s > eps])), v[s > eps, :])
self.r = self.B_.shape[0]
if (self.scalar_kernel is rbf_kernel) and not hasattr(self, 'Xb_'):
if self.scalar_kernel_params is None:
gamma = 1.
else:
gamma = self.scalar_kernel_params['gamma']
self.phi_ = RBFSampler(gamma=gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
        elif (self.scalar_kernel == 'skewed_chi2') and not hasattr(self,
                                                                   'Xb_'):
if self.scalar_kernel_params is None:
skew = 1.
else:
skew = self.scalar_kernel_params['skew']
self.phi_ = SkewedChi2Sampler(skewedness=skew,
n_components=D,
random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X).astype(X.dtype)
elif not hasattr(self, 'Xb_'):
raise NotImplementedError('ORFF map for kernel is not '
'implemented yet')
D = self.phi_.n_components
if X is self.Xb_:
cshape = (D, self.r)
rshape = (self.Xb_.shape[0], self.p)
oshape = (self.Xb_.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(self.Xb_,
b.reshape(cshape)),
self.B_),
                                  rmatvec=lambda r: dot(self.Xb_.T,
dot(r.reshape(rshape),
self.B_.T)))
else:
Xb = self.phi_.transform(X)
cshape = (D, self.r)
rshape = (X.shape[0], self.p)
oshape = (Xb.shape[0] * self.p, D * self.r)
return LinearOperator(oshape,
dtype=self.Xb_.dtype,
matvec=lambda b: dot(dot(Xb,
b.reshape(cshape)),
self.B_),
rmatvec=lambda r: dot(Xb.T,
dot(r.reshape(rshape),
self.B_.T)))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
K_x : DecomposableKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class RBFCurlFreeKernel(object):
r"""
Curl-free Operator-Valued Kernel of the form:
.. math::
        X \mapsto K_X(Y) = 2 \gamma \exp(-\gamma||X - Y||^2)(I - 2\gamma(X - Y)
        (X - Y)^T).
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFCurlFreeKernelMap
Curl-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFCurlFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
        gamma : {float}
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
        K_x : RBFCurlFreeKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFCurlFreeKernelMap
return RBFCurlFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
            return LinearOperator(self.Xb_.shape,
                                  matvec=lambda b: dot(self.Xb_, b),
                                  rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
self.phi_.random_weights_.reshape((1, -1, Xb.shape[1])))
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
        K_x : RBFCurlFreeKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
class RBFDivFreeKernel(object):
r"""
Divergence-free Operator-Valued Kernel of the form:
.. math::
        X \mapsto K_X(Y) = \exp(-\gamma||X-Y||^2)A_{X,Y},
    where
    .. math::
        A_{X,Y} = 2\gamma(X-Y)(X-Y)^T + ((d-1) - 2\gamma||X-Y||^2) I.
Attributes
----------
gamma : {float}
RBF kernel parameter.
References
----------
See also
--------
RBFDivFreeKernelMap
Divergence-free Kernel map
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> X = np.random.randn(100, 2)
>>> K = ovk.RBFDivFreeKernel(1.)
>>> # The kernel matrix as a linear operator
>>> K(X, X) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
<200x200 _CustomLinearOperator with dtype=float64>
"""
def __init__(self, gamma):
"""Initialize the Decomposable Operator-Valued Kernel.
Parameters
----------
        gamma : {float}
RBF kernel parameter.
"""
self.gamma = gamma
def get_kernel_map(self, X):
r"""Return the kernel map associated with the data X.
.. math::
K_x: Y \mapsto K(X, Y)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
        K_x : RBFDivFreeKernelMap, callable
.. math::
K_x: Y \mapsto K(X, Y).
"""
from .kernel_maps import RBFDivFreeKernelMap
return RBFDivFreeKernelMap(X, self.gamma)
def get_orff_map(self, X, D=100, random_state=0):
r"""Return the Random Fourier Feature map associated with the data X.
.. math::
K_x: Y \mapsto \tilde{\Phi}(X)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
\tilde{\Phi}(X) : Linear Operator, callable
"""
self.r = 1
if not hasattr(self, 'Xb_'):
self.phi_ = RBFSampler(gamma=self.gamma,
n_components=D, random_state=random_state)
self.phi_.fit(X)
self.Xb_ = self.phi_.transform(X)
self.Xb_ = (self.Xb_.reshape((self.Xb_.shape[0],
1, self.Xb_.shape[1])) *
self.phi_.random_weights_.reshape((1, -1,
self.Xb_.shape[1])))
self.Xb_ = self.Xb_.reshape((-1, self.Xb_.shape[2]))
D = self.phi_.n_components
if X is self.Xb_:
            return LinearOperator(self.Xb_.shape,
                                  matvec=lambda b: dot(self.Xb_, b),
                                  rmatvec=lambda r: dot(self.Xb_.T, r))
else:
Xb = self.phi_.transform(X)
# TODO:
# w = self.phi_.random_weights_.reshape((1, -1, Xb.shape[1]))
# wn = np.linalg.norm(w)
# Xb = (Xb.reshape((Xb.shape[0], 1, Xb.shape[1])) *
# wn * np.eye()w np.dot(w.T, w) / wn)
Xb = Xb.reshape((-1, Xb.shape[2]))
return LinearOperator(Xb.shape,
matvec=lambda b: dot(Xb, b),
rmatvec=lambda r: dot(Xb.T, r))
def __call__(self, X, Y=None):
r"""Return the kernel map associated with the data X.
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise.}
\end{cases}
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples1, n_features]
Samples.
Y : {array-like, sparse matrix}, shape = [n_samples2, n_features],
default = None
Samples.
Returns
-------
        K_x : RBFDivFreeKernelMap, callable or LinearOperator
.. math::
K_x: \begin{cases}
Y \mapsto K(X, Y) \enskip\text{if } Y \text{is None,} \\
K(X, Y) \enskip\text{otherwise}
\end{cases}
"""
Kmap = self.get_kernel_map(X)
if Y is None:
return Kmap
else:
return Kmap(Y)
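# --- Hedged usage sketch (not part of the original module) ---
# The kernels above return scipy LinearOperators when called on data, as the
# doctests indicate, so a matvec applies the operator-valued Gram matrix
# without materialising it:
#     import numpy as np
#     import operalib as ovk
#     X = np.random.randn(10, 3)
#     Kx = ovk.DecomposableKernel(np.eye(2))(X, X)  # 20x20 LinearOperator
#     v = np.random.randn(Kx.shape[1])
#     print(Kx.matvec(v).shape)                     # (20,)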
|
|
"""Estimate tumor purity and frequency using copy number and allele freqencies with BubbleTree.
http://www.bioconductor.org/packages/release/bioc/html/BubbleTree.html
http://www.bioconductor.org/packages/release/bioc/vignettes/BubbleTree/inst/doc/BubbleTree-vignette.html
"""
from __future__ import print_function
import collections
import csv
import os
import re
import subprocess
import numpy as np
import pysam
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.heterogeneity import chromhacks
from bcbio.structural import shared
from bcbio.variation import bedutils
population_keys = ['AC_AFR', 'AC_AMR', 'AC_EAS', 'AC_FIN', 'AC_NFE', 'AC_OTH', 'AC_SAS']
def run(vrn_info, calls_by_name, somatic_info, do_plots=True, handle_failures=True):
"""Run BubbleTree given variant calls, CNVs and somatic
"""
if "seq2c" in calls_by_name:
cnv_info = calls_by_name["seq2c"]
elif "cnvkit" in calls_by_name:
cnv_info = calls_by_name["cnvkit"]
else:
raise ValueError("BubbleTree only currently support CNVkit and Seq2c: %s" % ", ".join(calls_by_name.keys()))
work_dir = _cur_workdir(somatic_info.tumor_data)
class OutWriter:
def __init__(self, out_handle):
self.writer = csv.writer(out_handle)
def write_header(self):
self.writer.writerow(["chrom", "start", "end", "freq"])
def write_row(self, rec, stats):
self.writer.writerow([_to_ucsc_style(rec.chrom), rec.start, rec.stop, stats["tumor"]["freq"]])
vcf_csv = prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"],
work_dir, somatic_info, OutWriter, cnv_info["cns"])
cnv_csv = _prep_cnv_file(cnv_info["cns"], cnv_info["variantcaller"], work_dir,
somatic_info.tumor_data)
wide_lrr = cnv_info["variantcaller"] == "cnvkit" and somatic_info.normal_bam is None
return _run_bubbletree(vcf_csv, cnv_csv, somatic_info.tumor_data, wide_lrr, do_plots,
handle_failures)
def _run_bubbletree(vcf_csv, cnv_csv, data, wide_lrr=False, do_plots=True,
handle_failures=True):
"""Create R script and run on input data
    BubbleTree has some internal hardcoded parameters that assume a narrow
    distribution of log2 scores. This does not hold for tumor-only calls, so
    if we specify wide_lrr we scale the calculations to actually get calls.
    A better long-term solution with flexible parameters is still needed.
"""
lrr_scale = 10.0 if wide_lrr else 1.0
local_sitelib = utils.R_sitelib()
base = utils.splitext_plus(vcf_csv)[0]
r_file = "%s-run.R" % base
bubbleplot_out = "%s-bubbleplot.pdf" % base
trackplot_out = "%s-trackplot.pdf" % base
calls_out = "%s-calls.rds" % base
freqs_out = "%s-bubbletree_prevalence.txt" % base
sample = dd.get_sample_name(data)
do_plots = "yes" if do_plots else "no"
with open(r_file, "w") as out_handle:
out_handle.write(_script.format(**locals()))
if not utils.file_exists(freqs_out):
cmd = "%s && %s %s" % (utils.get_R_exports(), utils.Rscript_cmd(), r_file)
try:
do.run(cmd, "Assess heterogeneity with BubbleTree")
except subprocess.CalledProcessError as msg:
if handle_failures and _allowed_bubbletree_errorstates(str(msg)):
with open(freqs_out, "w") as out_handle:
                        out_handle.write('bubbletree failed:\n %s\n' % str(msg))
else:
                logger.exception("BubbleTree failed")
raise
return {"caller": "bubbletree",
"report": freqs_out,
"plot": {"bubble": bubbleplot_out, "track": trackplot_out}}
def _allowed_bubbletree_errorstates(msg):
allowed = ["Error in p[i, ] : subscript out of bounds",
"replacement has .* rows, data has"]
return any([len(re.findall(m, msg)) > 0 for m in allowed])
def _cns_to_coords(line):
chrom, start, end = line.split()[:3]
return (chrom, start, end)
def _prep_cnv_file(cns_file, svcaller, work_dir, data):
"""Create a CSV file of CNV calls with log2 and number of marks.
"""
in_file = cns_file
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
svcaller))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
writer = csv.writer(out_handle)
writer.writerow(["chrom", "start", "end", "num.mark", "seg.mean"])
                    header = next(reader)
for line in reader:
cur = dict(zip(header, line))
if chromhacks.is_autosomal(cur["chromosome"]):
writer.writerow([_to_ucsc_style(cur["chromosome"]), cur["start"],
cur["end"], cur["probes"], cur["log2"]])
return out_file
def prep_vrn_file(in_file, vcaller, work_dir, somatic_info, writer_class, seg_file=None):
"""Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics.
"""
data = somatic_info.tumor_data
params = {"min_freq": 0.4,
"max_freq": 0.6,
"tumor_only": {"min_freq": 0.10, "max_freq": 0.90},
"min_depth": 20,
"hetblock": {"min_alleles": 25,
"allowed_misses": 2}}
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
#ready_bed = _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info)
ready_bed = None
if ready_bed and utils.file_exists(ready_bed):
sub_file = _create_subset_file(in_file, ready_bed, work_dir, data)
else:
sub_file = in_file
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = writer_class(out_handle)
writer.write_header()
bcf_in = pysam.VariantFile(sub_file)
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
if chromhacks.is_autosomal(rec.chrom) and stats is not None:
writer.write_row(rec, stats)
return out_file
def _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info):
"""Identify heterogeneity blocks corresponding to segmentation from CNV input file.
"""
def _segment_by_cns(target_chrom, freqs, coords):
with open(seg_file) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
            next(reader)  # header
for cur_chrom, start, end in (xs[:3] for xs in reader):
if cur_chrom == target_chrom:
block_freqs = []
for i, (freq, coord) in enumerate(zip(freqs, coords)):
if coord >= int(start) and coord < int(end):
block_freqs.append(freq)
elif coord >= int(end):
break
coords = coords[max(0, i - 1):]
freqs = freqs[max(0, i - 1):]
if len(block_freqs) > params["hetblock"]["min_alleles"]:
yield start, end
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_cns, params, work_dir, somatic_info)
def _identify_heterogeneity_blocks_hmm(in_file, params, work_dir, somatic_info):
"""Use a HMM to identify blocks of heterogeneity to use for calculating allele frequencies.
The goal is to subset the genome to a more reasonable section that contains potential
loss of heterogeneity or other allele frequency adjustment based on selection.
"""
def _segment_by_hmm(chrom, freqs, coords):
cur_coords = []
for j, state in enumerate(_predict_states(freqs)):
if state == 0: # heterozygote region
if len(cur_coords) == 0:
num_misses = 0
cur_coords.append(coords[j])
else:
num_misses += 1
if num_misses > params["hetblock"]["allowed_misses"]:
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
cur_coords = []
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_hmm, params, work_dir, somatic_info)
def _identify_heterogeneity_blocks_shared(in_file, segment_fn, params, work_dir, somatic_info):
"""Identify heterogeneity blocks corresponding to segmentation from CNV input file.
"""
out_file = os.path.join(work_dir, "%s-hetblocks.bed" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for chrom, freqs, coords in _freqs_by_chromosome(in_file, params, somatic_info):
for start, end in segment_fn(chrom, freqs, coords):
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
return out_file
def _predict_states(freqs):
"""Use frequencies to predict states across a chromosome.
Normalize so heterozygote blocks are assigned state 0 and homozygous
are assigned state 1.
"""
from hmmlearn import hmm
freqs = np.column_stack([np.array(freqs)])
model = hmm.GaussianHMM(2, covariance_type="full")
model.fit(freqs)
states = model.predict(freqs)
freqs_by_state = collections.defaultdict(list)
for i, state in enumerate(states):
freqs_by_state[state].append(freqs[i])
if np.median(freqs_by_state[0]) > np.median(freqs_by_state[1]):
states = [0 if s == 1 else 1 for s in states]
return states
def _freqs_by_chromosome(in_file, params, somatic_info):
"""Retrieve frequencies across each chromosome as inputs to HMM.
"""
freqs = []
coords = []
cur_chrom = None
with pysam.VariantFile(in_file) as bcf_in:
for rec in bcf_in:
if _is_biallelic_snp(rec) and _passes_plus_germline(rec) and chromhacks.is_autosomal(rec.chrom):
if cur_chrom is None or rec.chrom != cur_chrom:
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords
cur_chrom = rec.chrom
freqs = []
coords = []
stats = _tumor_normal_stats(rec, somatic_info)
if tz.get_in(["tumor", "depth"], stats, 0) > params["min_depth"]:
# not a ref only call
if len(rec.samples) == 0 or sum(rec.samples[somatic_info.tumor_name].allele_indices) > 0:
freqs.append(tz.get_in(["tumor", "freq"], stats))
coords.append(rec.start)
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords
def _create_subset_file(in_file, het_region_bed, work_dir, data):
"""Subset the VCF to a set of pre-calculated smaller regions.
"""
cnv_regions = shared.get_base_cnv_regions(data, work_dir)
region_bed = bedutils.intersect_two(het_region_bed, cnv_regions, work_dir, data)
out_file = os.path.join(work_dir, "%s-origsubset.bcf" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
regions = ("-R %s" % region_bed) if utils.file_exists(region_bed) else ""
cmd = "bcftools view {regions} -o {tx_out_file} -O b {in_file}"
do.run(cmd.format(**locals()), "Extract regions for BubbleTree frequency determination")
return out_file
def _to_ucsc_style(chrom):
"""BubbleTree assumes hg19 UCSC style chromosome inputs.
"""
return "chr%s" % chrom if not str(chrom).startswith("chr") else chrom
def _passes_plus_germline(rec):
"""Check if a record passes filters (but might be germline -- labelled with REJECT).
"""
allowed = set(["PASS", "REJECT"])
filters = [x for x in rec.filter.keys() if x in allowed]
return len(filters) > 0
def _is_biallelic_snp(rec):
return _is_snp(rec) and len(rec.alts) == 1
def _is_snp(rec):
return max([len(x) for x in rec.alleles]) == 1
def _tumor_normal_stats(rec, somatic_info):
"""Retrieve depth and frequency of tumor and normal samples.
"""
out = {"normal": {"alt": None, "depth": None, "freq": None},
"tumor": {"alt": 0, "depth": 0, "freq": None}}
# Handle INFO only inputs
if len(rec.samples) == 0:
samples = [(somatic_info.tumor_name, None)]
else:
samples = rec.samples.items()
for name, sample in samples:
alt, depth, freq = sample_alt_and_depth(rec, sample)
if depth is not None and freq is not None:
if name == somatic_info.normal_name:
key = "normal"
elif name == somatic_info.tumor_name:
key = "tumor"
out[key]["freq"] = freq
out[key]["depth"] = depth
out[key]["alt"] = alt
return out
def _is_possible_loh(rec, vcf_rec, params, somatic_info):
"""Check if the VCF record is a het in the normal with sufficient support.
Only returns SNPs, since indels tend to have less precise frequency measurements.
"""
if _is_biallelic_snp(rec) and _passes_plus_germline(rec):
stats = _tumor_normal_stats(rec, somatic_info)
depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]]
depths = [d for d in depths if d is not None]
normal_freq = tz.get_in(["normal", "freq"], stats)
tumor_freq = tz.get_in(["tumor", "freq"], stats)
if all([d > params["min_depth"] for d in depths]):
if normal_freq is not None:
if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]:
return stats
elif (tumor_freq >= params["tumor_only"]["min_freq"] and
tumor_freq <= params["tumor_only"]["max_freq"]):
if not _has_population_germline(vcf_rec) or is_population_germline(rec):
return stats
def _has_population_germline(rec):
"""Check if header defines population annotated germline samples for tumor only.
"""
for k in population_keys:
if k in rec.header.info:
return True
return False
def is_population_germline(rec):
"""Identify a germline calls based on annoations with ExAC or other population databases.
"""
min_count = 50
for k in population_keys:
if k in rec.info:
val = rec.info.get(k)
if "," in val:
val = val.split(",")[0]
if isinstance(val, (list, tuple)):
val = max(val)
if int(val) > min_count:
return True
return False
def sample_alt_and_depth(rec, sample):
"""Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases.
"""
if sample and "AD" in sample:
all_counts = [int(x) for x in sample["AD"]]
alt_counts = sum(all_counts[1:])
depth = sum(all_counts)
elif sample and "AO" in sample and sample.get("RO") is not None:
alts = sample["AO"]
if not isinstance(alts, (list, tuple)):
alts = [alts]
alt_counts = sum([int(x) for x in alts])
depth = alt_counts + int(sample["RO"])
elif "DP" in rec.info and "AF" in rec.info:
af = rec.info["AF"][0] if isinstance(rec.info["AF"], (tuple, list)) else rec.info["AF"]
return None, rec.info["DP"], af
else:
alt_counts = None
if alt_counts is None or depth is None or depth == 0:
return None, None, None
else:
freq = float(alt_counts) / float(depth)
return alt_counts, depth, freq
def _cur_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "heterogeneity",
dd.get_sample_name(data), "bubbletree"))
if __name__ == "__main__":
import sys
bcf_in = pysam.VariantFile(sys.argv[1])
somatic = collections.namedtuple("Somatic", "normal_name,tumor_name")
params = {"min_freq": 0.4,
"max_freq": 0.6,
"min_depth": 15}
for rec in bcf_in:
if _is_possible_loh(rec, bcf_in, params, somatic(sys.argv[2], sys.argv[3])):
print(rec.filter.keys(), len(rec.filter))
_script = """
.libPaths(c("{local_sitelib}"))
library(BubbleTree)
library(GenomicRanges)
library(ggplot2)
vc.df = read.csv("{vcf_csv}", header=T)
vc.gr = GRanges(vc.df$chrom, IRanges(vc.df$start, vc.df$end),
freq=vc.df$freq, score=vc.df$freq)
cnv.df = read.csv("{cnv_csv}", header=T)
cnv.gr = GRanges(cnv.df$chrom, IRanges(cnv.df$start, cnv.df$end),
num.mark=cnv.df$num.mark, seg.mean=cnv.df$seg.mean,
score=cnv.df$seg.mean)
print(vc.gr)
print(cnv.gr)
r <- new("RBD")
rbd <- makeRBD(r, vc.gr, cnv.gr)
rbd$lrr <- rbd$lrr / {lrr_scale}
print(head(rbd))
calls <- new("BTreePredictor", rbd=rbd)
calls <- btpredict(calls)
saveRDS(calls, "{calls_out}")
purity <- calls@result$prev[1]
adj <- calls@result$ploidy.adj["adj"]
# when purity is low the calculation result is not reliable
ploidy <- (2*adj -2)/purity + 2
out <- data.frame(sample="{sample}",
purity=round(purity,3),
prevalences=paste(round(calls@result$prev,3), collapse=";"),
tumor_ploidy=round(ploidy,1))
write.csv(out, file="{freqs_out}", row.names=FALSE)
title <- sprintf("{sample} (%s)", info(calls))
# XXX Needs to be generalized for non-build 37/hg19 plots
# hg19.seqinfo is hardcoded in TrackPlotter but we might
# be able to work around with just an external centromere.dat import
if ("{do_plots}" == "yes") {{
load(system.file("data", "centromere.dat.rda", package="BubbleTree"))
load(system.file("data", "hg19.seqinfo.rda", package="BubbleTree"))
trackplotter <- new("TrackPlotter")
z1 <- heteroLociTrack(trackplotter, calls@result, centromere.dat, vc.gr) + ggplot2::labs(title=title)
z2 <- RscoreTrack(trackplotter, calls@result, centromere.dat, cnv.gr)
t2 <- getTracks(z1, z2)
pdf(file="{trackplot_out}", width=8, height=6)
g <- gridExtra::grid.arrange(t2, ncol=1)
print(g)
dev.off()
pdf(file="{bubbleplot_out}", width=8, height=6)
btreeplotter <- new("BTreePlotter")
g <- drawBTree(btreeplotter, calls@rbd.adj) + ggplot2::labs(title=title)
print(g)
dev.off()
}}
"""
|
|
import huobi.model.position
import huobi.model.bararray
import numpy as np
class TradeInfoArray:
def __init__(self, interval, size=100):
self.count = 0
self.inited = False
self.size = size
self.ttmu_buy_ratio = np.zeros(size)
self.ttmu_sell_ratio = np.zeros(size)
self.ttsi_buy_ratio = np.zeros(size)
self.ttsi_sell_ratio = np.zeros(size)
self.ttsi_locked_ratio = np.zeros(size)
self.market_position = np.zeros(size)
self.id = np.zeros(size)
self.interval = interval
self.boll_up_array = np.zeros(size)
self.boll_down_array = np.zeros(size)
self.boll_mid_array = np.zeros(size)
self.boll_up_cnt_array = np.zeros(size)
self.boll_up_brk_cnt_array = np.zeros(size)
self.boll_down_cnt_array = np.zeros(size)
self.boll_down_brk_cnt_array = np.zeros(size)
self.boll_up = 0
self.boll_down = 0
self.boll_mid = 0
self.boll_up_cnt = 0
self.boll_up_brk_cnt = 0
self.boll_down_cnt = 0
self.boll_down_brk_cnt = 0
def update_ttsi(self, ttsi_list: list):
for cc in range(len(ttsi_list)):
self.update_ttsi_array(ttsi_list[cc])
def update_ttsi_array(self, ttsi_list):
self.ttsi_buy_ratio[:-1] = self.ttsi_buy_ratio[1:]
self.ttsi_sell_ratio[:-1] = self.ttsi_sell_ratio[1:]
self.ttsi_locked_ratio[:-1] = self.ttsi_locked_ratio[1:]
self.ttsi_buy_ratio[-1] = ttsi_list.buy_ratio
self.ttsi_sell_ratio[-1] = ttsi_list.sell_ratio
self.ttsi_locked_ratio[-1] = ttsi_list.locked_ratio
def update_ttmu(self, ttmu_list: list):
for cc in range(len(ttmu_list)):
self.update_ttmu_array(ttmu_list[cc])
def update_ttmu_array(self, ttmu_list):
self.ttmu_buy_ratio[:-1] = self.ttmu_buy_ratio[1:]
self.ttmu_sell_ratio[:-1] = self.ttmu_sell_ratio[1:]
self.ttmu_buy_ratio[-1] = ttmu_list.buy_ratio
self.ttmu_sell_ratio[-1] = ttmu_list.sell_ratio
def update_postion(self, position_list: list):
new_lst = position_list[::-1]
for cc in range(len(new_lst)):
self.update_postion_array(new_lst[cc])
    def update_postion_array(self, position):
        self.market_position[:-1] = self.market_position[1:]
        self.market_position[-1] = position.volume
def update_boll(self, barhigh, barlow, boll_up, boll_down, ccv):
"""
Update new bar data into array manager.
"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.boll_up = boll_up
self.boll_down = boll_down
self.boll_mid = (self.boll_up + self.boll_down) / 2
if barlow > self.boll_mid:
self.boll_up_cnt = self.boll_up_cnt + 1
self.boll_down_cnt = 0
else:
self.boll_up_cnt = 0
if barhigh < self.boll_mid:
self.boll_down_cnt = self.boll_down_cnt + 1
self.boll_up_cnt = 0
else:
self.boll_down_cnt = 0
if barhigh > self.boll_up:
self.boll_up_brk_cnt = self.boll_up_brk_cnt + 1
else:
self.boll_up_brk_cnt = 0
if barlow < self.boll_down:
self.boll_down_brk_cnt = self.boll_down_brk_cnt + 1
else:
self.boll_down_brk_cnt = 0
self.boll_up_cnt_array[ccv] = self.boll_up_cnt
self.boll_up_brk_cnt_array[ccv] = self.boll_up_brk_cnt
self.boll_down_cnt_array[ccv] = self.boll_down_cnt
self.boll_down_brk_cnt_array[ccv] = self.boll_down_brk_cnt
def update_boll_array(self, bar, am):
self.boll_up_array, self.boll_down_array = am.boll(20, 2, True)
for ccv in range(19, self.size):
self.update_boll(bar[ccv].high, bar[ccv].low, self.boll_up_array[ccv], self.boll_down_array[ccv], ccv)
def bollr(self, array=False):
if array:
return self.boll_up_array, self.boll_up_cnt_array, self.boll_up_brk_cnt_array, self.boll_down_array, self.boll_down_cnt_array, self.boll_down_brk_cnt_array
return self.boll_up, self.boll_up_cnt, self.boll_up_brk_cnt, self.boll_down, self.boll_down_cnt, self.boll_down_brk_cnt
def boll_to_str(self):
tt: str = """ Boll Array
self.boll_up: {},
self.boll_up_cnt: {},
self.boll_up_brk_cnt: {},
self.boll_down: {},
self.boll_down_cnt:{},
self.boll_down_brk_cnt:{}
""".format(self.boll_up, self.boll_up_cnt, self.boll_up_brk_cnt, self.boll_down, self.boll_down_cnt,
self.boll_down_brk_cnt)
return tt
def trade_to_str(self):
tt: str = """ Trade Info
TTSI: {}, Prior TTSI {}, diff :{}
TTMU: {}, Prior TTMU {}, diff : {}
Position: {} , Prior Position: {}, diff:{}
""".format(self.ttsi_buy_ratio[-1], self.ttsi_buy_ratio[-2], round(self.ttsi_buy_ratio[-1]-self.ttsi_buy_ratio[-2],2), self.ttmu_buy_ratio[-2],self.ttmu_buy_ratio[-2],round(self.ttmu_buy_ratio[-1]-self.ttmu_buy_ratio[-2],3),
self.market_position[-1],self.market_position[-2],self.market_position[-1]-self.market_position[-2])
return tt
def update_all(self, ttsi, ttmu, position, candlestick_list_1, am_1):
self.update_ttmu(ttmu)
self.update_ttsi(ttsi)
self.update_postion(position)
self.update_boll_array(candlestick_list_1, am_1)
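# --- Hedged usage sketch (not part of the original module) ---
# Feeding ratio updates with a stand-in record type; it assumes the huobi
# model objects only need the buy_ratio / sell_ratio / locked_ratio
# attributes that the update_* methods above actually read.
if __name__ == "__main__":
    from collections import namedtuple
    Ttsi = namedtuple("Ttsi", "buy_ratio sell_ratio locked_ratio")
    tia = TradeInfoArray(interval="1min", size=100)
    tia.update_ttsi([Ttsi(0.55, 0.45, 0.10), Ttsi(0.57, 0.43, 0.12)])
    print(tia.ttsi_buy_ratio[-2:])  # expected: [0.55 0.57]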
|
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import numpy as np
import sys
sys.path.append('../codes')
from utils import hyperbolic_utils as hyp
from utils import manifolds
from run import *
from adjustText import adjust_text
import networkx as nx
import itertools
from scipy.special import comb
import multiprocessing as mp
import os
import pdb
# # Loading checkpoint
# In[36]:
# data_path = '../data/wn18rr'
# ckpt_path = '../models/RotatCones2_wn18rr_1/checkpoint/ckpt_29999'
# data_path = '../data/synthetic_tree_2'
# ckpt_path = '../models/EmbedCones_synthetic_tree_2_-3/checkpoint/ckpt_9999'
# # filename = 'RotatCones wn18rr_small_2_1 id:4 neg:50 dim: 1'
# filename = 'none'
data_path = '../data/wn18_small_3'
ckpt_path = '../models/EmbedCones_wn18_small_3_-1/checkpoint/ckpt_19999'
# ckpt_path = '../models/PoincareEmbedding_wn18_small_1_-1/checkpoint/ckpt_7999'
# filename = 'RotatCones wn18rr_small_2_1 id:4 neg:50 dim: 1'
filename = 'none'
# data_path = '../data/synthetic_tree_1'
# ckpt_path = '../models/EmbedCones_wn18_small_1_-1/checkpoint/ckpt_9999'
# # filename = 'RotatCones wn18rr_small_2_1 id:4 neg:50 dim: 1'
# filename = 'none'
# data_path = '../data/synthetic_tree_1'
# ckpt_path = '../models/RotatTransH2_synthetic_tree_1_-2/checkpoint/ckpt_999'
# # filename = 'RotatCones wn18rr_small_2_1 id:4 neg:50 dim: 1'
# filename = 'none'
# data_path = '../data/synthetic_tree_1'
# ckpt_path = '../models/PoincareEmbedding_synthetic_tree_1_-2/checkpoint/ckpt_1999'
# # filename = 'RotatCones wn18rr_small_2_1 id:4 neg:50 dim: 1'
# filename = 'none'
# data_path = '../data/wn18rr_small_2_1'
# ckpt_path = '../models/PoincareEmbedding_wn18rr_small_2_1_-2/checkpoint/ckpt_27499'
# filename = 'Poincare Embedding wn18rr_small_2_1 id:-2 neg:50 dim: 1'
# data_path = '../data/wn18rr_small_2'
# ckpt_path = '../models/RotatCones2_wn18rr_small_2_2/checkpoint/ckpt_17499'
# filename = 'current_figure'
# data_path = '../data/wn18rr_small_2'
# ckpt_path = '../models/PoincareEmbedding_wn18rr_small_2_0/checkpoint/ckpt_67499'
# filename = 'Poincare Embedding wn18rr_small_2 id:0 neg:50 dim: 1'
# data_path = '../data/wn18rr'
# ckpt_path = '../models/PoincareEmbedding_wn18rr_0/checkpoint/ckpt_37499'
# filename = 'Poincare Embedding wn18rr id:0 neg:50 dim: 500'
# data_path = '../data/wn18rr'
# ckpt_path = '../models/RotatH_wn18rr_-1/checkpoint/ckpt_79999'
checkpoint = torch.load(ckpt_path, map_location='cpu')
print('Loading checkpoint from {}'.format(ckpt_path))
model_state_dict = checkpoint['model_state_dict']
save_entity_embedding = model_state_dict['entity_embedding']
relation_embedding = model_state_dict['relation_embedding']
c = torch.nn.Softplus()(model_state_dict['curvature'])
embedding_range = model_state_dict['embedding_range']
entity_dim = save_entity_embedding.shape[1]
if entity_dim % 2 == 1: save_entity_embedding = save_entity_embedding[:, 0:entity_dim - 1]
# # Create Graph
G = nx.read_gpickle(os.path.join(data_path, "graph.gpickle"))
with open(os.path.join('../data/wn18_small_3', 'entities.dict')) as fin:
node2id = dict()
id2entity = dict()
for line in fin:
eid, entity = line.strip().split('\t')
node2id[entity] = int(eid)
id2entity[eid] = entity
# # Generate query
# In[13]:
def lca_helper(pair):
n1, n2 = pair
stp_1 = nx.shortest_path_length(G, target=n1)
stp_2 = nx.shortest_path_length(G, target=n2)
    del stp_1[n1], stp_2[n2]  # drop the zero-length self entries from the dictionaries
intersection = set.intersection(set(stp_1.keys()), stp_2.keys())
if len(intersection) == 1:
min_len = [(k, stp_1[k] + stp_2[k]) for k in intersection]
return (n1, n2, intersection, min_len[0][1])
elif len(intersection) > 1:
len_sum = [(k, stp_1[k] + stp_2[k]) for k in intersection]
min_len = min(len_sum, key=lambda x: x[1])
lca = [k for k, length in len_sum if length == min_len[1]]
return (n1, n2, lca, min_len[1])
nodes = G.nodes()
combinations = itertools.combinations(nodes, 2)
print(G.order())
print(comb(G.order(), 2))
print(mp.cpu_count())
pool = mp.Pool(4)
results = []
start = time.time()
results = pool.map_async(lca_helper, combinations)
print('func time: ', time.time() - start)
start = time.time()
results = results.get()
print('result time: ', time.time() - start, 'avg: ', (time.time() - start) / (comb(G.order(), 2)))
start = time.time()
results = list(filter(None, results))
print(len(results))
print('filter time: ', time.time() - start)
pool.close()
# # Evaluate LCA
# In[18]:
lca_query = results
K = 0.1
manifold = manifolds.PoincareManifold(K=K)
entity_embedding = save_entity_embedding[0:-1, :]
print(entity_embedding.shape)
half_aperture = manifold.half_aperture(entity_embedding)
correct_count = 0.
count = 0
def evaluate_single_lca_query(lca_query):
p, q, lca, min_len = lca_query
p = node2id[str(p)]
q = node2id[str(q)]
lca = list(lca)
lca = [node2id[str(i)] for i in lca]
# lca = list(lca)
p_e = entity_embedding[p, :].unsqueeze(0)
q_e = entity_embedding[q, :].unsqueeze(0)
lca_e = entity_embedding[lca[0], :]
score_cones_p = manifold.angle_at_u(entity_embedding, p_e) - half_aperture
score_cones_q = manifold.angle_at_u(entity_embedding, q_e) - half_aperture
score_sum = score_cones_p.clamp(min=0.) + score_cones_q.clamp(min=0.)
# score_sum[mask] = -1.
if (score_sum == 0).nonzero().shape[0] == 0:
pred = score_sum.argmin().item()
# pred = 0
# if pred not in lca:
# print(p, q, lca, pred)
# print('here')
else:
candidate = (score_sum == 0).nonzero()
candidate_norm = entity_embedding[candidate, :].norm(dim=-1)
# avg_norm = 0.5 * (entity_embedding[p, :].norm(dim=-1) + entity_embedding[q, :].norm(dim=-1))
# margin = 5e-3
# f = (candidate_norm < (avg_norm + margin)) & (candidate_norm > (avg_norm - margin))
# candidate_norm[f] = -1
# for i in range(candidate.shape[0]):
# if candidate[i] in leaf_nodes:
# candidate_norm[i] = -1.
pred = candidate[candidate_norm.argmax()].item()
# if pred not in lca:
# # pdb.set_trace()
# print(p, q, lca, pred, candidate, candidate_norm, candidate_norm.argmax(), len(candidate))
# # print(score_cones_p, score_cones_q)
# if pred in lca:
# correct_count += 1.
# # else:
# # print(p, q, lca, pred)
# count += 1
# if count % 1000 == 0:
# print(count, correct_count / count)
if pred in lca:
return 1.0
else:
return 0.
pool = mp.Pool(4)
results = []
start = time.time()
results = pool.map_async(evaluate_single_lca_query, lca_query)
print('func time: ', time.time() - start)
start = time.time()
results = results.get()
print('result time: ', time.time() - start, 'avg: ', (time.time() - start) / (comb(G.order(), 2)))
start = time.time()
# results = list(filter(None, results))
print(len(results))
print(sum(results) / len(lca_query), len(lca_query))
print('filter time: ', time.time() - start)
pool.close()
|
|
# -*- coding: utf-8 -*-
'''
test program to show various calculation result of xmensur
'''
__version__ = '0.1'
import xmensur as xmn
import argparse
import numpy as np
if __name__ == "__main__" :
# exec this as standalone program.
parser = argparse.ArgumentParser(description='Print mensur data.')
parser.add_argument('-v', '--version', action = 'version', version='%(prog)s {}'.format(__version__))
parser.add_argument('-c','--convert',action = 'store_true',help = 'convert to L,D. L: total length from beginning. D: diameter.')
parser.add_argument('filepath')
args = parser.parse_args()
path = args.filepath
if path:
        # read mensur file here
        with open(path, 'r') as fd:
            lines = fd.readlines()
        mentop = xmn.build_mensur(lines)
        print('#', path)
        if not args.convert:
            xmn.print_mensur(mentop, True)
        else:
            xmn.print_mensur_ld(mentop)
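# Hedged invocation examples (script and mensur file names are hypothetical):
#   python xmtest.py sample.men
#   python xmtest.py -c sample.men   # print as total length L and diameter D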
|
|
import numpy as np
X = np.array(([0.4, -0.7], [0.3, -0.5], [0.6, 0.1], [0.2,0.4], [0.1,-0.2]), dtype=float)
y = np.array(([0.1], [0.05], [0.3], [0.25], [0.12]), dtype=float)
class Neural_Network(object):
def __init__(self):
self.inputSize = 2
self.outputSize = 1
self.hiddenSize = 2
self.W1 = np.random.randn(self.inputSize, self.hiddenSize)
self.W2 = np.random.randn(self.hiddenSize, self.outputSize)
def forward(self, X):
self.z = np.dot(X, self.W1)
self.z2 = self.sigmoid(self.z)
self.z3 = np.dot(self.z2, self.W2)
o = self.sigmoid(self.z3)
return o
def sigmoid(self, s):
return 1/(1+np.exp(-s))
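    # Note: sigmoidPrime below expects s to already be a sigmoid output, so
    # s * (1 - s) equals the derivative sigma'(x) = sigma(x) * (1 - sigma(x)).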
def sigmoidPrime(self, s):
return s * (1 - s)
def backward(self, X, y, o):
self.o_error = y - o # error in output
self.o_delta = self.o_error*self.sigmoidPrime(o)
self.z2_error = self.o_delta.dot(self.W2.T)
self.z2_delta = self.z2_error*self.sigmoidPrime(self.z2)
self.W1 += X.T.dot(self.z2_delta)
self.W2 += self.z2.T.dot(self.o_delta)
    def train(self, X, y):
o = self.forward(X)
self.backward(X, y, o)
NN = Neural_Network()
for i in range(2): # two iterations
print ("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" + str(NN.forward(X)))
print("Loss: \n" + str(np.mean(np.square(y - NN.forward(X)))))
print("\n")
NN.train(X, y)
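# --- Hedged extension (not in the original): keep training and report the
# final loss; the exact value depends on the random weight initialisation.
for i in range(1000):
    NN.train(X, y)
print("Loss after extended training: " + str(np.mean(np.square(y - NN.forward(X)))))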
|
|
"""Smolyak sparse grid constructor."""
from collections import defaultdict
from itertools import product
import numpy
from scipy.special import comb
import numpoly
import chaospy
def construct_sparse_grid(
order,
dist,
growth=None,
recurrence_algorithm="stieltjes",
rule="gaussian",
tolerance=1e-10,
scaling=3,
n_max=5000,
):
"""
Smolyak sparse grid constructor.
Args:
order (int, numpy.ndarray):
The order of the grid. If ``numpy.ndarray``, it overrides both
``dim`` and ``skew``.
dist (chaospy.distributions.baseclass.Distribution):
The distribution which density will be used as weight function.
growth (bool, None):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples. Defaults to the same
value as ``sparse`` if omitted.
recurrence_algorithm (str):
Name of the algorithm used to generate abscissas and weights in
case of Gaussian quadrature scheme. If omitted, ``analytical`` will
be tried first, and ``stieltjes`` used if that fails.
rule (str):
Rule for generating abscissas and weights. Either done with
quadrature rules, or with random samples with constant weights.
tolerance (float):
The allowed relative error in norm between two quadrature orders
before method assumes convergence.
scaling (float):
A multiplier the adaptive order increases with for each step
quadrature order is not converged. Use 0 to indicate unit
increments.
n_max (int):
The allowed number of quadrature points to use in approximation.
Returns:
(numpy.ndarray, numpy.ndarray):
Abscissas and weights created from sparse grid rule. Flatten such
that ``abscissas.shape == (len(dist), len(weights))``.
Example:
>>> distribution = chaospy.J(
... chaospy.Normal(0, 1), chaospy.Uniform(-1, 1))
>>> abscissas, weights = construct_sparse_grid(1, distribution)
>>> abscissas.round(4)
array([[-1. , 0. , 0. , 0. , 1. ],
[ 0. , -0.5774, 0. , 0.5774, 0. ]])
>>> weights.round(4)
array([ 0.5, 0.5, -1. , 0.5, 0.5])
>>> abscissas, weights = construct_sparse_grid([2, 1], distribution)
>>> abscissas.round(2)
array([[-1.73, -1. , -1. , -1. , 0. , 1. , 1. , 1. , 1.73],
[ 0. , -0.58, 0. , 0.58, 0. , -0.58, 0. , 0.58, 0. ]])
>>> weights.round(2)
array([ 0.17, 0.25, -0.5 , 0.25, 0.67, 0.25, -0.5 , 0.25, 0.17])
"""
orders = order*numpy.ones(len(dist), dtype=int)
assert isinstance(dist, chaospy.Distribution), "dist must be chaospy.Distribution"
dist = dist if isinstance(dist, (chaospy.J, chaospy.Iid)) else chaospy.J(dist)
if isinstance(rule, str):
rule = (rule,)*len(dist)
x_lookup, w_lookup = _construct_lookup(
orders=orders,
dists=dist,
growth=growth,
recurrence_algorithm=recurrence_algorithm,
rules=rule,
tolerance=tolerance,
scaling=scaling,
n_max=n_max,
)
collection = _construct_collection(
order, dist, x_lookup, w_lookup)
abscissas = sorted(collection)
weights = numpy.array([collection[key] for key in abscissas])
abscissas = numpy.array(abscissas).T
return abscissas, weights
def _construct_collection(
orders,
dist,
x_lookup,
w_lookup,
):
"""Create a collection of {abscissa: weight} key-value pairs."""
order = numpy.min(orders)
skew = orders-order
# Indices and coefficients used in the calculations
indices = numpoly.glexindex(
order-len(dist)+1, order+1, dimensions=len(dist))
coeffs = numpy.sum(indices, -1)
coeffs = (2*((order-coeffs+1) % 2)-1)*comb(len(dist)-1, order-coeffs)
collection = defaultdict(float)
for bidx, coeff in zip(indices+skew, coeffs.tolist()):
abscissas = [value[idx] for idx, value in zip(bidx, x_lookup)]
weights = [value[idx] for idx, value in zip(bidx, w_lookup)]
for abscissa, weight in zip(product(*abscissas), product(*weights)):
collection[abscissa] += numpy.prod(weight)*coeff
return collection
def _construct_lookup(
orders,
dists,
growth,
recurrence_algorithm,
rules,
tolerance,
scaling,
n_max,
):
"""
Create abscissas and weights look-up table so values do not need to be
    re-calculated on the fly.
"""
x_lookup = []
w_lookup = []
for max_order, dist, rule in zip(orders, dists, rules):
x_lookup.append([])
w_lookup.append([])
for order in range(max_order+1):
(abscissas,), weights = chaospy.generate_quadrature(
order=order,
dist=dist,
growth=growth,
recurrence_algorithm=recurrence_algorithm,
rule=rule,
tolerance=tolerance,
scaling=scaling,
n_max=n_max,
)
x_lookup[-1].append(abscissas)
w_lookup[-1].append(weights)
return x_lookup, w_lookup
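if __name__ == "__main__":
    # Hedged demo (not part of the original module): for a probability
    # measure the Smolyak combination coefficients sum to one, so the
    # sparse-grid weights should too, as in the docstring example above.
    distribution = chaospy.J(chaospy.Normal(0, 1), chaospy.Uniform(-1, 1))
    _, weights = construct_sparse_grid(2, distribution)
    print(round(float(weights.sum()), 6))  # expected: 1.0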
|
|
import torch
import torch.nn as nn
import numpy as np
from src.IoU import *
from src.utils import *
from config import config
CONFIG = config()
def Validate(model, validloader, criterion, valid_loss_min, device, model_path):
valid_loss = 0
val_iou = []
val_losses = []
model.eval()
for i, val_data in enumerate(validloader):
inp, masks, _ = val_data
inp, masks = inp.to(device), masks.to(device)
out = model(inp)
val_target = masks.argmax(1)
val_loss = criterion(out, val_target.long())
valid_loss += val_loss.item() * inp.size(0)
iou = iou_pytorch(out.argmax(1), val_target)
val_iou.extend(iou)
miou = torch.FloatTensor(val_iou).mean()
valid_loss = valid_loss / len(validloader.dataset)
val_losses.append(valid_loss)
print(f'\t\t Validation Loss: {valid_loss:.4f},',f' Validation IoU: {miou:.3f}')
if np.mean(val_losses) <= valid_loss_min:
torch.save(model.state_dict(), model_path+'/state_dict.pt')
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min,np.mean(val_losses))+'\n')
valid_loss_min = np.mean(val_losses)
return valid_loss, valid_loss_min
def Test_eval(model, testloader, criterion, model_save_pth, device):
model.load_state_dict(torch.load(model_save_pth))
model.eval()
test_loss = 0
imgs, masks, preds = [], [], []
for i, test_data in enumerate(testloader):
img, mask = test_data
inp, mask = img.to(device), mask.to(device)
imgs.extend(inp.cpu().numpy())
masks.extend(mask.cpu().numpy())
out = model(inp.float())
preds.extend(out.detach().cpu().numpy())
target = mask.argmax(1)
loss = criterion(out, target.long())
test_loss += loss.item() * inp.size(0)
    test_loss = test_loss / len(testloader.dataset)
pred = mask_to_rgb(np.array(preds), CONFIG.id2code)
print(f"Test loss is: {test_loss:.4f}")
return np.array(imgs), np.array(masks), np.array(pred)
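# --- Hedged usage sketch (not part of the original module) ---
# Wiring Validate into a training loop; the model, loaders and epoch count
# below are hypothetical, with one-hot masks shaped (N, C, H, W).
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     criterion = nn.CrossEntropyLoss()
#     valid_loss_min = np.Inf
#     for epoch in range(num_epochs):
#         ...  # one training epoch
#         valid_loss, valid_loss_min = Validate(
#             model, validloader, criterion, valid_loss_min, device, "./model")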
|
|
""" Copyright (C) 2019 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
This file incorporates work covered by the following copyright and permission notice:
Copyright (c) 2019 LI RUOTENG
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
UNKNOWN_FLOW_THRESH = 1e7
def evaluate_flow(gt, pred):
"""
evaluate the estimated optical flow end point error according to ground truth provided
:param gt: ground truth file path
:param pred: estimated optical flow file path
:return: end point error, float32
"""
# Read flow files and calculate the errors
gt_flow = read_flow(gt) # ground truth flow
eva_flow = read_flow(pred) # predicted flow
# Calculate errors
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], eva_flow[:, :, 0], eva_flow[:, :, 1])
return average_pe
def show_flow(filename):
"""
visualize optical flow map using matplotlib
:param filename: optical flow file
:return: None
"""
flow = read_flow(filename)
img = flow_to_image(flow)
plt.imshow(img)
plt.show()
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
def read_flow(filename):
"""
read optical flow in Middlebury .flo file format
:param filename:
:return:
"""
f = open(filename, 'rb')
magic = np.fromfile(f, np.float32, count=1)
data2d = None
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
else:
w = np.fromfile(f, np.int32, count=1)[0]
h = np.fromfile(f, np.int32, count=1)[0]
data2d = np.fromfile(f, np.float32, count=2 * w * h)
# reshape data into 3D array (columns, rows, channels)
data2d = np.reshape(data2d, (h, w, 2))
f.close()
return data2d
# WARNING: this will work on little-endian architectures only!
def write_flow(flow, filename):
"""
write optical flow in Middlebury .flo format
:param flow: optical flow map
:param filename: optical flow file path to be saved
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
    # NOTE: this writer expects a single-channel (2-D) flow map; the second
    # channel of the .flo file is zero-filled below
    (height, width) = flow.shape
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
empty_map = np.zeros((height, width), dtype=np.float32)
data = np.dstack((flow, empty_map))
magic.tofile(f)
w.tofile(f)
h.tofile(f)
data.tofile(f)
f.close()
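# Round-trip sketch: since write_flow above takes a 2-D map and zero-pads the
# second channel, only the first channel survives the trip.
#   u = np.random.rand(4, 5).astype(np.float32)
#   write_flow(u, '/tmp/demo.flo')
#   back = read_flow('/tmp/demo.flo')   # shape (4, 5, 2); back[..., 0] == u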
def flow_error(tu, tv, u, v):
"""
Calculate average end point error
:param tu: ground-truth horizontal flow map
:param tv: ground-truth vertical flow map
:param u: estimated horizontal flow map
:param v: estimated vertical flow map
:return: End point error of the estimated flow
"""
smallflow = 0.0
'''
stu = tu[bord+1:end-bord,bord+1:end-bord]
stv = tv[bord+1:end-bord,bord+1:end-bord]
su = u[bord+1:end-bord,bord+1:end-bord]
sv = v[bord+1:end-bord,bord+1:end-bord]
'''
    # take copies: the unknown-flow masking below must not mutate the caller's arrays
    stu = tu.copy()
    stv = tv.copy()
    su = u.copy()
    sv = v.copy()
idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | (abs(stv) > UNKNOWN_FLOW_THRESH)
stu[idxUnknow] = 0
stv[idxUnknow] = 0
su[idxUnknow] = 0
sv[idxUnknow] = 0
    ind2 = (np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)
index_su = su[ind2]
index_sv = sv[ind2]
an = 1.0 / np.sqrt(index_su ** 2 + index_sv ** 2 + 1)
un = index_su * an
vn = index_sv * an
index_stu = stu[ind2]
index_stv = stv[ind2]
tn = 1.0 / np.sqrt(index_stu ** 2 + index_stv ** 2 + 1)
tun = index_stu * tn
tvn = index_stv * tn
'''
angle = un * tun + vn * tvn + (an * tn)
index = [angle == 1.0]
angle[index] = 0.999
ang = np.arccos(angle)
mang = np.mean(ang)
mang = mang * 180 / np.pi
'''
epe = np.sqrt((stu - su) ** 2 + (stv - sv) ** 2)
epe = epe[ind2]
mepe = np.mean(epe)
return mepe
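# Sanity-check sketch: identical ground-truth and estimated flows give zero
# end point error.
#   tu = np.ones((4, 4), np.float32)
#   tv = np.ones((4, 4), np.float32)
#   assert flow_error(tu, tv, tu.copy(), tv.copy()) == 0.0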
def flow_to_image(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
    Generate the color wheel according to the Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
    col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
def scale_image(image, new_range):
"""
Linearly scale the image into desired range
:param image: input image
:param new_range: the new range to be aligned
:return: image normalized in new range
"""
min_val = np.min(image).astype(np.float32)
max_val = np.max(image).astype(np.float32)
min_val_new = np.array(min(new_range), dtype=np.float32)
max_val_new = np.array(max(new_range), dtype=np.float32)
scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new
return scaled_image.astype(np.uint8)
|
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Inference Server class used for centralised inference."""
import dataclasses
import datetime
import threading
from typing import Any, Callable, Mapping, Optional, Sequence, Union
import acme
from acme.jax import variable_utils
import jax
import launchpad as lp
@dataclasses.dataclass
class InferenceServerConfig:
"""Configuration options for centralised inference.
Attributes:
batch_size: How many elements to batch together per single inference call.
Auto-computed when not specified.
update_period: Frequency of updating variables from the variable source.
It is passed to VariableClient. Auto-computed when not specified.
    timeout: Time after which an incomplete batch is executed (the batch is
      padded, so the batch handler is always called with batch_size
      elements). By default the timeout is effectively disabled (set to
      30 days).
"""
batch_size: Optional[int] = None
update_period: Optional[int] = None
timeout: datetime.timedelta = datetime.timedelta(days=30)
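# Example (sketch): batch 16 requests per inference call and refresh variables
# every 100 steps; `timeout` keeps its 30-day default.
#   config = InferenceServerConfig(batch_size=16, update_period=100)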
CallableOrMapping = Union[Callable[..., Any], Mapping[str, Callable[..., Any]]]
class InferenceServer:
"""Centralised, batched inference server."""
def __init__(self,
handler: CallableOrMapping,
variable_source: acme.VariableSource,
devices: Sequence[jax.xla.Device],
config: InferenceServerConfig):
"""Constructs an inference server object.
Args:
handler: A callable or a mapping of callables to be exposed
through the inference server.
      variable_source: Source of variables.
devices: Devices used for executing handlers. All devices are used in
parallel.
config: Inference Server configuration.
"""
self._variable_source = variable_source
self._variable_client = None
self._keys = []
self._devices = devices
self._config = config
self._call_cnt = 0
self._device_params = [None] * len(self._devices)
self._device_params_ids = [None] * len(self._devices)
self._mutex = threading.Lock()
if callable(handler):
self._build_handler(handler, '__call__')
else:
for name in handler:
self._build_handler(handler[name], name)
def _dereference_params(self, arg):
"""Replaces VariableReferences with their corresponding param values."""
if not isinstance(arg, variable_utils.VariableReference):
# All arguments but VariableReference are returned without modifications.
return arg
# Due to batching dimension we take the first element.
variable_name = arg.variable_name[0]
if variable_name not in self._keys:
# Create a new VariableClient which also serves new variables.
self._keys.append(variable_name)
self._variable_client = variable_utils.VariableClient(
client=self._variable_source,
key=self._keys,
update_period=self._config.update_period)
self._variable_client.update()
# Maybe update params, depending on client configuration.
params = self._variable_client.params
device_idx = self._call_cnt % len(self._devices)
# Select device via round robin, and update its params if they changed.
if self._device_params_ids[device_idx] != id(params):
self._device_params_ids[device_idx] = id(params)
self._device_params[device_idx] = jax.device_put(
params, self._devices[device_idx])
if len(self._keys) == 1:
return params
return params[self._keys.index(variable_name)]
def _build_handler(self, handler: Callable[..., Any], name: str):
"""Builds a batched handler for a given callable handler and its name."""
def dereference_params_and_call_handler(*args, **kwargs):
with self._mutex:
# Dereference args corresponding to params, leaving others unchanged.
args_with_dereferenced_params = [
self._dereference_params(arg) for arg in args
]
kwargs_with_dereferenced_params = {
key: self._dereference_params(value)
for key, value in kwargs.items()
}
self._call_cnt += 1
return handler(*args_with_dereferenced_params,
**kwargs_with_dereferenced_params)
self.__setattr__(
name,
lp.batched_handler(
batch_size=self._config.batch_size,
timeout=self._config.timeout,
pad_batch=True,
max_parallelism=2 *
len(self._devices))(dereference_params_and_call_handler))
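# Construction sketch (illustrative; `apply_fn` and `learner` are assumed to
# exist elsewhere, e.g. inside a Launchpad program where `learner` is the
# acme.VariableSource):
#   server = InferenceServer(
#       handler=jax.jit(apply_fn),
#       variable_source=learner,
#       devices=jax.local_devices(),
#       config=InferenceServerConfig(batch_size=16))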
|
|
import os
import re
import numpy as np
from AlphaZero.processing.go.game_converter import GameConverter
def selfplay_to_h5(model_name, base_dir='data'):
""" Takes a model that has just generated the selfplay data, combine everything into a single h5 file.
And store the h5 file as 'train.h5' in the same folder.
Arguments:
model_name: name of the model
base_dir: the directory containing the folder selfplay.
"""
feature_list = ["board_history", "color"]
converter = GameConverter(feature_list)
# From game converter
def _is_sgf(fname):
return fname.strip()[-4:] == ".sgf"
def _walk_all_sgfs(root):
"""a helper function/generator to get all SGF files in subdirectories of root
"""
for (dirpath, dirname, _files) in os.walk(root):
for filename in _files:
if _is_sgf(filename):
# find the corresponding pkl
pkl_name = filename.strip()[:-4] + '.pkl'
if os.path.exists(os.path.join(dirpath, pkl_name)):
# yield the full (relative) path to the file
yield os.path.join(dirpath, filename), os.path.join(dirpath, pkl_name)
files = _walk_all_sgfs(os.path.join(base_dir, 'selfplay', model_name))
converter.selfplay_to_hdf5(files, os.path.join(base_dir, 'selfplay', model_name, 'train.h5'), 19)
def get_current_time():
    return '_'.join(re.findall(r'\d+', str(np.datetime64('now'))))
def combined_selfplay_h5_train_data_generator(h5_files, num_batch):
state_datasets = [h5f['states'] for h5f in h5_files]
search_probs_datasets = [h5f['search_probs'] for h5f in h5_files]
result_datasets = [h5f['results'] for h5f in h5_files]
    # h5py's Dataset.value accessor was removed in h5py 3.x; ds[()] reads the full array
    state_dataset = np.concatenate([ds[()] for ds in state_datasets], axis=0)
    search_probs_dataset = np.concatenate([ds[()] for ds in search_probs_datasets], axis=0)
    result_dataset = np.concatenate([ds[()] for ds in result_datasets], axis=0)
    n_total_data = state_dataset.shape[0]
    shuffle_indices = np.random.permutation(n_total_data)
    indices = shuffle_indices[0:n_total_data]
    return _selfplay_shuffled_hdf5_batch_generator(state_dataset, search_probs_dataset, result_dataset,
                                                   indices, num_batch)
def _selfplay_shuffled_hdf5_batch_generator(state_dataset, search_probs_dataset, result_dataset,
indices, batch_size):
"""A generator of batches of training data for use with the fit_generator function
of Keras. Data is accessed in the order of the given indices for shuffling.
"""
state_batch_shape = (batch_size,) + state_dataset.shape[1:]
game_size = state_batch_shape[-1]
Xbatch = np.zeros(state_batch_shape)
Ybatch = np.zeros((batch_size, game_size * game_size + 1), dtype=np.float32)
Zbatch = np.zeros(batch_size)
batch_idx = 0
while True:
for data_idx in indices:
state = np.array([plane for plane in state_dataset[data_idx]])
search_probs = search_probs_dataset[data_idx]
result = result_dataset[data_idx]
Xbatch[batch_idx] = state
Ybatch[batch_idx] = search_probs
Zbatch[batch_idx] = result
batch_idx += 1
if batch_idx == batch_size:
batch_idx = 0
yield (Xbatch, Ybatch, Zbatch)
def shuffled_npy_batch_generator(state_dataset, search_probs_dataset, result_dataset, indices, batch_size):
state_batch_shape = (batch_size,) + state_dataset.shape[1:]
game_size = state_batch_shape[-1]
Xbatch = np.zeros(state_batch_shape)
Ybatch = np.zeros((batch_size, game_size * game_size + 1), dtype=np.float32)
Zbatch = np.zeros(batch_size)
batch_idx = 0
while True:
for data_idx in indices:
state = np.array([plane for plane in state_dataset[data_idx]])
search_probs = search_probs_dataset[data_idx]
result = result_dataset[data_idx]
Xbatch[batch_idx] = state
Ybatch[batch_idx] = search_probs
Zbatch[batch_idx] = result
batch_idx += 1
if batch_idx == batch_size:
batch_idx = 0
yield (Xbatch, Ybatch, Zbatch)
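# Usage sketch (file names are illustrative; the h5 files are the ones
# produced by selfplay_to_h5 above):
#   import h5py
#   h5_files = [h5py.File('data/selfplay/model_a/train.h5', 'r')]
#   gen = combined_selfplay_h5_train_data_generator(h5_files, num_batch=32)
#   Xbatch, Ybatch, Zbatch = next(gen)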
|
|
"""Turns a MusicXML file into a pandas DataFrame."""
import io
from itertools import combinations
from fractions import Fraction
import music21
from music21.interval import Interval
from music21.pitch import Pitch
from music21.chord import Chord
from music21.note import Rest
import numpy as np
import pandas as pd
from .cache import m21Interval
from .common import FIXEDOFFSET, FLOATSCALE
from .texturizers import (
applyTextureTemplate,
available_durations,
available_number_of_notes,
)
S_COLUMNS = [
"s_offset",
"s_duration",
"s_measure",
"s_notes",
"s_intervals",
"s_isOnset",
]
S_LISTTYPE_COLUMNS = [
"s_notes",
"s_intervals",
"s_isOnset",
]
def _m21Parse(f, fmt=None):
return music21.converter.parse(f, format=fmt)
def _measureNumberShift(m21Score):
firstMeasure = m21Score.parts[0].measure(0) or m21Score.parts[0].measure(1)
    isAnacrusis = firstMeasure.paddingLeft > 0.0
if isAnacrusis and firstMeasure.number == 1:
measureNumberShift = -1
else:
measureNumberShift = 0
return measureNumberShift
def _lastOffset(m21Score):
lastMeasure = m21Score.parts[0].measure(-1)
filledDuration = lastMeasure.duration.quarterLength / float(
lastMeasure.barDurationProportion()
)
lastOffset = lastMeasure.offset + filledDuration
return lastOffset
def _initialDataFrame(s, fmt=None):
"""Parses a score and produces a pandas dataframe.
The features obtained are the note names, their position in the score,
measure number, and their ties (in case something fancy needs to be done,
with the tie information).
"""
dfdict = {col: [] for col in S_COLUMNS}
measureNumberShift = _measureNumberShift(s)
for c in s.chordify().flat.notesAndRests:
dfdict["s_offset"].append(round(float(c.offset), FLOATSCALE))
dfdict["s_duration"].append(round(float(c.quarterLength), FLOATSCALE))
dfdict["s_measure"].append(c.measureNumber + measureNumberShift)
if isinstance(c, Rest):
# We need dummy entries for rests at the beginning of a measure
dfdict["s_notes"].append(np.nan)
dfdict["s_intervals"].append(np.nan)
dfdict["s_isOnset"].append(np.nan)
continue
dfdict["s_notes"].append([n.pitch.nameWithOctave for n in c])
pitches = [p.nameWithOctave for p in c.pitches]
intervs = [m21Interval(pitches[0], p).simpleName for p in pitches[1:]]
dfdict["s_intervals"].append(intervs)
onsets = [(not n.tie or n.tie.type == "start") for n in c]
dfdict["s_isOnset"].append(onsets)
df = pd.DataFrame(dfdict)
currentLastOffset = float(df.tail(1).s_offset) + float(
df.tail(1).s_duration
)
deltaDuration = _lastOffset(s) - currentLastOffset
df.loc[len(df) - 1, "s_duration"] += deltaDuration
df.set_index("s_offset", inplace=True)
df = df[~df.index.duplicated()]
return df
def _reindexDataFrame(df, fixedOffset=FIXEDOFFSET):
"""Reindexes a dataframe according to a fixed note-value.
It could be said that the DataFrame produced by parseScore
is a "salami-sliced" version of the score. This is intuitive
for humans, but does not really work in machine learning.
What works, is to slice the score in fixed note intervals,
for example, a sixteenth note. This reindex function does
exactly that.
"""
firstRow = df.head(1)
lastRow = df.tail(1)
minOffset = firstRow.index.to_numpy()[0]
maxOffset = (lastRow.index + lastRow.s_duration).to_numpy()[0]
newIndex = np.arange(minOffset, maxOffset, fixedOffset)
# All operations done over the full index, i.e., fixed-timesteps
# plus original onsets. Later, original onsets (e.g., triplets)
# are removed and just the fixed-timesteps are kept
df = df.reindex(index=df.index.union(newIndex))
df.s_notes.fillna(method="ffill", inplace=True)
df.s_notes.fillna(method="bfill", inplace=True)
# the "isOnset" column is hard to generate in fixed-timesteps
# however, it allows us to encode a "hold" symbol if we wanted to
newCol = pd.Series(
[[False] * n for n in df.s_notes.str.len().to_list()], index=df.index
)
df.s_isOnset.fillna(value=newCol, inplace=True)
df.fillna(method="ffill", inplace=True)
df.fillna(method="bfill", inplace=True)
df = df.reindex(index=newIndex)
return df
def _engraveScore(df):
"""Useful for debugging _texturizeAnnotationScore."""
chords = music21.stream.Stream()
for row in df.itertuples():
if row.s_measure == 0:
continue
pitches = row.s_notes
duration = Fraction(row.s_duration).limit_denominator(2048)
chord = Chord(pitches, quarterLength=duration)
chords.append(chord)
return chords
def _texturizeAnnotationScore(df, duration, numberOfNotes):
# Preemptively, remove any notion of held notes in an annotation file
df["s_isOnset"] = df.s_isOnset.apply(lambda l: [True for _ in l])
outputdf = df.copy()
# A copy because we don't want these two temporary columns in the output
df["notesNumber"] = df.s_notes.apply(len)
df["allOnsets"] = df.s_isOnset.apply(all)
# Which block chords can we replace with a more complex texture
replaceable = df[
(df.s_duration == duration)
& (df.notesNumber == numberOfNotes)
& (df.allOnsets)
]
for row in replaceable.itertuples():
offset = row.Index
measure = row.s_measure
notes = row.s_notes
intervals = [
Interval(Pitch(n1), Pitch(n2)).simpleName
for n1, n2 in combinations(notes, 2)
]
texture = applyTextureTemplate(duration, notes, intervals)
textureF = io.StringIO(texture)
texturedf = pd.read_csv(textureF)
texturedf["s_offset"] += offset
texturedf["s_measure"] = measure
for col in S_LISTTYPE_COLUMNS:
texturedf[col] = texturedf[col].apply(eval)
texturedf.set_index("s_offset", inplace=True)
for index, row in texturedf.iterrows():
outputdf.loc[index] = row
outputdf.sort_index(inplace=True)
return outputdf
def parseScore(f, fmt=None, fixedOffset=FIXEDOFFSET, eventBased=False):
# Step 0: Use music21 to parse the score
s = _m21Parse(f, fmt)
# Step 1: Parse and produce a salami-sliced dataset
df = _initialDataFrame(s, fmt)
# Step 2: Turn salami-slice into fixed-duration steps
if not eventBased:
df = _reindexDataFrame(df, fixedOffset=fixedOffset)
return df
def parseAnnotationAsScore(
f, texturize=False, fixedOffset=FIXEDOFFSET, eventBased=False
):
"""Generates a DataFrame from a synthesized RomanText file.
Args:
f (string): The path to the input RomanText file.
texturize (bool, optional): Texturize the synthetic score. Defaults to False.
fixedOffset (float, optional): The sampling rate in quarter notes. Defaults to FIXEDOFFSET.
eventBased (bool, optional): If True, no fixedOffset sampling is done. Defaults to False.
Returns:
DataFrame: The output DataFrame
"""
fmt = "romantext"
if not texturize:
return parseScore(f, fmt=fmt, fixedOffset=fixedOffset)
# Step 0: Use music21 to parse the score
s = _m21Parse(f, fmt=fmt)
# Step 1: Parse and produce a salami-sliced dataset
df = _initialDataFrame(s, fmt=fmt)
# Step 2: Texturize the dataframe
for duration in available_durations:
for numberOfNotes in available_number_of_notes:
df = _texturizeAnnotationScore(df, duration, numberOfNotes)
# Step 3: Turn salami-slice into fixed-duration steps
if not eventBased:
df = _reindexDataFrame(df, fixedOffset=fixedOffset)
return df
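# Usage sketch (file paths are illustrative):
#   df = parseScore('score.mxl')                        # fixed-timestep rows
#   events = parseScore('score.mxl', eventBased=True)   # salami slices only
#   ann = parseAnnotationAsScore('analysis.rntxt', texturize=True)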
|
|
from apps.geocode import geocoder
import folium
import copy
import branca.colormap as cm
import json
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
from datetime import datetime, timedelta
import leafmap.foliumap as leafmap
from vega_datasets import data
import time
from streamlit.scriptrunner.script_runner import RerunException
ROLLING_WINDOW = 7
relevant_locations = [
"US",
"US_AL",
"US_AK",
"US_AS",
"US_AZ",
"US_AR",
"US_CA",
"US_CO",
"US_CT",
"US_DE",
"US_DC",
"US_FL",
"US_GA",
"US_GU",
"US_HI",
"US_ID",
"US_IL",
"US_IN",
"US_IA",
"US_KS",
"US_KY",
"US_LA",
"US_ME",
"US_MD",
"US_MA",
"US_MI",
"US_MN",
"US_MS",
"US_MO",
"US_MT",
"US_NE",
"US_NV",
"US_NH",
"US_NJ",
"US_NM",
"US_NY",
"US_NC",
"US_ND",
"US_MP",
"US_OH",
"US_OK",
"US_OR",
"US_PA",
"US_PR",
"US_RI",
"US_SC",
"US_SD",
"US_TN",
"US_TX",
"US_UT",
"US_VT",
"US_VA",
"US_VI",
"US_WA",
"US_WV",
"US_WI",
"US_WY",
]
LOCATION_ABBREVIATION_MAP = {
"US": "US",
"ALABAMA": "US_AL",
"ALASKA": "US_AK",
"AMERICAN SAMOA": "US_AS",
"ARIZONA": "US_AZ",
"ARKANSAS": "US_AR",
"CALIFORNIA": "US_CA",
"COLORADO": "US_CO",
"CONNECTICUT": "US_CT",
"DELAWARE": "US_DE",
"DISTRICT OF COLUMBIA": "US_DC",
"FLORIDA": "US_FL",
"GEORGIA": "US_GA",
"GUAM": "US_GU",
"HAWAII": "US_HI",
"IDAHO": "US_ID",
"ILLINOIS": "US_IL",
"INDIANA": "US_IN",
"IOWA": "US_IA",
"KANSAS": "US_KS",
"KENTUCKY": "US_KY",
"LOUISIANA": "US_LA",
"MAINE": "US_ME",
"MARYLAND": "US_MD",
"MASSACHUSETTS": "US_MA",
"MICHIGAN": "US_MI",
"MINNESOTA": "US_MN",
"MISSISSIPPI": "US_MS",
"MISSOURI": "US_MO",
"MONTANA": "US_MT",
"NEBRASKA": "US_NE",
"NEVADA": "US_NV",
"NEW HAMPSHIRE": "US_NH",
"NEW JERSEY": "US_NJ",
"NEW MEXICO": "US_NM",
"NEW YORK": "US_NY",
"NORTH CAROLINA": "US_NC",
"NORTH DAKOTA": "US_ND",
"NORTHERN MARIANA IS": "US_MP",
"OHIO": "US_OH",
"OKLAHOMA": "US_OK",
"OREGON": "US_OR",
"PENNSYLVANIA": "US_PA",
"PUERTO RICO": "US_PR",
"RHODE ISLAND": "US_RI",
"SOUTH CAROLINA": "US_SC",
"SOUTH DAKOTA": "US_SD",
"TENNESSEE": "US_TN",
"TEXAS": "US_TX",
"UTAH": "US_UT",
"VERMONT": "US_VT",
"VIRGINIA": "US_VA",
"VIRGIN ISLANDS": "US_VI",
"WASHINGTON": "US_WA",
"WEST VIRGINIA": "US_WV",
"WISCONSIN": "US_WI",
"WYOMING": "US_WY",
}
INDICATORS_MAP = {
"School Closing": "school_closing",
"Workplace Closing": "workplace_closing",
"Cancel Public Events": "cancel_public_events",
"Restrictions On Gatherings": "restrictions_on_gatherings",
"Public Transport Closing": "public_transport_closing",
"Stay At Home Requirements": "stay_at_home_requirements",
"Restrictions on Internal Movement": "restrictions_on_internal_movement",
"Income Support": "income_support",
"Debt Relief": "debt_relief",
"Fiscal Measures": "fiscal_measures",
"Public Information Campaigns": "public_information_campaigns",
"Testing Policy": "testing_policy",
"Contact Tracing": "contact_tracing",
"Emergency Investment In Healthcare": "emergency_investment_in_healthcare",
"Investment In Vaccines": "investment_in_vaccines",
"Facial Coverings": "facial_coverings",
"Vaccination Policy": "vaccination_policy",
"Stringency Index": "stringency_index",
}
@st.cache
def get_geocoder():
return geocoder("data/usa_shape/usa-states-census-2014.shp")
@st.cache
def read_files():
government_response_df = pd.read_csv(
"data/oxford-government-response_filtered.csv"
)
government_response_df["date"] = pd.to_datetime(government_response_df["date"])
cases_df = pd.read_csv("data/epidemiology_filtered.csv")
cases_df["date"] = pd.to_datetime(cases_df["date"])
with open("data/us_states.json", "r") as fp:
polygons = json.load(fp)
return government_response_df, cases_df, polygons
def compute_correlation(
government_response_df, cases_df, shift, location_key, indicators
):
location_cases = cases_df[cases_df["location_key"] == location_key][
["date", "new_confirmed"]
]
cases_values = (
location_cases["new_confirmed"]
.rolling(ROLLING_WINDOW)
.mean()[ROLLING_WINDOW - 1 :]
)
dates = location_cases["date"][ROLLING_WINDOW - 1 :]
location_indicator_values = government_response_df[
government_response_df["location_key"] == location_key
]
if len(indicators):
indicator_values = location_indicator_values[indicators][
ROLLING_WINDOW - 1 :
].sum(axis=1)
else:
indicator_values = location_indicator_values[ROLLING_WINDOW - 1 :].sum(axis=1)
if shift == 0:
shifted_indicator_values = indicator_values
shifted_case_values = cases_values
else:
shifted_indicator_values = indicator_values[:-shift]
shifted_case_values = cases_values[shift:]
dates = dates[shift:]
shifted_indicator_values /= max(shifted_indicator_values)
# shifted_case_values /= max(shifted_case_values)
corref = np.corrcoef(
x=shifted_indicator_values / max(shifted_indicator_values),
y=shifted_case_values / max(shifted_case_values),
)
return round(corref[0][1], 3)
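# Example (sketch, illustrative values): correlation between the stringency
# index in California and new cases 14 days later:
#   r = compute_correlation(government_response_df, cases_df, shift=14,
#                           location_key='US_CA',
#                           indicators=['stringency_index'])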
def plot_map(polygons):
usa_map = leafmap.Map(center=[37, -90], zoom=4)
colormap = cm.LinearColormap(
["DarkBlue", "LightBlue", "yellow", "red"],
vmin=-1,
vmax=1,
)
colormap.caption = "Correlation"
def get_colour(prop):
if np.isnan(prop["properties"]["Correlation"]):
return "gray"
else:
return colormap(prop["properties"]["Correlation"])
def get_weight(prop):
if prop["properties"]["State"] == st.session_state["selected_region"]:
return 8
else:
return 1
def get_boundary_colour(prop):
if prop["properties"]["State"] == st.session_state["selected_region"]:
return "white"
else:
return "white"
def get_opacity(prop):
if prop["properties"]["State"] == st.session_state["selected_region"]:
return 1.0
else:
return 0.75
geo_map = folium.features.GeoJson(
polygons,
style_function=lambda x: {
"weight": get_weight(x),
"color": get_boundary_colour(x),
"fillColor": get_colour(x),
"fillOpacity": get_opacity(x),
},
tooltip=folium.features.GeoJsonTooltip(
fields=["State", "Correlation"],
aliases=["State: ", "Correlation: "],
style=(
"background-color: white; color: #333333; font-family: arial; font-size: 12px; padding: 10px;"
),
),
)
colormap.add_to(usa_map)
usa_map.add_child(geo_map)
return usa_map
def plot_government_response_dashboard():
government_response_df, cases_df, polygons = read_files()
polygons = copy.deepcopy(polygons)
col1, col2 = st.columns([2, 1])
with col1:
st.markdown("## Effect of government response on COVID 19 morbidity")
st.markdown("***")
st.markdown(
"""
The Oxford Covid-19 [Government Response Tracker](https://www.bsg.ox.ac.uk/research/research-projects/covid-19-government-response-tracker)
(OxCGRT) collects systematic information on policy measures that governments have taken to tackle COVID-19.
        These responses are categorised into different indicator types, which include:
- Containment and closure policies (such as school closures and restrictions in movement)
        - Economic policies (such as income support to citizens and debt relief)
- Health system policies (such as facial coverings)
- Vaccine policies
"""
)
st.markdown(
"""
With this dashboard, we try to gauge which government policies have been most effective in reducing the rise in COVID 19
cases across different states. **Intuitively, we expect a negative correlation between these two, since a stronger government response should entail lower morbidity.**
"""
)
st.markdown(
"""
        Each indicator is assigned an integer value, reflecting the extent of government action.
A higher value indicates a more extensive policy (such as more stringent health system policies,
or higher economic relief to citizens).
More information on how these values
are calculated can be found on the [OxCGRT page](https://www.bsg.ox.ac.uk/research/research-projects/covid-19-government-response-tracker).
"""
)
st.markdown(
"""
        We normalize these indicators to a scale of 0 to 1 for ease of visualisation.
"""
)
st.markdown("***")
col1, col2 = st.columns([2, 1])
with col2:
st.markdown(
"""
When multiple indicators are selected, we aggregate the value by summing up each indicator's value.
Stringency Index is calculated by aggregating all the individual indicators.
"""
)
indicator_strings = st.multiselect(
# "",
"Government Response Indicators:",
INDICATORS_MAP.keys(),
default=["Stringency Index"],
)
with col2:
st.markdown("***")
st.markdown(
"Government policy decisions, even the most extensive ones, often take time in achieving their desired results. We introduce a variable `lag`, which enables a comparision between policy indicators and number of cases `lag` days in the future."
)
shift = st.slider("Number of days in the future (Lag)", 0, 90)
indicators = list(
map(lambda indicator: INDICATORS_MAP[indicator], indicator_strings)
)
for ind, ele in enumerate(polygons["features"]):
location_key = "US_" + ele["id"]
ele["properties"]["State"] = ele["properties"]["name"]
del ele["properties"]["name"]
correlation = compute_correlation(
government_response_df, cases_df, shift, location_key, indicators
)
ele["properties"]["Correlation"] = correlation
location = LOCATION_ABBREVIATION_MAP[st.session_state["selected_region"].upper()]
location_cases = cases_df[cases_df["location_key"] == location][
["date", "new_confirmed"]
]
cases_values = (
location_cases["new_confirmed"]
.rolling(ROLLING_WINDOW)
.mean()[ROLLING_WINDOW - 1 :]
)
dates = location_cases["date"][ROLLING_WINDOW - 1 :]
location_indicator_values = government_response_df[
government_response_df["location_key"] == location
]
if len(indicators):
indicator_values = location_indicator_values[indicators][
ROLLING_WINDOW - 1 :
].sum(axis=1)
else:
indicator_values = location_indicator_values[ROLLING_WINDOW - 1 :].sum(axis=1)
if shift == 0:
shifted_indicator_values = indicator_values
shifted_case_values = cases_values
else:
shifted_indicator_values = indicator_values[:-shift]
shifted_case_values = cases_values[shift:]
dates = dates[shift:]
shifted_indicator_values /= max(shifted_indicator_values)
corref = round(
np.corrcoef(
x=shifted_indicator_values / max(shifted_indicator_values),
y=shifted_case_values / max(shifted_case_values),
)[0][1],
3,
)
with col2:
st.markdown("***")
st.markdown(
"""
<div style="width: 100%; text-align: center;">
<div style="width: 50%; float: left; ">
Selected Region
<br />
<span style = 'font-size: 40px;'>"""
+ st.session_state["selected_region"]
+ """</span>
</div>
<div style="margin-left: 50%;">
Correlation
<br />
<span style = 'font-size: 40px;'>"""
+ str(corref)
+ """</span>
</div>
</div>
""",
unsafe_allow_html=True,
)
usa_map = plot_map(polygons)
with col1:
st.markdown(
"""
            This map shows the correlation between COVID 19 cases and government response indicators for the selected lag across different states.
<br />
""",
unsafe_allow_html=True,
)
usa_map_folium = usa_map.to_streamlit(
height=500, width=900, add_layer_control=True, bidirectional=True
)
st.markdown(
"""
<div style="width: 100%; text-align: left;">
Click on a state to examine its data.
            Click outside the United States boundary to examine aggregated data across all states.
""",
unsafe_allow_html=True,
)
try:
click = usa_map.st_last_click(usa_map_folium)
    except Exception:
click = None
if click is not None:
region = get_geocoder().geocode(click[0], click[1])
if region is None:
region = "US"
else:
region = region["NAME"]
if region != st.session_state["selected_region"]:
st.session_state["selected_region"] = region
st.experimental_rerun()
col1, col2 = st.columns([2, 1])
data = pd.DataFrame()
data["shifted_case_values"] = shifted_case_values.tolist()
data["shifted_indicator_values"] = shifted_indicator_values.tolist()
data["x"] = dates.values
cases_chart = (
alt.Chart(data)
.mark_line(opacity=1, color="#57A44C")
.encode(
alt.X(
"x",
title="Date (2021)",
),
alt.Y(
"shifted_case_values",
title="Number of new cases",
axis=alt.Axis(title="Number of new cases", titleColor="#57A44C"),
),
)
.interactive()
)
trends_chart = (
alt.Chart(data)
.mark_line(opacity=1, color="#5276A7")
.encode(
alt.X(
"x",
title="Date (2021)",
# scale=alt.Scale(domain=['2021-01-01','2021-12-31'])
),
alt.Y(
"shifted_indicator_values",
title="trend",
axis=alt.Axis(
title="Government Response Indicator (normalised)", titleColor="#5276A7"
),
),
)
.interactive()
)
chart = alt.layer(cases_chart, trends_chart).resolve_scale(y="independent")
with col2:
st.markdown(
"***",
)
        text = (
            "This graph compares the number of daily COVID 19 cases and government response indicators (normalised to a scale of 0 to 1) with the selected lag (%d days in the future)"
            % (shift)
        )
st.markdown("<p align=center> %s </p>" % (text), unsafe_allow_html=True)
st.altair_chart(chart, use_container_width=True)
############### HISTORICAL CHART ###############
if (
not len(st.session_state["indicator_history"]["region"])
or st.session_state["indicator_history"]["region"][-1]
!= st.session_state["selected_region"]
or st.session_state["indicator_history"]["indicators"][-1] != set(indicator_strings)
or st.session_state["indicator_history"]["shift"][-1] != shift
or st.session_state["indicator_history"]["correlation"][-1] != corref
):
st.session_state["indicator_history"]["region"].append(
st.session_state["selected_region"]
)
st.session_state["indicator_history"]["indicators"].append(set(indicator_strings))
st.session_state["indicator_history"]["shift"].append(shift)
st.session_state["indicator_history"]["correlation"].append(corref)
historical_data = pd.DataFrame(st.session_state["indicator_history"])
historical_chart = (
alt.Chart(historical_data.reset_index(), title="Search history")
.mark_line(point=True, color="#000000")
.encode(
alt.Y(
"correlation",
title="Correlation",
scale=alt.Scale(domain=[-1, 1]),
# axis=alt.Axis(tickSize=0),
),
alt.X(
"index",
title="History",
scale=alt.Scale(
domain=[-1, len(st.session_state["indicator_history"]["correlation"])],
type="point",
),
axis=alt.Axis(tickMinStep=1),
),
tooltip=[
alt.Tooltip("region", title="Region"),
alt.Tooltip("shift", title="Shift"),
alt.Tooltip("indicators", title="Response Indicators"),
alt.Tooltip("correlation", title="Correlation"),
],
)
.interactive()
)
with col1:
st.markdown("***")
st.markdown("")
st.markdown(
"""
<div style="width: 100%; text-align: left;">
            This graph shows the correlation between COVID 19 cases and government response indicators for previously explored parameters in this dashboard.
<b>This can be used to track how changing certain parameters (for instance, lag) affects the correlation.</b>
<br />
<br />
""",
unsafe_allow_html=True,
)
st.altair_chart(historical_chart, use_container_width=True)
|
|
import os
import re
import cv2
import sys
import glob
import random
import numpy as np
from imgaug import augmenters as iaa
from tensorflow.keras.utils import to_categorical
#augmentations to be performed on the timeseries dataset
# seq_img = iaa.Sequential([
# iaa.Crop(px=(1, 16), keep_size=True),
# iaa.Fliplr(0.5),
# iaa.Affine(rotate=(-20, 20), order=1, mode="edge"),
# #iaa.Affine(scale={"x": (0.75, 1.25), "y": (0.75, 1.25)}, mode="edge")
# ])
seq_img = iaa.Sequential([
iaa.Fliplr(0.5), # horizontal flips
iaa.Crop(percent=(0, 0.1)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(
0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.LinearContrast((0.75, 1.5)),
# Add gaussian noise.
# For 50% of all images, we sample the noise once per pixel.
# For the other 50% of all images, we sample the noise per pixel AND
# channel. This can change the color (not only brightness) of the
# pixels.
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=0.2),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-25, 25),
shear=(-8, 8),
mode="edge"
)
], random_order=True) # apply augmenters in random order
def split_num(s):
return list(filter(None, re.split(r'(\d+)', s)))
def augment_timeseries_imgs(images, seq):
seq_img_i = seq.to_deterministic()
aug_time_series = [seq_img_i.augment_image(frame.astype(np.float32)) for frame in images]
return aug_time_series
def time_series_generator(img_dir, config, aug=True):
    """A generator that yields a batch of image sequences plus their one-hot labels."""
dirs = glob.glob(img_dir + "/*")
dirs2 = []
for dir in dirs:
dirs2.append(dir.replace('\\', '/'))
random.shuffle(dirs2)
counter = 0
while True:
input_images = np.zeros((config.batch_size, config.seq_len, config.width, config.height, 3))
labels = np.zeros((config.batch_size, 3))
if (counter + config.batch_size >= len(dirs2)):
counter = 0
for i in range(config.batch_size):
input_imgs = glob.glob(dirs2[counter + i] + "/img_*.jpg")
imgs = []
            input_imgs.sort(key=lambda f: int(re.sub(r'\D', '', f)))
for img in input_imgs:
temp = cv2.imread(img)
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
temp = cv2.resize(temp, (config.height, config.width))
imgs.append(np.asarray(temp))
input_images[i] = imgs
label = split_num(dirs2[counter + i].split('/')[-1])[0]
labels[i] = to_categorical(config.categories.index(label), len(config.categories))
#input_images[i] /= 255.
if aug:
input_images[i] = augment_timeseries_imgs(input_images[i], seq_img)
#input_images[i] *= 255.
yield (input_images, labels)
counter += config.batch_size
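# Usage sketch (a stand-in config object; the real one comes from the
# project's config module and must define `categories` with exactly three
# entries, matching the hard-coded label width above; names are illustrative):
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(batch_size=4, seq_len=8, width=64, height=64,
#                         categories=['walk', 'run', 'jump'])
#   gen = time_series_generator('data/train', cfg, aug=True)
#   batch_images, batch_labels = next(gen)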
|
|
#Author: Michail Mamalakis
#Version: 0.1
#Licence:
#email:mmamalakis1@sheffield.ac.uk
from __future__ import division, print_function
import glob
import matplotlib.patches as patches
import json
import numpy as np
from matplotlib.path import Path
import pydicom
import pydicom.uid
import dicom
import cv2
import matplotlib.pyplot as plt
import os
import scipy.misc
from PIL import Image
from PIL.Image import fromarray
import tensorflow as tf
import vtk
from vtk.util import numpy_support
from vtk.util.numpy_support import vtk_to_numpy
from covid_pipeline import config
import argparse
from keras import utils
from pydicom.uid import ExplicitVRLittleEndian
from pydicom.uid import generate_uid
from PIL import Image, ImageDraw
#from med2image import med2image
class datasetnet:
#####################################
##### INITIALIZATION ##############
####################
    def __init__(self, analysis, path_case='main'):
"""
        Initialization from the config file
"""
args = config.parse_arguments()
self.path_case=path_case
self.rotated="False"
self.channels=args.channels
self.channels2=args.classes
if self.path_case=='main':
self.image_shape=args.image_shape
self.original_image_shape=args.original_image_shape
self.roi_shape=args.roi_shape
self.data_path=args.store_data_test
self.data_path2=args.datapath
self.STORE_TXT=args.store_txt
self.counter_path='/contour/'
self.data_extention = args.data_extention
self.counter_extention = args.counter_extention
self.PATH_IMAGES=''
self.PATH_IMAGES2=''
        # separate the ROI training set from the u-net training set: epi and endo contours are kept separate for the u-net, while ROI detection is trained on both
if (analysis=='train' or analysis=='test') :
self.n_set_pre=analysis
self.n_set=analysis
else:
self.n_set_pre='train_prediction'
self.n_set='train'
self.patient_list=args.patient_list
self.store_contour=args.store_data_test
self.image_part = np.zeros([self.original_image_shape,self.original_image_shape,self.channels])
self.shuffle=args.shuffle
self.num_preprocess_threads=args.num_cores
self.batch_size=args.batch_size
self.STORE_PATH=args.store_data_test
self.type_analysis=args.type_analysis
#####################################
##### BASE FUNCTION OF CLASS############
####################
def create_dataset(self):
"""
Creating the dataset from the images and the contour for the CNN-unet-algorithm.
mask: the matrix of labels in txt no visible (exception in main is the same with cmask)
cmask: the matrix of labels in image version visible (polygon version)
"""
# Create dataset from determine json file
series = json.load(open(self.STORE_TXT+'/'+self.patient_list))[self.n_set]
if series=={}:
print('The %s is empty...' %self.n_set)
X, Y, cmask_total=[],[],[]
else:
self.count1=0
self.count3=0
            # load the data according to how they are stored
images_total,images_total_full,mask_total,cmask_total=self.quick_use_data(series)
            # reshape the outputs
X = np.reshape(np.array(images_total), [len(images_total), self.image_shape, self.image_shape, self.channels])
# store the masks
Y=np.array(mask_total)
Xtotal=np.array(images_total_full)
if self.type_analysis=='SE':
Y = np.reshape(np.array(mask_total), [len(mask_total), mask_total.shape[3], self.roi_shape, self.roi_shape])
Xtotal = np.reshape(np.array(images_total_full), [len(images_total_full), self.original_image_shape, self.original_image_shape, self.channels])
print('Dataset shape :', X.shape, Y.shape)
return X,Xtotal, Y, cmask_total, self.STORE_TXT
def load_images(self,contours_list_series,case=None,serie=None):
"""
        Load images based on the path (dcm, nii, png, jpg)
"""
images=[]
images_fullsize=[]
#print(len(contours_list_series))
for o in range(0,len(contours_list_series)):
c=contours_list_series[o]
# Get images path base the store style for ROI
image_path=c
# open image as numpy array
if self.data_extention=='jpeg' or self.data_extention=='png' or self.data_extention=='tif' or self.data_extention=='jpg':
#print(image_path)
if self.channels==1:
image_part_store = Image.open(image_path).convert('L')
else:
image_part_store = Image.open(image_path).convert('RGB')
#print(image_part_store.bits)
self.image_part= np.array(image_part_store)
#print(self.image_part.shape)
elif self.data_extention=='dcm':
# dicom and pydicom call
try:
if self.channels==1:
self.image_part = dicom.read_file(image_path).pixel_array
else:
self.image_part = dicom.read_file(image_path).pixel_array
#print("normal dicom")
except dicom.errors.InvalidDicomError as exc:
try:
dcm_file = pydicom.dcmread(image_path)
self.image_part =dcm_file.pixel_array.astype(float)
#print("normal pydicom")
except pydicom.errors.InvalidDicomError as exc:
print("general exception dicom")
self._dcm = dicom.read_file(image_path,force=True)
image = self._dcm.pixel_array
                        self.image_part = image
                        print(self.image_part.shape)
except:
pass
finally:
pass
finally:
pass
#self.image_part= ArrayDicom
else:
print("Please define the extension of image data 'dcm' 'jpeg' ")
#check for rotate image
height, width = np.array(self.image_part).shape[0],np.array(self.image_part).shape[1]
if width < height:
np.rot90(np.array(self.image_part))
self.rotated="True"
else:
self.image_part=self.image_part
#resize
if (self.type_analysis=='SE'):
image_p=cv2.resize((self.image_part), (self.image_shape, self.image_shape),interpolation=cv2.INTER_NEAREST)
full_image=cv2.resize((self.image_part), (self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST)
image_p=np.reshape(np.array(image_p),[ 1, self.channels, np.array(image_p).shape[0], np.array(image_p).shape[1]])
full_image=np.reshape(np.array(full_image),[1, self.channels, np.array(full_image).shape[0], np.array(full_image).shape[1]])
if (self.type_analysis=='CL'):
image_p=np.reshape(np.array(self.image_part),[ self.channels*np.array(self.image_part).shape[0], np.array(self.image_part).shape[1]])
full_image=np.reshape(np.array(self.image_part),[ self.channels*np.array(self.image_part).shape[0], np.array(self.image_part).shape[1]])
image_p=cv2.resize(image_p, (self.channels*self.image_shape, self.image_shape),interpolation=cv2.INTER_NEAREST)
full_image=cv2.resize(full_image, (self.channels*self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST)
image_p=np.reshape(np.array(image_p),[ 1, self.channels, self.image_shape, self.image_shape])
full_image=np.reshape(np.array(full_image),[1, self.channels, self.original_image_shape, self.original_image_shape])
if o==0:
images=image_p
images_fullsize=full_image
#print(images.shape)
if o!=0:
images=np.append(image_p, images, axis=0)
images_fullsize=np.append(full_image, images_fullsize, axis=0)
#print(len(np.array(images).shape))
if (len(np.array(images).shape)==4 and self.type_analysis=='SE'):
images=np.reshape(np.array(images),[len(np.array(images))*np.array(images).shape[3], np.array(images).shape[1], np.array(images).shape[2]])
images_fullsize=np.reshape(np.array(images_fullsize),[len(np.array(images_fullsize))*np.array(images_fullsize).shape[3], np.array(images_fullsize).shape[1], np.array(images_fullsize).shape[2]])
        #print(np.array(images).shape)
self.count1=self.count1+1
#case without dependence of mask with image in data slices etc)
return images,images_fullsize
def load_masks(self,contours_list_series,contour_path_base=None,label=None,folder=0):
"""
        Load masks based on the path (txt, vtk, jpeg, png)
"""
Ymask=np.zeros((1,2))
contours_jpeg, contours,contours_mask = [],[],[]
#print(contours_list)
for o in range(0,len(contours_list_series)):
c=contours_list_series[o]
#print('counder list :', c)
# Get contours and images path
#idx_contour = contours_list_series.index(c)
#print(idx_contour)
each_contour_path=c
if (self.type_analysis=='SE'):
if self.counter_extention=='txt':
contour,count_mask=self.txt_converter(each_contour_path)
contours.append(contour)
contours_mask.append(count_mask)
elif self.counter_extention=='vtk':
contour,count_mask=self.vtk_converter(each_contour_path)
contours.append(contour)
contours_mask.append(count_mask)
elif self.counter_extention=='jpeg' or self.counter_extention=='png'or self.data_extention=='tif' or self.data_extention=='jpg':
contour_mask_store = Image.open(each_contour_path).convert('L')
contours_mask.append(cv2.resize(np.array(contour_mask_store), (self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST) )
contours_jpeg.append(cv2.resize(np.array(contour_mask_store), (self.roi_shape, self.roi_shape),interpolation=cv2.INTER_AREA) )
# dicom and pydicom call
elif self.counter_extention=='dcm':
try:
contour_mask_store= dicom.read_file(each_contour_path).pixel_array
contours_mask.append(cv2.resize(np.array(contour_mask_store), (self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST) )
contours_jpeg.append(cv2.resize(np.array(contour_mask_store), (self.roi_shape, self.roi_shape),interpolation=cv2.INTER_AREA) )
except dicom.errors.InvalidDicomError as exc:
try:
dcm_file = pydicom.dcmread(each_contour_path)
contour_mask_store =dcm_file.pixel_array.astype(float)
contours_mask.append(cv2.resize(np.array(contour_mask_store), (self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST) )
contours_jpeg.append(cv2.resize(np.array(contour_mask_store), (self.roi_shape, self.roi_shape),interpolation=cv2.INTER_AREA) )
except pydicom.errors.InvalidDicomError as exc:
print("general exception dicom")
in_dcm = dicom.read_file(each_contour_path,force=True)
contour_mask_store = in_dcm.pixel_array
contours_mask.append(cv2.resize(np.array(contour_mask_store), (self.original_image_shape, self.original_image_shape),interpolation=cv2.INTER_NEAREST) )
contours_jpeg.append(cv2.resize(np.array(contour_mask_store), (self.roi_shape, self.roi_shape),interpolation=cv2.INTER_AREA) )
print(self.image_part.shape)
except:
pass
finally:
pass
finally:
pass
else:
print("Please define the extension of mask data 'dcm' 'txt' 'jpeg' 'vtk' ")
if self.counter_extention=='dcm' or self.counter_extention=='jpeg' or self.counter_extention=='png'or self.data_extention=='tif' or self.counter_extention=='jpg':
Ymask = (contours_jpeg)
else:
Ymask = (contours)
Ymask= np.reshape(np.array(Ymask,dtype='uint8'), [len(Ymask), self.roi_shape, self.roi_shape, self.channels2])
contours_mask=np.reshape(np.array(contours_mask,dtype='uint8'), [len(contours_mask), self.original_image_shape, self.original_image_shape, self.channels2])
elif (self.type_analysis=='CL'):
contour_mask_store =np.zeros((self.channels2), dtype=int)
contour_mask_store[folder]=1
contours_mask.append(contour_mask_store)
contours_jpeg.append(contour_mask_store)
if self.counter_extention=='dcm' or self.counter_extention=='jpeg' or self.counter_extention=='png'or self.data_extention=='tif' or self.counter_extention=='jpg':
Ymask = (contours_jpeg)
else:
Ymask = (contours)
#Ymask= convert_to_one_hot(Ymask, self.channels2).T
#contours_mask=convert_to_one_hot(contours_jpeg, self.channels2).T
else:
print("Please define type analysis in config file 'SE' or 'CL' ")
self.count3=self.count3+1
Ymask2=Ymask
return Ymask2,contours_mask
#####################################
#####STORE STYLE##############
####################
def quick_use_data(self,series):
"""
        Quick use of a small random dataset; images and masks are matched by path (the mask folder sits inside each patient's image folder).
"""
images_total,images_total_full,mask_total,cmask_total=[],[],[],[]
o=0
folder=0
for case, serie in series.items():
contour_path_base = self.data_path + '/%s' % (case)
print(contour_path_base)
contours_list_single=[]
contours_list = sorted(glob.glob(contour_path_base +'/*.%s' %self.data_extention))
#print(contours_list)
if self.type_analysis=='CL':
contours_list2 = sorted(glob.glob(contour_path_base + '/*.%s' %self.data_extention))
else:
            contours_list2 = sorted(glob.glob(contour_path_base + self.counter_path+'*.%s' %self.counter_extention))  # NOTE: needs modification: masks are gathered by glob, not in series order, so their order can differ from the images
#print(contours_list2)
#contours_list_single.append(contours_list[0])
images,images_full= self.load_images(contours_list)
mask,cmask= self.load_masks(list(contours_list2),folder=folder)
if(o==0):
cmask_total=cmask
mask_total=mask
images_total=images
images_total_full=images_full
print(np.array(images).shape)
print(np.array(images_total).shape)
print(np.array(mask).shape)
print(np.array(mask_total).shape)
if(o!=0):
mask_total=np.append(mask,mask_total,axis=0)
cmask_total=np.append(cmask,cmask_total,axis=0)
images_total=np.append(images,images_total,axis=0)
images_total_full=np.append(images_full,images_total_full,axis=0)
folder=folder+1
o=o+1
Y_total=np.array(mask_total)
cY_total=np.array(cmask_total)
X_total=np.array(images_total)
cX_total=np.array(images_total_full)
print(X_total.shape,cX_total.shape)
print(Y_total.shape,cY_total.shape)
if self.type_analysis=='SE':
X_total=np.reshape(X_total, [X_total.shape[0], X_total.shape[1], X_total.shape[2], self.channels])
cX_total=np.reshape(cX_total, [cX_total.shape[0], cX_total.shape[1], cX_total.shape[2], self.channels])
Y_total=np.reshape(Y_total, [Y_total.shape[0], Y_total.shape[1], Y_total.shape[2], self.channels2])
cY_total=np.reshape(cY_total, [cY_total.shape[0], cY_total.shape[1], cY_total.shape[2], self.channels2])
if(len(X_total.shape)==5):
X_total=np.reshape(X_total, [X_total.shape[0]*X_total.shape[1], X_total.shape[2], X_total.shape[3], self.channels])
cX_total=np.reshape(cX_total, [cX_total.shape[0]*cX_total.shape[1], cX_total.shape[2], cX_total.shape[3], self.channels])
Y_total=np.reshape(Y_total, [Y_total.shape[0]*Y_total.shape[1], Y_total.shape[2], Y_total.shape[3], self.channels2])
cY_total=np.reshape(cY_total, [cY_total.shape[0]*cY_total.shape[1], cY_total.shape[2], cY_total.shape[3], self.channels2])
print(X_total.shape,cX_total.shape)
print(Y_total.shape,cY_total.shape)
return X_total,cX_total,Y_total,cY_total
#####################################
##### Extention OF the Mask DATA##############
####################
def txt_converter(self,each_contour_path):
"""
        Convert the contour from txt extension to jpg
"""
contours,contour_mask=[],[]
if self.path_case=='main':
x, y = np.loadtxt(each_contour_path).T
            if self.rotated=="True":
                # NOTE: assumes self.image_height has been set when a rotated image was loaded
                x, y = y, self.image_height - x
BW_8BIT = 'L'
polygon = list(zip(x, y))
image_dims = (self.original_image_shape, self.original_image_shape)
img = Image.new(BW_8BIT, image_dims, color=0)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
norm=1
contour_mask.append(norm * np.array(img, dtype='uint8'))
contours.append(norm * np.array(img, dtype='uint8'))
return contours ,contour_mask
def vtk_converter(self,each_contour_path):
"""
        Convert the contour from vtk extension to jpg
"""
contours,contour_mask=[],[]
vtkarray = self.GetPointData().GetArray(each_contour_path)
if vtkarray:
array = vtk_to_numpy(vtkarray)
            if array.dtype == np.int8:
                array = array.astype(bool)  # np.bool alias was removed in modern NumPy
if self.path_case=='main':
x, y = np.loadtxt(each_contour_path).T
if self.rotated=="True":
x, y = y, self.image_height - x
BW_8BIT = 'L'
polygon = array
image_dims = (self.original_image_shape, self.original_image_shape)
img = Image.new(BW_8BIT, image_dims, color=0)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
norm=1
contour_mask.append(norm * np.array(img, dtype='uint8'))
contours.append(norm * np.array(img, dtype='uint8'))
return contours,contour_mask
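# Usage sketch (assumes the covid_pipeline config supplies the paths, shapes
# and patient list referenced above):
#   dsn = datasetnet('train', path_case='main')
#   X, Xtotal, Y, cmask, store_txt = dsn.create_dataset()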
|
|
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.io as scio
def vis_gt(im, bboxes, plt_name='output', ext='.png', visualization_folder=None):
"""
A function to visualize the detections
:param im: The image
    :param bboxes: ground-truth boxes, one row per box: [x1, y1, x2, y2, score in [0, 1]]
:param plt_name: The name of the plot
:param ext: The save extension (if visualization_folder is not None)
:param visualization_folder: The folder to save the results
"""
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
if bboxes.shape[0] != 0:
for i in range(bboxes.shape[0]):
bbox = bboxes[i, :]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=(0, bbox[4], 0), linewidth=3)
)
plt.axis('off')
plt.tight_layout()
plt.draw()
if visualization_folder is not None:
if not os.path.exists(visualization_folder):
os.makedirs(visualization_folder)
plt_name += ext
plt.savefig(os.path.join(visualization_folder, plt_name), bbox_inches='tight')
print('Saved {}'.format(os.path.join(visualization_folder, plt_name)))
else:
print('Visualizing {}!'.format(plt_name))
plt.show()
plt.clf()
plt.cla()
if __name__ == '__main__':
imdb = scio.loadmat("../data/datasets/wider/wider_face_split/wider_face_val.mat")
    print(imdb.keys())  # .items alone would print the bound method, not the contents
print("well done!")
|
|
import matplotlib.pyplot as plt
import numpy as np
fig, ax = plt.subplots()
# a pickable rectangle; clicking it triggers on_pick below, which re-colours it
rect = plt.Rectangle((np.pi, -0.5), 1, 1, fc=np.random.random(3), picker=True)
ax.add_patch(rect)
x = np.linspace(0, np.pi*2, 100)
y = np.sin(x)
# picker=8.0 makes the line pickable within an 8-point tolerance
line, = plt.plot(x, y, picker=8.0)
def on_pick(event):
    artist = event.artist
    if isinstance(artist, plt.Line2D):
        # cycle the picked line's width between 1 and 5 points
        lw = artist.get_linewidth()
        artist.set_linewidth(lw % 5 + 1)
    else:
        # any other pickable artist (the rectangle) gets a random face colour
        artist.set_fc(np.random.random(3))
    fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', on_pick)
plt.show()
|
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
import numpy
import genutil
import cdms2
import numpy.ma
import os
import sys
import unittest
import cdat_info
class GENUTIL(unittest.TestCase):
### EXTRACT TESTS
def assertArraysEqual(self,A,B):
self.assertTrue(numpy.all(numpy.equal(A,B)))
def test_arrayindexing_1D(self):
### 1D
A=numpy.array([6,7,8,9,2],'f')
B=numpy.argsort(A).astype('i')
C=genutil.arrayindexing.get(A,B)
self.assertArraysEqual(C,numpy.array([ 2., 6., 7., 8., 9.]))
D=numpy.array([6.5,7.5,8.5,9.5,2.5],'f')
# Sets values of D into A at indices defined in B
E=genutil.arrayindexing.set(A,B,D)
self.assertArraysEqual(E,numpy.array([ 7.5, 8.5, 9.5, 2.5, 6.5]))
def test_arrayindexing_2D(self):
## 2D
        A=numpy.array([[1,2],[3,4,],[5,6],[7,8]],float)  # numpy.float was an alias of float, removed in NumPy 1.24
B=numpy.array([3,2],numpy.int32)
# Extract from A at indices specified by B
# From C directly
C=genutil.array_indexing.extract(A,B)
self.assertEqual(C.dtype.char,'d')
self.assertArraysEqual(C,[7.,6.])
# From python interface
C=genutil.arrayindexing.get(A,B)
self.assertEqual(C.dtype.char,'d')
self.assertArraysEqual(C,[7.,6.])
#### Set tests
V=numpy.array([1345,34],A.dtype.char)
        B=numpy.array([-3,2],int)
# Checks setting negative indices
A=genutil.arrayindexing.set(A,B,V)
self.assertArraysEqual(A,[[ 1., 2.],
[ 1345, 4.],
[ 5., 34],
[ 7., 8.]])
        A=numpy.array([[1,2],[3,4,],[5,6],[7,8]],float)
        B=numpy.array([[1,2],[3,0,],[1,2],[0,3]],int)
        V=numpy.array([[10.,21.],[13,.4,],[1.5,6.4],[77.7,9.8]],float)
C=genutil.arrayindexing.set(A,B,V)
self.assertArraysEqual(C,[[ 77.7, 0.4],
[ 1.5, 4. ],
[ 5. , 6.4],
[ 13. , 9.8]])
f=cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))
clt=f('clt')
## clt=cdms2.MV2.average(clt,2)
M=numpy.ma.maximum.reduce(clt,axis=0)
marg=numpy.ma.argmax(clt,axis=0)
M2=genutil.arrayindexing.get(clt,marg)
self.assertArraysEqual(M2,M)
M=numpy.ma.maximum.reduce(clt,axis=1)
marg=numpy.ma.argmax(clt,axis=1)
marg=cdms2.MV2.array(marg)
marg.setAxis(0,clt.getAxis(0))
marg.setAxis(1,clt.getAxis(2))
M2=genutil.arrayindexing.get(clt,marg,axis=1)
self.assertArraysEqual(M2,M)
clt=cdms2.MV2.masked_greater(clt,80)
M=numpy.ma.maximum.reduce(clt,axis=1)
marg=numpy.ma.argmax(clt,axis=1)
marg=cdms2.MV2.array(marg)
marg.setAxis(0,clt.getAxis(0))
marg.setAxis(1,clt.getAxis(2))
M2=genutil.arrayindexing.get(clt,marg,axis=1)
self.assertArraysEqual(M2,M)
def test_arrayindexing_3D(self):
## 3D
f=cdms2.open(os.path.join(cdat_info.get_sampledata_path(),'clt.nc'))
clt=f('clt')
# Checks we can extract, not happy with random though...
I=numpy.random.random(clt.shape)*clt.shape[0]
I=I.astype('i') # integers required
M2=genutil.arrayindexing.get(clt,I)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 18:00:36 2021
@author: ml
"""
from code.feature_extraction.feature_extractor import FeatureExtractor
from code.util import COLUMN_WORD_COUNT, string_to_words_list
import numpy as np
#class for extracting the amount of words after general preprocessing
class NumberOfWords(FeatureExtractor):
def __init__(self, input_column: str):
"""
constructor
Parameters
----------
input_column : str
name of the input column
Returns
-------
None.
"""
super().__init__([input_column], COLUMN_WORD_COUNT)
def _get_values(self, inputs: list) -> np.ndarray :
"""
compute the word count based on the inputs
Parameters
----------
inputs : list
list of all tweets
Returns
-------
result : np.ndarray
array with the word-count of the individual tweets
"""
#transform string to list and compute length
list_of_lengths=[]
for tweet in inputs[0]:
#turn str into list of words
list_of_words = string_to_words_list(tweet)
if(list_of_words == ['']):
#if tweet is empty
list_of_lengths.append(0)
else:
list_of_lengths.append(len(list_of_words))
#saving it in an array
result = np.array(list_of_lengths)
#expand dim to (tweets, 1)
result = result.reshape(-1,1)
return result
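# Hypothetical usage sketch (depends on the surrounding project: the
# FeatureExtractor base class and code.util.string_to_words_list must be
# importable), showing the expected shape of the output:
# extractor = NumberOfWords("tweet")
# counts = extractor._get_values([["hello world", ""]])  # -> array([[2], [0]])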
|
|
#--------------------------------------------------------------------------------------------
# Key routines of this repository, where we implement the Sinkhorn algorithms and MMDs
#--------------------------------------------------------------------------------------------
import numpy as np
import torch
# Reference implementations
from kernel_norm import kernel_divergence
from sinkhorn_balanced import regularized_ot, hausdorff_divergence, sinkhorn_divergence
# Simpler implementation, with less features - read it first
from sinkhorn_balanced_simple import regularized_ot as regularized_ot_simple
from sinkhorn_balanced_simple import hausdorff_divergence as hausdorff_divergence_simple
from sinkhorn_balanced_simple import sinkhorn_divergence as sinkhorn_divergence_simple
# Extended implementation, with extra features which we only use to generate the "Transport Plan" figures
from sinkhorn_balanced_visualization import regularized_ot as regularized_ot_visualization
from sinkhorn_balanced_visualization import hausdorff_divergence as hausdorff_divergence_visualization
from sinkhorn_balanced_visualization import sinkhorn_divergence as sinkhorn_divergence_visualization
|
|
from controller import Robot
import PuPy
import HDPy
import numpy as np
import h5py
# Initialize a policy
bound_gait = {
'amplitude' : ( 0.8, 1.0, 0.8, 1.0),
'frequency' : (1.0, 1.0, 1.0, 1.0),
'offset' : ( -0.23, -0.23, -0.37, -0.37),
'phase' : (0.0, 0.0, 0.5, 0.5)
}
policy = HDPy.puppy.policy.LRA(PuPy.Gait(bound_gait, 'bounding'))
# OfflineCollector which follows a predefined sequence of actions
# after the initial behaviour (policy with default params for 25 steps).
class TrajectoryFollower(HDPy.puppy.OfflineCollector):
def __init__(self, trajectory, *args, **kwargs):
super(TrajectoryFollower, self).__init__(*args, **kwargs)
self.trajectory = trajectory
self._traj_idx = 0
def _next_action_hook(self, a_next):
if self._traj_idx >= self.trajectory.shape[0]:
# If all actions have been executed, signal the supervisor
# to revert the simulation
self.robot.send_msg('revert_on_demand')
return self.a_curr
# If there's a next action, execute it
a_next = np.atleast_2d(self.trajectory[self._traj_idx]).T
self._traj_idx += 1
return a_next
# Load the sequence file
f = h5py.File('/tmp/example_sequence.hdf5','a')
# Get the index of the trajectory to be executed
idx = f['idx'][()]
grp_name = 'traj_%03i' % idx
if grp_name in f:
# Not yet finished, increment the index such that the next
# trajectory is executed in the next experiment.
trajectory = f[grp_name][:]
do_quit = False
f['idx'][()] += 1
else:
# Simulation is finished, execute any trajectory and prepare for
# termination
while grp_name not in f and idx >= 0:
idx -= 1
grp_name = 'traj_%03i' % (idx)
if idx < 0:
raise Exception('Could not find last trajectory')
trajectory = f[grp_name][:]
do_quit = True
f.close()
# Initialize the collector
collector = PuPy.RobotCollector(
child = policy,
expfile = '/tmp/example_data.hdf5'
)
# Initialize the actor
actor = TrajectoryFollower(
trajectory = trajectory,
policy = collector,
init_steps = 10,
)
# Initialize the robot, bind it to webots
r = PuPy.robotBuilder(
Robot,
actor,
sampling_period_ms = 20,
ctrl_period_ms = 3000,
)
# Register robot in actor for signalling
actor.robot = r
if do_quit:
# Quit the simulation when all trajectories are handled
r.send_msg('quit_on_demand')
# Run the simulation
r.run()
|
|
""" This module provides metrics and related functions """
# Standard library imports
# Third party imports
import numpy as np
import pandas as pd
# Local imports
def avg(data):
return data["h"] / data["ab"]
def obp(data):
numerator = data["h"] + data["bb"] + data["hbp"]
denominator = data["ab"] + data["bb"] + data["hbp"] + data["sf"]
return numerator / denominator
def slg(data):
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
return (x1b + 2 * data["x2b"] + 3 * data["x3b"] + 4 * data["hr"]) / data["ab"]
def ops(data):
return obp(data) + slg(data)
def iso(data):
return (data["x2b"] + 2 * data["x3b"] + 3 * data["hr"]) / data["ab"]
def babip(data):
return (data["h"] - data["hr"]) / (
data["ab"] + data["sf"] - data["so"] - data["hr"]
)
def sar(data):
"""Steal Attempt Rate
:param data: A DataFrame
:returns: A Series
"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
return (data["sb"] + data["cs"]) / (data["hbp"] + data["bb"] + x1b)
def go_fo(data):
return data["go"] / data["fo"]
def hbp_p(data):
return data["hbp"] / data["pa"] * 100
def bb_p(data):
return data["bb"] / data["pa"] * 100
def so_p(data):
return data["so"] / data["pa"] * 100
def pa(data):
return data["ab"] + data["bb"] + data["hbp"] + data["sf"] + data["sh"]
def lob_p(data):
"""LOB% = (H+BB+HBP-R)/(H+BB+HBP-(1.4*HR))
    :param data: A DataFrame
    :returns: A Series
"""
num = data["h"] + data["bb"] + data["hbp"] - data["r"]
denom = data["h"] + data["bb"] + data["hbp"] - (1.4 * data["hr"])
return num / denom
def era(data):
return data["er"] / data["ip"] * 9
def ra_9(data):
return data["r"] / data["ip"] * 9
def so_9(data):
return data["so"] / data["ip"] * 9
def bb_9(data):
return data["bb"] / data["ip"] * 9
def hr_9(data):
return data["hr"] / data["ip"] * 9
def whip(data):
return (data["bb"] + data["h"]) / data["ip"]
def basic_offensive_metrics(data, inplace=False):
"""Calculate basic offensive metrics. These metrics do not depend on league
wide metrics.
:param data: A DataFrame
:param inplace: modify the DataFrame inplace?
:returns: A DataFrame
"""
if not inplace:
data = data.copy()
data["avg"] = avg(data)
data["obp"] = obp(data)
data["slg"] = slg(data)
data["ops"] = ops(data)
data["go_fo"] = go_fo(data)
data["hbp_p"] = hbp_p(data)
data["bb_p"] = bb_p(data)
data["so_p"] = so_p(data)
data["iso"] = iso(data)
data["babip"] = babip(data)
data["sar"] = sar(data)
return data
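# Usage sketch with hypothetical batting totals: the frame only needs the raw
# counting columns referenced above (pa is required by hbp_p/bb_p/so_p).
# _batting = pd.DataFrame([{"h": 150, "ab": 500, "bb": 60, "hbp": 5, "sf": 4,
#                           "sh": 3, "x2b": 30, "x3b": 3, "hr": 20, "sb": 10,
#                           "cs": 4, "so": 90, "go": 120, "fo": 110, "pa": 572}])
# basic_offensive_metrics(_batting)  # returns a copy with avg, obp, slg, ... added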
def basic_pitching_metrics(data, conference=False, inplace=False):
"""Calculate basic pitching metrics. These metrics do not depend on league
wide metrics.
:param data: A DataFrame
:param conference: Omit metrics that cannot be calculated based on conference data
:param inplace: modify the DataFrame inplace?
:returns: A DataFrame
"""
if not inplace:
data = data.copy()
if not conference:
data["pa"] = pa(data)
data["avg"] = avg(data)
data["obp"] = obp(data)
data["slg"] = slg(data)
data["ops"] = ops(data)
data["hbp_p"] = hbp_p(data)
data["bb_p"] = bb_p(data)
data["so_p"] = so_p(data)
data["iso"] = iso(data)
data["babip"] = babip(data)
data["lob_p"] = lob_p(data)
data["era"] = era(data)
data["ra_9"] = ra_9(data)
data["so_9"] = so_9(data)
data["bb_9"] = bb_9(data)
data["hr_9"] = hr_9(data)
data["whip"] = whip(data)
return data
# *********************
# * Advanced Metrics **
# *********************
def bsr(data, bmult=1.0):
"""Base Runs
BsR = A(B/(B+C)) + D
requires ab, h, 2b, 3b, hr, bb, hbp, sf, sh, gdp, sb, cs
"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
a = data["h"] + data["bb"] + data["hbp"] - data["hr"] - data["cs"] - data["gdp"]
b = (
0.777 * x1b
+ 2.61 * data["x2b"]
+ 4.29 * data["x3b"]
+ 2.43 * data["hr"]
+ 0.03 * (data["bb"] + data["hbp"])
+ 1.30 * data["sb"]
+ 0.13 * data["cs"]
+ 1.08 * data["sh"]
+ 1.81 * data["sf"]
+ 0.70 * data["gdp"]
- 0.04 * (data["ab"] - data["h"])
)
c = data["ab"] - data["h"] + data["sh"] + data["sf"]
d = data["hr"]
b = b * bmult
return a * (b / (b + c)) + d
def bsr_bmult(data):
"""Base Runs B multiplier"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
a = data["h"] + data["bb"] + data["hbp"] - data["hr"] - data["cs"] - data["gdp"]
a = data["h"] + data["bb"] + data["hbp"] - data["hr"] - data["cs"] - data["gdp"]
b = (
0.777 * x1b
+ 2.61 * data["x2b"]
+ 4.29 * data["x3b"]
+ 2.43 * data["hr"]
+ 0.03 * (data["bb"] + data["hbp"])
+ 1.30 * data["sb"]
+ 0.13 * data["cs"]
+ 1.08 * data["sh"]
+ 1.81 * data["sf"]
+ 0.70 * data["gdp"]
- 0.04 * (data["ab"] - data["h"])
)
c = data["ab"] - data["h"] + data["sh"] + data["sf"]
d = data["hr"]
b_act = c * (d - data["r"]) / (data["r"] - d - a)
b_est = b
return b_act / b_est
def linear_weights_incr(data, incr=0.00000001):
"""Calculate Linear Weights using the increment method (plus one method)
:param data: A Series with league totals
:param incr: Increment value
:returns: A Series with linear weights
"""
cols = ["bb", "hbp", "ab", "h", "x2b", "x3b", "hr", "sb", "cs", "sf", "sh", "gdp"]
v_input = data[cols]
M_incr = pd.DataFrame(
[
[0, incr, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # hbp
[incr, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # bb
[0, 0, incr, incr, 0, 0, 0, 0, 0, 0, 0, 0], # 1b
[0, 0, incr, incr, incr, 0, 0, 0, 0, 0, 0, 0], # 2b
[0, 0, incr, incr, 0, incr, 0, 0, 0, 0, 0, 0], # 3b
[0, 0, incr, incr, 0, 0, incr, 0, 0, 0, 0, 0], # hr
[0, 0, 0, 0, 0, 0, 0, incr, 0, 0, 0, 0], # sb
[0, 0, 0, 0, 0, 0, 0, 0, incr, 0, 0, 0], # cs
[0, 0, incr, 0, 0, 0, 0, 0, 0, 0, 0, 0], # out
],
index=[
"lw_hbp",
"lw_bb",
"lw_x1b",
"lw_x2b",
"lw_x3b",
"lw_hr",
"lw_sb",
"lw_cs",
"lw_out",
],
columns=cols,
)
M_input = M_incr + v_input
bmult = bsr_bmult(data)
baseruns = bsr(data, bmult=bmult)
# NOTE: The following are equivalent:
# [(bsr(row, bmult) - baseruns) * (1 / incr) for _, row in M_input.iterrows()])
return M_input.apply(lambda row: (bsr(row, bmult) - baseruns) * (1 / incr), axis=1)
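# Usage sketch (hypothetical league totals, not real data): bsr_bmult
# additionally needs "r", so include it alongside the columns listed in `cols`.
# _lg = pd.Series({"ab": 5400, "h": 1450, "x2b": 280, "x3b": 30, "hr": 160,
#                  "bb": 520, "hbp": 60, "sf": 45, "sh": 40, "gdp": 120,
#                  "sb": 90, "cs": 40, "r": 780})
# linear_weights_incr(_lg)  # Series indexed lw_hbp ... lw_out, in runs per event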
def woba_weights(data, target):
"""Calculate the woba weights for hbp, bb, 1b, 2b, 3b, hr
    :param data: A DataFrame of league totals with linear-weight columns lw_hbp ... lw_out
    :param target: Target league average to scale wOBA to (conventionally league OBP)
    :returns: A DataFrame of wOBA weights plus the woba_scale column
"""
data = data.copy()
data["x1b"] = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
lw = data.loc[:, "lw_hbp":"lw_out"]
# subtract the value of the out
lw = (lw.T - lw["lw_out"]).T
lw.drop(columns=["lw_sb", "lw_cs", "lw_out"], inplace=True)
lw.columns = ["hbp", "bb", "x1b", "x2b", "x3b", "hr"]
totals = data[["hbp", "bb", "x1b", "x2b", "x3b", "hr"]]
# calculate the dot product
acc = totals.mul(lw).apply(np.sum, axis=1)
raw = acc / data["pa"]
scale = target / raw
ww = (lw.T * scale).T
ww.columns = ["ww_hbp", "ww_bb", "ww_x1b", "ww_x2b", "ww_x3b", "ww_hr"]
ww["woba_scale"] = scale
return ww
def woba(data, weights):
"""Weighted On-Base Average (wOBA)
:param data: DataFrame or Series of player, team, or league totals
:param weights: DataFrame or Series of wOBA weights
:returns: Series of wOBA values
"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
return (
weights["ww_hbp"] * data["hbp"]
+ weights["ww_bb"] * data["bb"]
+ weights["ww_x1b"] * x1b
+ weights["ww_x2b"] * data["x2b"]
+ weights["ww_x3b"] * data["x3b"]
+ weights["ww_hr"] * data["hr"]
) / data["pa"]
def sbr(data, weights):
"""Stolen Base Runs (SBR)
SBR = runSB * SB + runCS * CS
:param data: DataFrame or Series of player, team, or league totals
:param weights: DataFrame or Series of linear weights
:returns: Series of SBR values
"""
return weights["lw_sb"] * data["sb"] + weights["lw_cs"] * data["cs"]
def lg_wsb(data, weights):
"""lgwSB = (SB * runSB + CS * runCS) / (1B + BB + HBP – IBB)
Used in the calculation of wSB.
:param data: DataFrame or Series of league totals
:param weights: DataFrame or Series of linear weights
:returns: Series of lgwSB values
"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
return (weights["lw_sb"] * data["sb"] + weights["lw_cs"] * data["cs"]) / (
x1b + data["bb"] + data["hbp"]
)
def wsb(data, lg_wsb):
"""Weighted Stolen Base Runs (wSB)
wSB = (SB * runSB) + (CS * runCS) – (lgwSB * (1B + BB + HBP – IBB))
OR
wSB = SBR - (lgwSB * (1B + BB + HBP – IBB))
"""
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
return data["sbr"] - lg_wsb * (x1b + data["bb"] + data["hbp"])
def wraa(data, lg_woba, scale):
"""Weighted Runs Above Average (wRAA)
wRAA = ((wOBA - league wOBA) / wOBA scale) * PA
    :param data: A DataFrame with woba and pa columns
    :param lg_woba: League wOBA
    :param scale: wOBA scale
    :returns: A Series of wRAA values
"""
return ((data["woba"] - lg_woba) / scale) * data["pa"]
def off(data):
"""Offensive Runs Above Average (OFF)
OFF = wSB + wRAA
    :param data: A DataFrame with wsb and wraa columns
    :returns: A Series of OFF values
"""
return data["wsb"] + data["wraa"]
def wrc(data, lg_woba, woba_scale, lg_r_pa):
"""Weighted Runs Created (wRC)
wRC = (((wOBA - league wOBA)/wOBA Scale) + (league R/PA))*PA
    :param data: A DataFrame with woba and pa columns
    :param lg_woba: League wOBA
    :param woba_scale: wOBA scale
    :param lg_r_pa: League runs per plate appearance
    :returns: A Series of wRC values
"""
return (((data["woba"] - lg_woba) / woba_scale) + lg_r_pa) * data["pa"]
def wrc_p(data, lg_r_pa):
"""Weighted Runs Created Plus (wRC+)
Official formula:
wRC+ = (((wRAA/PA + lgR/PA) + (lgR/PA - (park factor*lgR/PA))) / lgwRC/PA excl. pitchers)*100
For our purposes:
wRC+ = (((wRAA/PA) + lgR/PA) / lgR/PA)*100
    :param data: A DataFrame with wraa and pa columns
    :param lg_r_pa: League runs per plate appearance
    :returns: A Series of wRC+ values
"""
return (((data["wraa"] / data["pa"]) + lg_r_pa) / lg_r_pa) * 100
def off_p(data, lg_r_pa):
"""Offensive Runs Plus (OFF+)
OFF+ = (((OFF/PA) + lgR/PA) / lgR/PA)*100
    :param data: A DataFrame with off and pa columns
    :param lg_r_pa: League runs per plate appearance
    :returns: A Series of OFF+ values
"""
return (((data["off"] / data["pa"]) + lg_r_pa) / lg_r_pa) * 100
def rar(data, replacement_level):
"""Runs Above Replacement (RAR)
RAR = OFF - (repl level * PA)
    :param data: A DataFrame with off and pa columns
    :param replacement_level: Replacement level in runs per plate appearance
    :returns: A Series of RAR values
"""
return data["off"] - (replacement_level * data["pa"])
def multi_season(data, totals, func, inplace=False):
"""Calculate metrics for multiple seasons
    :param data: A DataFrame spanning multiple seasons
    :param totals: A DataFrame of league totals indexed (or indexable) by season
    :param func: Function applied to each season's group, e.g. season_offensive_metrics
    :param inplace: modify the DataFrame inplace?
    :returns: A DataFrame
"""
if not inplace:
data = data.copy()
new_totals = totals.copy()
if new_totals.index.name != "season":
new_totals = new_totals.set_index("season")
df = pd.DataFrame()
for name, group in data.groupby("season"):
temp = pd.DataFrame(group)
# totals_season must be a Series
totals_season = new_totals.loc[name]
temp = func(temp, totals_season)
        df = pd.concat([df, temp])  # DataFrame.append was removed in pandas 2.0
return df
def season_offensive_metrics(data, totals_season):
"""Calculate offensive metrics for a single season
:param data: A DataFrame of single season data
:param totals_season: A Series of league totals
:returns: A DataFrame
"""
if not isinstance(totals_season, pd.Series):
raise TypeError("Expected {}. Got {}.".format(pd.Series, type(totals_season)))
temp = data.copy()
temp["sbr"] = sbr(temp, totals_season)
temp["wsb"] = wsb(temp, totals_season["lg_wsb"])
temp["woba"] = woba(temp, totals_season)
temp["wraa"] = wraa(temp, totals_season["woba"], totals_season["woba_scale"])
temp["off"] = off(temp)
temp["wrc"] = wrc(
temp,
totals_season["woba"],
totals_season["woba_scale"],
totals_season["lg_r_pa"],
)
temp["wrc_p"] = wrc_p(temp, totals_season["lg_r_pa"])
temp["off_p"] = off_p(temp, totals_season["lg_r_pa"])
return temp
def season_offensive_metrics_rar(data, totals_season):
"""Calculate offensive metrics for a single season including RAR
:param data: A DataFrame of single season data
:param totals_season: A Series of league totals
:returns: A DataFrame
"""
temp = season_offensive_metrics(data, totals_season)
temp["rar"] = rar(temp, totals_season["rep_level"])
return temp
def advanced_offensive_metrics(data, totals, inplace=False):
"""Calculate advanced offensive metrics. These metrics do depend on league
wide metrics.
:param data: A DataFrame
:param totals: A DataFrame of league wide totals and weights
:param inplace: modify the DataFrame inplace?
:returns: A DataFrame
"""
if not inplace:
data = data.copy()
new_totals = totals.copy()
if new_totals.index.name != "season":
new_totals = new_totals.set_index("season")
df = pd.DataFrame()
for name, group in data.groupby("season"):
temp = pd.DataFrame(group)
# totals_season must be a Series
totals_season = new_totals.loc[name]
temp["sbr"] = sbr(group, totals_season)
temp["wsb"] = wsb(temp, totals_season["lg_wsb"])
temp["woba"] = woba(temp, totals_season)
temp["wraa"] = wraa(temp, totals_season["woba"], totals_season["woba_scale"])
temp["off"] = off(temp)
temp["wrc"] = wrc(
temp,
totals_season["woba"],
totals_season["woba_scale"],
totals_season["lg_r_pa"],
)
temp["wrc_p"] = wrc_p(temp, totals_season["lg_r_pa"])
temp["off_p"] = off_p(temp, totals_season["lg_r_pa"])
        df = pd.concat([df, temp])  # DataFrame.append was removed in pandas 2.0
return df
# *********************
# * Pitching Metrics **
# *********************
def fip(data, constant):
"""Fielding Independent Pitching (FIP)
    :param data: A DataFrame
    :param constant: League FIP constant (see fip_constant)
    :returns: A Series
"""
return (
(13 * data["hr"] + 3 * (data["bb"] + data["hbp"]) - 2 * data["so"]) / data["ip"]
) + constant
def fip_constant(data):
"""FIP Constant = lgERA – (((13*lgHR)+(3*(lgBB+lgHBP))-(2*lgK))/lgIP)
    :param data: A Series of league totals
    :returns: The league FIP constant
"""
return data["era"] - (
((13 * data["hr"]) + (3 * (data["bb"] + data["hbp"])) - 2 * data["so"])
/ data["ip"]
)
def fip_minus(data, lg_fip):
"""FIP- = FIP / lgFIP * 100
    :param data: A DataFrame with a fip column
    :param lg_fip: League FIP
    :returns: A Series
"""
return data["fip"] / lg_fip * 100
def era_minus(data, lg_era):
"""ERA- = ERA / lgERA * 100
    :param data: A DataFrame with an era column
    :param lg_era: League ERA
    :returns: A Series
"""
return data["era"] / lg_era * 100
def bsr_pitch(data, bmult=1.0):
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
a = data["h"] + data["bb"] + data["hbp"] - data["hr"]
b = (
0.78 * x1b
+ 2.34 * data["x2b"]
+ 3.9 * data["x3b"]
+ 2.34 * data["hr"]
+ 0.039 * (data["bb"] + data["hbp"])
)
c = data["ab"] - data["h"]
d = data["hr"]
return a * (b * bmult / (b * bmult + c)) + d
def bsr_pitch_bmult(data):
x1b = data["h"] - data["x2b"] - data["x3b"] - data["hr"]
a = data["h"] + data["bb"] + data["hbp"] - data["hr"]
b = (
0.78 * x1b
+ 2.34 * data["x2b"]
+ 3.9 * data["x3b"]
+ 2.34 * data["hr"]
+ 0.039 * (data["bb"] + data["hbp"])
)
c = data["ab"] - data["h"]
d = data["hr"]
b_act = c * (d - data["r"]) / (data["r"] - d - a)
b_est = b
return b_act / b_est
def bsr_9(data):
return data["bsr"] / data["ip"] * 9
def bsr_minus(data, lg_bsr_9):
return data["bsr_9"] / lg_bsr_9 * 100
def bsraa(data, lg_bsr_9):
return (lg_bsr_9 - data["bsr_9"]) * data["ip"] / 9
def raa(data, lg_ra_9):
return (lg_ra_9 - data["ra_9"]) * data["ip"] / 9
def fipraa(data, lg_fip):
return (lg_fip - data["fip"]) * data["ip"] / 9
|
|
import copy
import numpy as np
from scipy.spatial.transform import Rotation as R
from ase.data import atomic_numbers,atomic_masses_iupac2016
from mcse.core.structure import Structure
def check_molecule(struct, exception=True):
# Check for valid molecule_struct
if len(struct.get_lattice_vectors()) > 0:
if exception:
raise Exception("Structure with lattice vectors {} was passed "
.format(struct.get_lattice_vectors_better())+
"into MoleculeBonding class. Molecule structure "+
"without lattice vectors should be passed to "+
"MoleculeBonding.")
else:
return False
else:
return True
def center_com(struct):
"""
Move center of mass of the structure to the origin.
"""
    temp_com = com(struct)
    struct.translate(-temp_com)
return struct
def com(struct):
"""
Calculates center of mass of the system.
"""
geo_array = struct.get_geo_array()
element_list = struct.elements
mass = np.array([atomic_masses_iupac2016[atomic_numbers[x]]
for x in element_list]).reshape(-1)
total = np.sum(mass)
com = np.sum(geo_array*mass[:,None], axis=0)
com = com / total
return com
def rot_mol(rot, struct, wrt="origin", degrees=True, seq="xyz"):
"""
Rotate molecule using rotation matrix.
Arguments
---------
rot: array
Can be either a list of 3 euler angles in the given order or a 3,3
rotation matrix.
    wrt: str
        Rotation performed with respect to one of ["origin", "com"];
        only "origin" is implemented for now.
    seq: str
        Axis order for the euler angles if the rotation is given as euler angles.
"""
if wrt != "origin":
raise Exception("Not Implemented")
rot = np.array(rot)
if rot.shape == (3,3):
pass
elif rot.ravel().shape == (3,):
### Assume euler angles
Rot = R.from_euler(seq, rot.ravel(), degrees=degrees)
rot = Rot.as_matrix()
else:
raise Exception(
"Only rotation matrices and euler angles are currently implemented.")
geo = struct.get_geo_array()
ele = struct.elements
rot_geo = np.dot(rot, geo.T).T
struct.from_geo_array(rot_geo, ele)
return struct
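# Usage sketch (hypothetical): rotate a molecule 90 degrees about the z axis,
# with respect to the origin, by passing euler angles instead of a matrix.
# struct = rot_mol([0.0, 0.0, 90.0], struct, degrees=True, seq="xyz")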
def combine(struct1, struct2, lat=True,
bonds=True,
bonds_kw={"mult": 1.20, "skin": 0.0, "update": False}):
"""
Combines two structures.
Arguments
---------
lat: bool
If True, keeps lattice vectors of first structure.
"""
if bonds:
bonds1 = copy.deepcopy(struct1.get_bonds(**bonds_kw))
bonds2 = copy.deepcopy(struct2.get_bonds(**bonds_kw))
### Need to adjust index of bonds2 for combined structure
adjust_idx = len(struct1.get_geo_array())
for idx1,bond_list in enumerate(bonds2):
for idx2,atom_idx in enumerate(bond_list):
bonds2[idx1][idx2] = atom_idx + adjust_idx
combined_bonds = bonds1 + bonds2
geo1 = struct1.get_geo_array()
ele1 = struct1.elements
combined = Structure.from_geo(geo1,ele1)
if lat == True:
lattice = struct1.get_lattice_vectors()
if len(lattice) > 0:
combined.set_lattice_vectors(lattice)
geo2 = struct2.get_geo_array()
ele2 = struct2.elements
combined.append(geo2, ele2)
# for idx,coord in enumerate(geo2):
# combined.append(coord[0],coord[1],coord[2],ele2[idx])
combined.properties["combined"] = {
struct1.struct_id: struct1.document(),
struct2.struct_id: struct2.document()
}
if bonds:
combined.properties["bonds"] = combined_bonds
combined.get_bonds(**bonds_kw)
return combined
|
|
import numpy as np
import matplotlib.pyplot as plt
import scanpy as sc
from anndata import AnnData
from kneed import KneeLocator
from scipy.sparse import isspmatrix, csr_matrix, spmatrix
from sklearn.decomposition import PCA
from typing import Optional, Tuple, List, Union
# Convert sparse matrix to dense matrix.
to_dense_matrix = lambda X: np.array(X.todense()) if isspmatrix(X) else X
def compute_pca_components(
matrix: Union[np.ndarray, spmatrix], save_curve_img: Optional[str] = None
) -> Tuple[int, float]:
"""
Calculate the inflection point of the PCA curve to
obtain the number of principal components that the PCA should retain.
Args:
matrix: A dense or sparse matrix.
save_curve_img: If save_curve_img != None, save the image of the PCA curve and inflection points.
Returns:
new_n_components: The number of principal components that PCA should retain.
new_components_stored: Percentage of variance explained by the retained principal components.
"""
# Convert sparse matrix to dense matrix.
matrix = to_dense_matrix(matrix)
# Principal component analysis (PCA).
pca = PCA(n_components=None)
pca.fit_transform(matrix)
# Percentage of variance explained by each of the selected components.
# If n_components is not set then all components are stored and the sum of the ratios is equal to 1.0.
raw_components_ratio = pca.explained_variance_ratio_
raw_n_components = np.arange(1, raw_components_ratio.shape[0] + 1)
# Calculate the inflection point of the PCA curve.
kl = KneeLocator(
raw_n_components, raw_components_ratio, curve="convex", direction="decreasing"
)
new_n_components = int(kl.knee)
new_components_stored = round(
float(np.sum(raw_components_ratio[:new_n_components])), 3
)
# Whether to save the image of PCA curve and inflection point.
if save_curve_img is not None:
kl.plot_knee()
plt.tight_layout()
plt.savefig(save_curve_img, dpi=100)
return new_n_components, new_components_stored
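# Usage sketch on synthetic data: estimate how many principal components the
# knee of the explained-variance curve suggests keeping.
# _X = np.random.rand(200, 50)
# _n_pcs, _var_kept = compute_pca_components(_X)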
def qc_scanpy(
adata: AnnData,
min_genes: int = 200,
min_cells: int = 3,
mt_label: Optional[str] = "mt-",
mt_counts_threshold: Optional[int] = None,
total_counts_threshold: Optional[int] = None,
save_qc_img: Optional[str] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""
Quality control based on scanpy.
The violin image will plot the following computed quality measures:
n_genes_by_counts: the number of genes expressed in the count matrix
total_counts: the total counts per cell
pct_counts_mt: the percentage of counts in mitochondrial genes
Args:
adata: An Anndata object.
min_genes: Minimum number of genes expressed required for a cell to pass filtering.
min_cells: Minimum number of cells expressed required for a gene to pass filtering.
mt_label: Label of mitochondrial genes. For example, in Drosophila, genes whose names start with 'mt-' are mitochondrial genes.
mt_counts_threshold: Remove cells with mitochondrial gene expression counts greater than `mt_counts_threshold`.
total_counts_threshold: Remove cells with total counts greater than `total_counts_threshold`.
save_qc_img: If save_qc_img != None, save the image of some computed quality measures.
copy: Whether to copy `adata` or modify it inplace.
Returns:
Updates adata, containing some computed quality measures.
"""
adata = adata.copy() if copy else adata
# Remove duplicate genes.
adata.var_names_make_unique()
# Remove cells that have too many mitochondrial genes expressed or too many total counts:
adata.var["mt"] = adata.var_names.str.startswith(mt_label)
sc.pp.calculate_qc_metrics(
adata, percent_top=None, qc_vars=["mt"], log1p=False, inplace=True
)
if mt_counts_threshold is not None:
adata = adata[adata.obs.pct_counts_mt < mt_counts_threshold, :]
if total_counts_threshold is not None:
        adata = adata[adata.obs.total_counts < total_counts_threshold, :]
# Filter genes based on number of cells or counts.
sc.pp.filter_genes(adata, min_cells=min_cells)
# Filter cell outliers based on counts and numbers of genes expressed.
sc.pp.filter_cells(adata, min_genes=min_genes)
if save_qc_img is not None:
sc.pl.violin(
adata,
["n_genes_by_counts", "total_counts", "pct_counts_mt"],
rotation=90,
jitter=0.4,
multi_panel=True,
save=False,
show=False,
)
plt.tight_layout()
plt.savefig(save_qc_img, dpi=100)
return adata if copy else None
def sctransform(
adata: AnnData,
n_top_genes: int = 3000,
save_sct_img_1: Optional[str] = None,
save_sct_img_2: Optional[str] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""
Use sctransform with an additional flag vst.flavor="v2" to perform normalization and dimensionality reduction
Original Code Repository: https://github.com/saketkc/pySCTransform
Installation:
Conda:
```conda install R```
R:
```if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager")```
```BiocManager::install(version = "3.14")```
```BiocManager::install("glmGamPoi")```
Python:
```pip install rpy2```
```pip install git+https://github.com/saketkc/pysctransform```
Examples:
1.In pycharm:
adata = sc.read_h5ad("./E16_18_S01.h5ad")
sctransform(adata=adata, save_sct_img_1="sct10.png", save_sct_img_2="sct20.png")
cluster_spagcn(adata=adata)
adata.write_h5ad('test_clu.h5ad',compression='gzip')
2.In remote server:
os.environ['R_HOME'] = '/hwfssz1/ST_SUPERCELLS/P21Z10200N0090/tuzhencheng/software/anaconda3/envs/SCT-SpaGCN/lib/R' # Specify the R environment
qc_scanpy(adata=adata, mt_counts_threshold=5, save_qc_img="qc0.png")
sctransform(adata=adata, save_sct_img_1="sct10.png", save_sct_img_2="sct20.png")
cluster_spagcn(adata=adata)
adata.write_h5ad('test_clu.h5ad',compression='gzip')
Args:
adata: An Anndata object.
n_top_genes: Number of highly-variable genes to keep.
save_sct_img_1: If save_sct_img_1 != None, save the image of the GLM model parameters.
save_sct_img_2: If save_sct_img_2 != None, save the image of the final residual variances.
copy: Whether to copy `adata` or modify it inplace.
Returns:
Updates adata with the field ``adata.layers["raw_X"]``, containing raw expression matrix for n_top_genes(highly variable genes).
"""
try:
from pysctransform import vst, SCTransform
from pysctransform.plotting import plot_fit, plot_residual_var
except ImportError:
raise ImportError("\nPlease install pysctransform:"
"\n\n\tpip install git+https://github.com/saketkc/pysctransform")
adata = adata.copy() if copy else adata
# Get pearson residuals for n_top_genes(highly variable genes).
residuals = SCTransform(adata, vst_flavor="v2", var_features_n=n_top_genes)
# Plot model characteristics.
if save_sct_img_1 is not None or save_sct_img_2 is not None:
# adata.obsm["pearson_residuals"] = residuals
vst_out = vst(
adata.X.T,
gene_names=adata.var_names.tolist(),
cell_names=adata.obs_names.tolist(),
method="fix-slope",
exclude_poisson=True,
)
# Visualize the GLM model parameters.
if save_sct_img_1 is not None:
_ = plot_fit(vst_out)
plt.savefig(save_sct_img_1, dpi=100)
# Visualize the final residual variances with respect to mean and highlight highly variable genes.
if save_sct_img_2 is not None:
_ = plot_residual_var(vst_out)
plt.savefig(save_sct_img_2, dpi=100)
# Only store highly variable genes.
adata = adata[:, adata.var_names.isin(residuals.columns.tolist())]
adata.layers["raw_X"] = adata.X
# Highly variable genes' expression matrix.
residuals = residuals.reindex(columns=adata.var_names.tolist())
hvgs_matrix = (
csr_matrix(residuals.values) if isspmatrix(adata.X) else residuals.values
)
adata.X = hvgs_matrix
return adata if copy else None
def integrate(
adatas: List[AnnData],
batch_key: str = "slice",
) -> AnnData:
"""
Args:
adatas: AnnData matrices to concatenate with.
batch_key: Add the batch annotation to :attr:`obs` using this key.
Returns:
integrated_adata: The concatenated AnnData, where adata.obs[batch_key] stores a categorical variable labeling the batch.
"""
batch_ca = [adata.obs[batch_key][0] for adata in adatas]
integrated_adata = adatas[0].concatenate(
adatas[1:], batch_key=batch_key, batch_categories=batch_ca, join="outer"
)
return integrated_adata
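# Usage sketch (hypothetical slices): each AnnData must already carry its
# batch label in .obs[batch_key] before concatenation.
# adata1.obs["slice"] = "s1"; adata2.obs["slice"] = "s2"
# merged = integrate([adata1, adata2], batch_key="slice")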
def harmony_debatch(
adata: AnnData,
key: str,
basis: str = "X_pca",
adjusted_basis: str = "X_pca_harmony",
max_iter_harmony: int = 10,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Use harmonypy [Korunsky19]_ to remove batch effects.
This function should be run after performing PCA but before computing the neighbor graph.
Original Code Repository: https://github.com/slowkow/harmonypy
Interesting example: https://slowkow.com/notes/harmony-animation/
Args:
adata: An Anndata object.
key: The name of the column in ``adata.obs`` that differentiates among experiments/batches.
basis: The name of the field in ``adata.obsm`` where the PCA table is stored.
        adjusted_basis: The name of the field in ``adata.obsm`` where the adjusted PCA table will be stored after running this function.
max_iter_harmony: Maximum number of rounds to run Harmony. One round of Harmony involves one clustering and one correction step.
copy: Whether to copy `adata` or modify it inplace.
Returns:
Updates adata with the field ``adata.obsm[adjusted_basis]``, containing principal components adjusted by Harmony.
"""
try:
import harmonypy
except ImportError:
raise ImportError("\nPlease install harmonypy:\n\n\tpip install harmonypy")
adata = adata.copy() if copy else adata
# Convert sparse matrix to dense matrix.
matrix = to_dense_matrix(adata.obsm[basis])
# Use Harmony to adjust the PCs.
harmony_out = harmonypy.run_harmony(
matrix, adata.obs, key, max_iter_harmony=max_iter_harmony
)
adjusted_matrix = harmony_out.Z_corr.T
# Convert dense matrix to sparse matrix.
if isspmatrix(adata.obsm[basis]):
adjusted_matrix = csr_matrix(adjusted_matrix)
adata.obsm[adjusted_basis] = adjusted_matrix
return adata if copy else None
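# Usage sketch of the intended order (assumed pipeline, not prescribed by this
# module): PCA first, then Harmony, then neighbors on the adjusted basis.
# sc.pp.pca(adata, n_comps=30)                      # writes adata.obsm["X_pca"]
# harmony_debatch(adata, key="slice")               # writes adata.obsm["X_pca_harmony"]
# sc.pp.neighbors(adata, use_rep="X_pca_harmony")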
|
|
#FOUR Ultrasonic sensors (HC-SR04) FUNCTION
#Version 2
#Two conditions:
#1. Every half a second determine location on boardSize_updown
#2. In front of washer, determine location on board
#Length is 23 cm with pushing mechanism
#Goal: Assign four sensors to the four directions depending on the configuration
#When turn occurs, the sensor that aligns with each direction will change = FOUR CONFIGURATIONS
#Four sensors: sensor_1, sensor_2, sensor_3, sensor_4
#Four directions: sensor_N, sensor_W, sensor_S, sensor_E
#Starting orientation: S1=S_N, S2=S_W, S3=S_S, S4=S_E
#if moving straight, will remember configuration (variables sent back from function) and jump straight to determining squareLoc
#if turning right, add one to rightIndex and match leftIndex - remember both right and leftIndex
#if turning left, add one to leftIndex and match rightIndex- remember both right and leftIndex
#Main function (mainFn) will be part of FSM code --> will call determineLoc
#determineLoc: sends back (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E)
#i.e. remembers config, indexes, and sends back square location (i.e. location)
#determineLoc will be called every 0.5 seconds and square location will be sent to display station via bluetooth
#will also be called if washer is found (these square locations will be remembered in order to print out on LCD at end)
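#Compact sketch of the four configurations described above (equivalent to the
#if/elif chains in assignDirection below): index 0-3 maps to the sensor numbers
#assigned to (N, W, S, E).
#CONFIGS = {0: (1, 2, 3, 4), 1: (2, 3, 4, 1), 2: (3, 4, 1, 2), 3: (4, 1, 2, 3)}
#e.g. after one right turn (index 1), sensor 2 faces North and sensor 1 faces East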
#Importing at module level so the helper functions below can also use GPIO, time, and numpy
import RPi.GPIO as GPIO
import time
import numpy as np
def main(): #This will all be in FSM main code
#Declare board setup
GPIO.setmode(GPIO.BCM) #sets GPIO pin numbering
#Remove warnings
GPIO.setwarnings(False)
    #Declarations - these will change
    turningright = 0 #sent to this function by encoder?
    turningleft = 0 #sent to this function by encoder?
    movingforward = 0 #no turning!
    washerinFront = 0 #set to 1 when the washer is directly in front of the car
    rightIndex = 0 #original orientation
    leftIndex = 0 #original orientation
#Now, determine location of car
#Sending back configuration (sensor_N, etc.), so if continuing straight will "remember" configuration until a turn occurs
(location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) = determineLoc(movingforward,turningright,turningleft,rightIndex,leftIndex,washerinFront)
#Hold onto index values and sensor directions
#LOCATION is square location
return (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) #to main function
"""
"""
def determineLoc(movingforward,turningright,turningleft,rightIndex,leftIndex,washerinFront):
#Adding washer in front = if measuring distance(s) before deploying patch
#Declare GPIO pins for 4 ultrasonic sensors: THESE DO NOT CHANGE
#Ultrasonic sensor 1: starting off north
TRIG_sensor1 = 5 #output pin - triggers the sensor
ECHO_sensor1 = 6 #input pin - reads the return signal from the sensor
#Ultrasonic sensor 2: starting off west
TRIG_sensor2 = 19
ECHO_sensor2 = 26
#Ultrasonic sensor 3: starting off south
TRIG_sensor3 = 8
ECHO_sensor3 = 7
#Ultrasonic sensor 4: starting off east
TRIG_sensor4 = 20
ECHO_sensor4 = 21
#Now account for turning - DETERMINE CONFIGURATION
if (washerinFront == 0): #only determine configuration if NOT during washerinFront condition
if ((turningright == 1) and (turningleft == 0) and (movingforward == 0)) :
rightIndex = rightIndex + 1
#leftIndex does not change -> calling function
(rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) = assignDirection(rightIndex, leftIndex, turningright, turningleft)
#leftIndex set to match config of rightIndex
elif ((turningleft == 1) and (turningright == 0) and (movingforward == 0)):
leftIndex = leftIndex + 1
#rightIndex does not change -> calling function
(rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) = assignDirection(rightIndex, leftIndex, turningright, turningleft)
#rightIndex set to match config of leftIndex
elif ((turningright == 1) and (turningleft == 1)):
#Error: cannot turn both ways at same time!
print("ERROR!!!! Cannot turn right and left at same time")
        elif ((movingforward == 1) and (turningright == 0) and (turningleft == 0)):
            #No turning, move forward - still re-derive the configuration so the
            #sensor assignments used below are always defined
            (rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) = assignDirection(rightIndex, leftIndex, turningright, turningleft)
            print("Continue Moving Forward!")
else:
print("ERROR!")
(TRIG_N, ECHO_N, TRIG_W, ECHO_W, TRIG_S, ECHO_S, TRIG_E, ECHO_E, config_N, config_W, config_S, config_E, sensor1, sensor2, sensor3, sensor4) = declareSensor(sensor_N, sensor_W, sensor_S, sensor_E, TRIG_sensor1, ECHO_sensor1, TRIG_sensor2, ECHO_sensor2, TRIG_sensor3, ECHO_sensor3, TRIG_sensor4, ECHO_sensor4)
#Declare N,W,S,E TRIG & ECHO pins
#Ultrasonic sensor - North
GPIO.setup(TRIG_N,GPIO.OUT)
GPIO.setup(ECHO_N,GPIO.IN)
#Ultrasonic sensor - West
GPIO.setup(TRIG_W,GPIO.OUT)
GPIO.setup(ECHO_W,GPIO.IN)
#Ultrasonic sensor - South
GPIO.setup(TRIG_S,GPIO.OUT)
GPIO.setup(ECHO_S,GPIO.IN)
#Ultrasonic sensor - East
GPIO.setup(TRIG_E,GPIO.OUT)
GPIO.setup(ECHO_E,GPIO.IN)
#Read distance from each sensor -> calling function
    distance_N = readDistance(TRIG_N, ECHO_N) #determine north distance
    distance_W = readDistance(TRIG_W, ECHO_W) #determine west distance
    distance_S = readDistance(TRIG_S, ECHO_S) #determine south distance
    distance_E = readDistance(TRIG_E, ECHO_E) #determine east distance
#Assign directions back to sensor numbers
#First sensor 1
if (sensor1 == "N"):
distance1_full = distance_N
elif (sensor1 == "W"):
distance1_full = distance_W
elif (sensor1 == "S"):
distance1_full = distance_S
elif (sensor1 == "E"):
distance1_full = distance_E
#Now sensor 2
if (sensor2 == "N"):
distance2_full = distance_N
elif (sensor2 == "W"):
distance2_full = distance_W
elif (sensor2 == "S"):
distance2_full = distance_S
elif (sensor2 == "E"):
distance2_full = distance_E
#Now sensor 3
if (sensor3 == "N"):
distance3_full = distance_N
elif (sensor3 == "W"):
distance3_full = distance_W
elif (sensor3 == "S"):
distance3_full = distance_S
elif (sensor3 == "E"):
distance3_full = distance_E
#Finally sensor 4
if (sensor4 == "N"):
distance4_full = distance_N
elif (sensor4 == "W"):
distance4_full = distance_W
elif (sensor4 == "S"):
distance4_full = distance_S
elif (sensor4 == "E"):
distance4_full = distance_E
#Now distance1, distance2, distance3, distance4 are distances of all 4 sensors
#Subtract 0.5ft in all directions - 7x7 ft board
#Distance not a part of the board
leftoverDistance_all = 0.5
#Subtract leftoverDistance(s) from total distance
distance1 = distance1_full - leftoverDistance_all
distance2 = distance2_full - leftoverDistance_all
distance3 = distance3_full - leftoverDistance_all
distance4 = distance4_full - leftoverDistance_all
#Now all distances within 7x7 ft board
#Send back distance1, distance2, distance3, and distance4 continuously to function1
#Assign length and width of car according to configuration
    (dim1, dim2) = assignLengthWidth(config_N, config_S, config_W, config_E) #signature takes the four config flags only
#Determine location -> calling function
if (washerinFront == 0): #determines square in reference to center of car (i.e. length/2, width/2)
location = squareLocGeneral(distance_N, distance_W, distance_S, distance_E, dim1, dim2) #general square location (every 0.5 sec)
    elif (washerinFront == 1): #determines square in reference to front center of pushing mechanism
        location = squareLocWasher(distance_N, distance_W, distance_S, distance_E, dim1, dim2, sensor1) #sensor1's direction is the sensorPatch direction
#Print out location
#Will need to send this location to the display station VIA BLUETOOTH!
print ("Square:",location)
# return (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E, distance_N, distance_W, distance_S, distance_E) #to mainFn (i.e. FSM code)
return (location, rightIndex, leftIndex, distance1, distance2, distance3, distance4)
#now, sends back distance (in feet) of all four sensor in all four directions (N, S, W, E)
"""
"""
def declareSensor(sensor_N, sensor_W, sensor_S, sensor_E, TRIG_sensor1, ECHO_sensor1, TRIG_sensor2, ECHO_sensor2, TRIG_sensor3, ECHO_sensor3, TRIG_sensor4, ECHO_sensor4):
#Set two GPIO ports as inputs/outputs depending on CONFIGURATION!
#First, declare North sensor
if (sensor_N == 1):
TRIG_N = TRIG_sensor1
ECHO_N = ECHO_sensor1
config_N = 1 #dummy variable to track which direction of length/width
sensor1 = "N"
elif (sensor_N == 2):
TRIG_N = TRIG_sensor2
ECHO_N = ECHO_sensor2
config_N = 0
sensor2 = "N"
elif (sensor_N == 3):
TRIG_N = TRIG_sensor3
ECHO_N = ECHO_sensor3
config_N = 1
sensor3 = "N"
    elif (sensor_N == 4):
TRIG_N = TRIG_sensor4
ECHO_N = ECHO_sensor4
config_N = 0
sensor4 = "N"
#Second, declare West sensor
if (sensor_W == 1):
TRIG_W = TRIG_sensor1
ECHO_W = ECHO_sensor1
config_W = 0
sensor1 = "W"
elif (sensor_W == 2):
TRIG_W = TRIG_sensor2
ECHO_W = ECHO_sensor2
config_W = 1
sensor2 = "W"
elif (sensor_W == 3):
TRIG_W = TRIG_sensor3
ECHO_W = ECHO_sensor3
config_W = 0
sensor3 = "W"
elif (sensor_W == 4):
TRIG_W = TRIG_sensor4
ECHO_W = ECHO_sensor4
config_W = 1
sensor3 = "W"
#Third, declare South sensor
if (sensor_S == 1):
TRIG_S = TRIG_sensor1
ECHO_S = ECHO_sensor1
config_S = 1
sensor1 = "S"
elif (sensor_S == 2):
TRIG_S = TRIG_sensor2
ECHO_S = ECHO_sensor2
config_S = 0
sensor2 = "S"
elif (sensor_S == 3):
TRIG_S = TRIG_sensor3
ECHO_S = ECHO_sensor3
config_S = 1
sensor3 = "S"
elif (sensor_S == 4):
TRIG_S = TRIG_sensor4
ECHO_S = ECHO_sensor4
config_S = 0
sensor4 = "S"
#Fourth, declare East sensor
if (sensor_E == 1):
TRIG_E = TRIG_sensor1
ECHO_E = ECHO_sensor1
config_E = 0
sensor1 = "E"
elif (sensor_E == 2):
TRIG_E = TRIG_sensor2
ECHO_E = ECHO_sensor2
config_E = 1
sensor2 = "E"
elif (sensor_E == 3):
TRIG_E = TRIG_sensor3
ECHO_E = ECHO_sensor3
config_E = 0
sensor3 = "E"
elif (sensor_E == 4):
TRIG_E = TRIG_sensor4
ECHO_E = ECHO_sensor4
config_E = 1
sensor4 = "E"
return (TRIG_N, ECHO_N, TRIG_W, ECHO_W, TRIG_S, ECHO_S, TRIG_E, ECHO_E, config_N, config_W, config_S, config_E, sensor1, sensor2, sensor3, sensor4) #to determineLoc -- here!
"""
"""
#Function to read from encoder, assign N,W,S,E sensors
def assignDirection(rightIndex, leftIndex, turningright, turningleft):
#First check to reset to original orientation, loop complete
if (rightIndex == 4):
rightIndex = 0
#Set leftIndex to match rightIndex
leftIndex = 0
elif (leftIndex == 4):
leftIndex = 0
#Set rightIndex to match leftIndex
rightIndex = 0
#Turning right initiated
if (turningright == 1): #only rightIndex should have changed
print ("Turned right")
if (rightIndex == 0):
#Assign sensors
sensor_N = 1 #sensor_1
sensor_W = 2 #sensor_2
sensor_S = 3 #sensor_3
sensor_E = 4 #sensor_4
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 0
elif (rightIndex == 1):
#Assign sensors
sensor_N = 2 #sensor_2
sensor_W = 3 #sensor_3
sensor_S = 4 #sensor_4
sensor_E = 1 #sensor_1
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 3
elif (rightIndex == 2):
#Assign sensors
sensor_N = 3 #sensor_3
sensor_W = 4 #sensor_4
sensor_S = 1 #sensor_1
sensor_E = 2 #sensor_2
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 2
elif (rightIndex == 3):
#Assign sensors
sensor_N = 4 #sensor_4
sensor_W = 1 #sensor_1
sensor_S = 2 #sensor_2
sensor_E = 3 #sensor_3
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 1
else: #already checked for value of 4!
print("ERROR!!!") #should only be values 1, 2, 3
elif (turningleft == 1): #only leftIndex should have changed
print ("Turned left")
if (leftIndex == 0):
#Assign sensors
sensor_N = 1 #sensor_1
sensor_W = 2 #sensor_2
sensor_S = 3 #sensor_3
sensor_E = 4 #sensor_4
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 0
elif (leftIndex == 1):
#Assign sensors
sensor_N = 4 #sensor_4
sensor_W = 1 #sensor_1
sensor_S = 2 #sensor_2
sensor_E = 3 #sensor_3
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 3
elif (leftIndex == 2):
#Assign sensors
sensor_N = 3 #sensor_3
sensor_W = 4 #sensor_4
sensor_S = 1 #sensor_1
sensor_E = 2 #sensor_2
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 2
elif (leftIndex == 3):
#Assign sensors
sensor_N = 2 #sensor_2
sensor_W = 3 #sensor_3
sensor_S = 4 #sensor_4
sensor_E = 1 #sensor_1
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 1
else: #already checked for value of 4!
print("ERROR!!!") #should only be values 1, 2, 3
    else: #No turning, move forward
        print ("Moving forward") #remembers configuration if no turning occurs!
        #Re-derive the configuration from the stored rightIndex so the returned
        #sensor assignments are always defined (indexes are kept matched above,
        #so rightIndex alone determines the configuration)
        configs = {0: (1, 2, 3, 4), 1: (2, 3, 4, 1), 2: (3, 4, 1, 2), 3: (4, 1, 2, 3)}
        (sensor_N, sensor_W, sensor_S, sensor_E) = configs[rightIndex]
#Send back all these variables
#hold onto values of rightIndex and leftIndex (int of values 0, 1, 2, 3)
#sensor_N, sensor_W, sensor_S, sensor_E are int values of 1, 2, 3, 4
return (rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E) #determineLoc
"""
"""
#Function to read distance from one ultrasonic sensor, given its TRIG and ECHO pins
#(pins are passed in explicitly because TRIG_N etc. are local to determineLoc)
def readDistance(TRIG, ECHO):
#Now begin determining distance
#Ensure the trigger pin is set low
GPIO.output(TRIG, False)
#Give the sensor a second to settle
time.sleep(1)
#Create trigger pulse
GPIO.output(TRIG,True)
#Set trigger pin high for 10uS
time.sleep(0.00001)
#Set is low again
GPIO.output(TRIG,False)
#Determine pulse_start
while GPIO.input(ECHO)==0:
pulse_start = time.time()
#Determine pulse_end
while GPIO.input(ECHO)==1:
pulse_end = time.time()
#Speed = Distance/Time, speed of sound at sea level = 343 m/s
#34300 = distance/(time/2)
#17150 = distance/time
#distance = 17150*pulse_duration
#Calculating distance
pulse_duration = pulse_end - pulse_start
distance_cm = pulse_duration*17150
#distance_cm = round(distance_cm,2)
distance_inch = distance_cm/2.54 #2.54 cm per inch
#distance_inch = round(distance_inch,2)
distance_feet = distance_inch/12 #12 in per foot
#distance_feet = round(distance_feet,2)
return distance_feet #to determineLoc
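#Worked example of the conversion above: a 5 ms round trip gives
#0.005 s * 17150 = 85.75 cm = 33.76 in = about 2.81 ft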
"""
"""
#Function to assign length and width of car according to the configuration
def assignLengthWidth(config_N, config_S, config_W, config_E):
#Assign length and width of car according to configuration
length = 0.585
width = 0.36 #width (ft) in original configuration
if ((config_N == 1) and (config_S == 1) and (config_W == 1) and (config_E == 1)): #N/S up-down, W/E left-right
dim1 = length #up-down distance
dim2 = width #left-right distance
elif ((config_N == 0) and (config_S == 0) and (config_W == 0) and (config_E == 0)): #W/E up-down, N/S left-right
dim1 = width #up-down distance
dim2 = length #left-right distance
    else:
        raise ValueError('ERROR: inconsistent sensor configuration!') #dim1/dim2 would otherwise be unbound
return(dim1, dim2) #to determineLoc
"""
"""
#Function to determine the car's square location on 7x7 board
#called every half a second to determine square location on board
def squareLocGeneral(distance_N, distance_W, distance_S, distance_E, dim1, dim2):
#North: A1->G1; West: A7->A1; South:G1->A1; East: A1->A7
#Declarations
#All distances in ft - MAYBE change these to be in CENTIMETERS or INCHES?
maxdistance_updown = 8
boardSize_updown = 7
maxdistance_leftright = 8
boardSize_leftright = 7
square_updown = 1
square_leftright = 1
#Distance not a part of the board
leftoverDistance_updown = 0.5
leftoverDistance_leftright = 0.5
#Subtract leftoverDistance(s) from total distance
distance_N = distance_N - leftoverDistance_updown
distance_S = distance_S - leftoverDistance_updown
distance_W = distance_W - leftoverDistance_leftright
distance_E = distance_E - leftoverDistance_leftright
#Now all distances within 7x7 ft board
#Now *measure distance in reference to center of car* - these change according to configuration - determined in assignLengthWidth function
updown_car = dim1
leftright_car = dim2
#Account for length and width of car - adding in the (1/2)(length/width) in all directions
distance_N = distance_N + (updown_car/2)
distance_S = distance_S + (updown_car/2)
    distance_W = distance_W + (leftright_car/2)
distance_E = distance_E + (leftright_car/2)
#Now all distances are in reference to center of car
#Overall distance (one direction) = Sensor reading (in ft) + (1/2)*(length/width of car, depending on config)
    #Determine square: updown first - ROUNDING DOWN
    #Range: 0-6 because ROUNDING DOWN (np.floor matches the ranges checked below; np.ceil would shift every square by one)
    distance_N_square = np.floor(distance_N/square_updown)
    distance_S_square = np.floor(distance_S/square_updown)
    #Determine square: leftright second - ROUNDING DOWN
    #Range: 0-6 because ROUNDING DOWN
    distance_W_square = np.floor(distance_W/square_leftright)
    distance_E_square = np.floor(distance_E/square_leftright)
#Ranges of squares: updown first
if ((distance_N_square == 6) and (distance_S_square == 0)):
letter = 'A'
elif ((distance_N_square == 5) and (distance_S_square == 1)):
letter = 'B'
elif ((distance_N_square == 4) and (distance_S_square == 2)):
letter = 'C'
elif ((distance_N_square == 3) and (distance_S_square == 3)):
letter = 'D'
elif ((distance_N_square == 2) and (distance_S_square == 4)):
letter = 'E'
elif ((distance_N_square == 1) and (distance_S_square == 5)):
letter = 'F'
elif ((distance_N_square == 0) and (distance_S_square == 6)):
letter = 'G'
    #Ranges of squares: leftright second
if ((distance_W_square == 0) and (distance_E_square == 6)):
digit = '1'
elif ((distance_W_square == 1) and (distance_E_square == 5)):
digit = '2'
elif ((distance_W_square == 2) and (distance_E_square == 4)):
digit = '3'
elif ((distance_W_square == 3) and (distance_E_square == 3)):
digit = '4'
elif ((distance_W_square == 4) and (distance_E_square == 2)):
digit = '5'
elif ((distance_W_square == 5) and (distance_E_square == 1)):
digit = '6'
elif ((distance_W_square == 6) and (distance_E_square == 0)):
digit = '7'
#Now combine letter and digit
location = letter + digit
return location #to determineLoc
"""
"""
#Function to determine the car's square location on 7x7 board
#called every half a second to determine square location on board
def squareLocWasher(distance_N, distance_W, distance_S, distance_E, dim1, dim2, sensorPatch):
#North: A1->G1; West: A7->A1; South:G1->A1; East: A1->A7
#Declarations
#All distances in ft - MAYBE change these to be in CENTIMETERS or INCHES?
maxdistance_updown = 8
boardSize_updown = 7
maxdistance_leftright = 8
boardSize_leftright = 7
square_updown = 1
square_leftright = 1
#Distance not a part of the board
leftoverDistance_updown = 0.5
leftoverDistance_leftright = 0.5
#Subtract leftoverDistance(s) from total distance
distance_N = distance_N - leftoverDistance_updown
distance_S = distance_S - leftoverDistance_updown
distance_W = distance_W - leftoverDistance_leftright
distance_E = distance_E - leftoverDistance_leftright
#Now all distances within 7x7 ft board
#Now *measure distance in reference to center of car* - these change according to configuration - determined in assignLengthWidth function
updown_car = dim1
leftright_car = dim2
#Account for length and width of car - washerinFront condition - ADDED THIS
#sensorPatch = direction of sensor 1 in CONFIGURATION
#In direction of sensor1 (i.e. N), subtract distance addition of pushing mechanism
#In "opposite" direction of sensor1 (i.e. S), add in length of entire updown_car
#In other directions (i.e. W, E), follow same procedure: 1/2(width) -> center of car
#Length: all in reference to FRONT CENTER of pushing mechanism
#Note in original config: length = 0.585 ft, width = 0.36 ft
#Pushing mechanism adds 0.17ft in sensor1 direction (N in config1)
pushaddition = 0.17
if (sensorPatch == "N"): #configuration 1
distance_N = distance_N - pushaddition #subtract length of pushing mechanism
distance_S = distance_S + updown_car #add entire length of car to S reading
distance_W = distance_W + (leftright_car/2)
distance_E = distance_E + (leftright_car/2)
elif (sensorPatch == "S"): #configuration 3
distance_N = distance_N + updown_car
distance_S = distance_S - pushaddition
distance_W = distance_W + (leftright_car/2)
distance_E = distance_E + (leftright_car/2)
elif (sensorPatch == "W"): #configuration 2
distance_N = distance_N + (updown_car/2)
distance_S = distance_S + (updown_car/2)
distance_W = distance_W - pushaddition
distance_E = distance_E + leftright_car
elif (sensorPatch == "E"): #configuration 2 or 4
distance_N = distance_N + (updown_car/2)
distance_S = distance_S + (updown_car/2)
distance_W = distance_W + leftright_car
distance_E = distance_E - pushaddition
#Now all distances are in reference to middle center of pushing mechanism!
#ONLY when we are in front of washer use this!
    #Determine square: updown first - ROUNDING DOWN
    #Range: 0-6 because ROUNDING DOWN (np.floor matches the ranges checked below; np.ceil would shift every square by one)
    distance_N_square = np.floor(distance_N/square_updown)
    distance_S_square = np.floor(distance_S/square_updown)
    #Determine square: leftright second - ROUNDING DOWN
    #Range: 0-6 because ROUNDING DOWN
    distance_W_square = np.floor(distance_W/square_leftright)
    distance_E_square = np.floor(distance_E/square_leftright)
#Ranges of squares: updown first
if ((distance_N_square == 6) and (distance_S_square == 0)):
letter = 'A'
elif ((distance_N_square == 5) and (distance_S_square == 1)):
letter = 'B'
elif ((distance_N_square == 4) and (distance_S_square == 2)):
letter = 'C'
elif ((distance_N_square == 3) and (distance_S_square == 3)):
letter = 'D'
elif ((distance_N_square == 2) and (distance_S_square == 4)):
letter = 'E'
elif ((distance_N_square == 1) and (distance_S_square == 5)):
letter = 'F'
elif ((distance_N_square == 0) and (distance_S_square == 6)):
letter = 'G'
    #Ranges of squares: leftright second
if ((distance_W_square == 0) and (distance_E_square == 6)):
digit = '1'
elif ((distance_W_square == 1) and (distance_E_square == 5)):
digit = '2'
elif ((distance_W_square == 2) and (distance_E_square == 4)):
digit = '3'
elif ((distance_W_square == 3) and (distance_E_square == 3)):
digit = '4'
elif ((distance_W_square == 4) and (distance_E_square == 2)):
digit = '5'
elif ((distance_W_square == 5) and (distance_E_square == 1)):
digit = '6'
elif ((distance_W_square == 6) and (distance_E_square == 0)):
digit = '7'
#Now combine letter and digit
location = letter + digit
return location #to determineLoc
|
|
import numpy as np
import Domains
import argparse
from multiprocessing import Pool
import subprocess, random, os
# Modify the following lines according to your NUPACK installation:
nupack_path = os.environ['HOME'] + '/nupack3.2.2/build/bin'
nupack_env = {'NUPACKHOME' : os.environ['HOME'] + '/nupack3.2.2'}
subopt_gap = 0.99
# Change the following parameters for other environments:
L_init = 5 # Initiation unit
dL = 5 # elongation unit (also means CG unit)
MULTI_PROCESS = 192
def nupack_mfe(sequence, T=37):
# Use NUPACK to calculate the minimum-free-energy secondary structure of a sequence
# NOTE: Returns a secondary structure in the (((.))) notation
seq = sequence
rint = int(random.random() * 1.e9)
tmp = nupack_path + '/tmp/%d' % rint
with open(tmp + '.in', 'w') as f:
f.write("%s\n" % seq)
subprocess.call([nupack_path + '/mfe', '-T', str(T), tmp]) #, env=nupack_env) #Using system path env
with open(tmp + '.mfe', 'r') as f:
for line in f:
if len(line) > 1 and all(c in '(.)' for c in line.strip()):
ss = line.strip()
os.remove(tmp + '.in')
os.remove(tmp + '.mfe')
return ss
def nupack_subopt(sequence, T=37, gap=subopt_gap):
    # Use NUPACK to enumerate suboptimal secondary structures within `gap`
    # kcal/mol of the minimum free energy
    # NOTE: Returns a list of secondary structures in the (((.))) notation
seq = sequence
# print(seq)
rint = int(random.random() * 1.e9)
tmp = nupack_path + '/tmp/%d' % rint
with open(tmp + '.in', 'w') as f:
f.write("%s\n" % seq)
f.write("%f\n" % gap)
subprocess.call([nupack_path + '/subopt', '-T', str(T), tmp]) # , env=nupack_env) Using system path env
sss = []
with open(tmp + '.subopt', 'r') as f:
for line in f:
if len(line) > 1 and all(c in '(.)' for c in line.strip()):
ss = line.strip()
sss.append(ss)
os.remove(tmp + '.in')
os.remove(tmp + '.subopt')
return sss
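# Usage sketch (assumes a local NUPACK 3.2.2 build at nupack_path with a
# writable tmp/ directory next to the binaries; the sequence is hypothetical):
# ss = nupack_mfe("GGGAGUUCGCUCCC")        # one dot-bracket string
# sss = nupack_subopt("GGGAGUUCGCUCCC")    # list of dot-bracket strings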
def save_foldon(l_bound, r_bound, ss, f):
f.write(f'{l_bound} {r_bound} {ss} \n')
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('sequence', type=str, help="RNA sequence prefix (one line input)")
parser.add_argument('--path', type=str, default="foldons.dat", help="path to store foldons")
parser.add_argument('--mfe-only', action='store_true', help="Use mfe structures only")
clargs = parser.parse_args()
with open(clargs.sequence + '.in', 'r') as sequence_file:
full_sequence = sequence_file.readline().rstrip('\n')
# NOTE: Initiation [create active population]
all_foldons = Domains.FoldonCollection()
    sequence_length = len(full_sequence)
    # sequence_length = 300  # debug override; uncomment to cap the analyzed length
foldons = open(clargs.path, 'w+')
if clargs.mfe_only:
for current_length in range(L_init, sequence_length+dL, dL):
l_bounds = np.arange(0, current_length, dL)
multi_pool = Pool()
new_foldons_ss = list(multi_pool.map(nupack_mfe, [full_sequence[l_bound:current_length] for l_bound in l_bounds]))
multi_pool.close()
multi_pool.join()
for i in range(len(l_bounds)):
save_foldon(l_bounds[i], current_length, new_foldons_ss[i], foldons)
foldons.flush()
else:
for current_length in range(L_init, sequence_length, dL):
l_bounds = np.arange(0, current_length, dL)
multi_pool = Pool()
new_foldons_sss = list(multi_pool.map(nupack_subopt, [full_sequence[l_bound:current_length] for l_bound in l_bounds]))
multi_pool.close()
multi_pool.join()
for i in range(len(l_bounds)):
for ss in new_foldons_sss[i]:
save_foldon(l_bounds[i], current_length, ss, foldons)
foldons.flush()
current_length = sequence_length
l_bounds = np.arange(0, current_length, dL)
multi_pool = Pool()
new_foldons_sss = list(multi_pool.map(nupack_subopt, [full_sequence[l_bound:current_length] for l_bound in l_bounds]))
multi_pool.close()
multi_pool.join()
for i in range(len(l_bounds)):
for ss in new_foldons_sss[i]:
save_foldon(l_bounds[i], current_length, ss, foldons)
foldons.flush()
foldons.close()
exit()
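# Example invocation (file names are assumptions for illustration): expects
# 'myseq.in' to hold one RNA sequence on its first line and writes foldons
# to 'foldons.dat':
#   python <this_script>.py myseq --path foldons.dat --mfe-only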
|
|
"""
Apply PCA first, then z-score the spatiotemporal covariates
"""
import argparse
import sys
import os
os.chdir(os.path.join(".."))
sys.path.insert(0, 'SSF_mip/')
import numpy as np
import pandas as pd
import preprocess
import cfg_target_test as cfg
parser = argparse.ArgumentParser()
parser.add_argument('--var', type=str, help='var name')
parser.add_argument('--location', type=str, help='var location')
args = parser.parse_args()
var_location = args.location
var_name = args.var
# idx = pd.IndexSlice
# date_id = 'start_date'
# Load data from path
rootpath = cfg.rootpath_data
path_to_save = cfg.savepath_data
if var_location == 'global':
filename = 'covariates_global.h5'
elif var_location == 'us':
filename = 'covariates_us.h5'
elif var_location == 'pacific':
filename = 'covariates_pacific.h5'
elif var_location == 'atlantic':
filename = 'covariates_atlantic.h5'
else:
raise ValueError("No such covariate!")
train_start = pd.Timestamp(cfg.train_start_date)
train_end = pd.Timestamp(cfg.train_end_date)
test_start = pd.Timestamp(cfg.test_start_date)
test_end = pd.Timestamp(cfg.end_date)
n_components = cfg.num_pcs
data = pd.read_hdf(rootpath + filename)
preprocess.get_pca_from_covariate(path_to_save,
data,
var_name, var_location,
train_start, train_end,
test_start, test_end,
n_components=n_components)
pca_filename = '{}_{}_pca_all.h5'.format(var_location, var_name)
pca_data = pd.read_hdf(path_to_save + pca_filename)
for i in range(n_components):
var = '{}_{}_pca_{}'.format(var_location, var_name, i)
preprocess.zscore_temporal(path_to_save,
pca_data, var,
train_start, train_end,
test_start, test_end)
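# Example invocation (script name and variable name are illustrative; the
# paths and date ranges come from cfg_target_test):
#   python <this_script>.py --var sst --location pacific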
|
|
from typing import List
import numpy
import zarr
import pandas
import pkg_resources
DATE_COLUMNS = ['53-0.0', '53-1.0', '53-2.0']
def load_dtype_dictionary():
path = pkg_resources.resource_filename('ukb_loader', 'all_dtypes.csv')
dtype_frame = pandas.read_csv(path)
dtype_dict = {}
convert_dict = {
'string': pandas.StringDtype(),
'Int64': pandas.Int64Dtype(),
'Float64': pandas.Float64Dtype()
}
for col, d in zip(dtype_frame.column, dtype_frame.inferred_dtype):
dtype_dict[col] = convert_dict[d]
return dtype_dict
def get_all_columns(dataset_path: str) -> List[str]:
with open(dataset_path, 'r') as d:
all_columns = [c.strip().strip('"') for c in d.readline().split(',')]
return all_columns
def get_bad_date_columns(columns, date_fields_path: str = 'date_fields.csv') -> List[str]:
dc = set(pandas.read_csv(date_fields_path).field_id)
bad = []
for c in columns[1:]: # without eid
if int(c.split('-')[0]) in dc:
bad.append(c)
return bad
def find_indices(source, to_find):
indices = []
found = []
for fi, f in enumerate(to_find):
for i, s in enumerate(source):
if f == s:
indices.append(i)
found.append(f)
break
return indices, found
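# Illustrative example (made-up column names):
#   find_indices(['eid', '53-0.0', '31-0.0'], ['53-0.0', '99-9.9'])
#   -> ([1], ['53-0.0']), i.e. positions in `source` plus the names found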
class IndexData:
def __init__(self, date: List[int], date_found: List[str],
str_col: List[int], str_found: List[str],
float_col: List[int], float_found: List[str]) -> None:
self.date = date
self.str_col = str_col
self.float_col = float_col
self.date_found = date_found
self.str_found = str_found
self.float_found = float_found
class Converter:
def __init__(self, datasets: List[str], zarr_path: str, rows_count: int = None, batch_size: int = 1024, columns: List[str] = None, verbose: bool = False) -> None:
self.datasets = datasets
self.zarr_path = zarr_path
self.rows_count = rows_count
self.columns = columns
self.batch_size = batch_size
self.str_batch_size = batch_size // 8
self.verbose = verbose
if not isinstance(datasets, list) or len(datasets) == 0:
raise ValueError(f'Datasets should be a list of full paths to them, not {datasets}')
if columns is not None and 'eid' in columns:
raise ValueError('Please remove eid from columns, it will be read anyway')
if columns is not None:
for d in DATE_COLUMNS:
if d in columns:
raise ValueError(f'Please remove {d} column, date assessments will be loaded anyway')
self.all_columns = {path: get_all_columns(path) for path in self.datasets}
self.eid_index = 0
self.dtype_dict = load_dtype_dictionary()
self.index_dict = self._create_index_dict(self.datasets, self.all_columns, self.columns, self.dtype_dict)
self.common_eids, self.masks_indices = self._create_eid_data(datasets)
if self.rows_count is None:
self.rows_count = len(self.common_eids)
def _get_ac_str_columns(self, requested_columns, columns, all_str_columns, index_dict):
old_ac_set = set()
old_str_set = set()
for index_data in index_dict.values():
old_ac_set |= set(index_data.float_found)
old_str_set |= set(index_data.str_found)
if requested_columns is None:
ac_columns = [d for d in columns if d not in DATE_COLUMNS and d not in all_str_columns and d not in old_ac_set]
str_columns = [d for d in columns if d in all_str_columns and d not in old_str_set]
else:
ac_columns = [d for d in columns if d not in DATE_COLUMNS and d in requested_columns and d not in all_str_columns and d not in old_ac_set]
str_columns = [d for d in columns if d in requested_columns and d in all_str_columns and d not in old_str_set]
return ac_columns, str_columns
def _create_index_dict(self, datasets, all_columns, requested_columns, dtype_dict):
index_dict = {}
for path in datasets:
ac = all_columns[path]
# all_str_columns = get_bad_columns(ac, DATE_FIELDS_PATH, BAD_COLS_PATH)
all_str_columns = set()
for col, dtype in dtype_dict.items():
if dtype == object or dtype == pandas.StringDtype():
if col not in DATE_COLUMNS and col in ac:
all_str_columns.add(col)
date_indices, date_found = find_indices(ac, DATE_COLUMNS)
# good float columns from path
ac_columns, str_columns = self._get_ac_str_columns(requested_columns, ac, all_str_columns, index_dict)
float_col, found_float_col = find_indices(ac, ac_columns)
str_col, found_str_col = find_indices(ac, str_columns)
index_data = IndexData(date_indices, date_found, str_col, found_str_col, float_col, found_float_col)
index_dict[path] = index_data
return index_dict
def _create_eid_data(self, datasets):
eid_dict = {}
common_eids = None
for path in datasets:
if self.verbose:
print(f'starting reading eid data from {path}')
eid = pandas.read_csv(path, usecols=[0], nrows=self.rows_count).iloc[:, 0].values
if common_eids is None:
    common_eids = set(eid)
else:
    # NOTE: despite the name, this accumulates the union of eids across
    # datasets; the per-dataset masks built below handle missing rows
    common_eids |= set(eid)
eid_dict[path] = eid
if self.verbose:
print(f'ended reading eid data from {path}')
masks_indices = {}
ce_list = sorted(list(common_eids))
for path, eid in eid_dict.items():
eid_set = set(eid)
mask = numpy.array([e in eid_set for e in ce_list])
mask_indices = numpy.arange(len(ce_list))[mask]
masks_indices[path] = mask_indices
if self.verbose:
print(f'mask analysis for {path}')
print(f'min: {min(mask_indices)}, max: {max(mask_indices)}, sum: {mask.sum()}')
if self.verbose:
print('created masks_indices and eid_dict')
return ce_list, masks_indices
def _read_dates_eid(self, source_path: str, date_indices, dates: zarr.Array, eid_array: zarr.Array, eid_mask_indices):
for i, chunk in enumerate(
pandas.read_csv(
source_path,
usecols=[self.eid_index] + date_indices,
chunksize=self.batch_size,
nrows=self.rows_count,
encoding='cp1252',
parse_dates=list(range(1, len(date_indices) + 1)))
):
start, end = i*self.batch_size, i*self.batch_size + chunk.shape[0]
chunk_indices = eid_mask_indices[start: end]
dates.set_orthogonal_selection((chunk_indices, slice(None)), chunk.iloc[:, 1:].values)
eid_chunk = chunk.iloc[:, 0].values.reshape(-1, 1)
eid_array.set_orthogonal_selection((chunk_indices, slice(None)), eid_chunk)
def _read_float(self, source_path: str, indices, dataset: zarr.Array, left, col_array, cols, eid_mask_indices):
for j, fc in enumerate(
pandas.read_csv(
source_path,
usecols=indices,
chunksize=self.batch_size,
nrows=self.rows_count,
encoding='cp1252',
low_memory=False)
):
start, end = j*self.batch_size, j*self.batch_size + fc.shape[0]
right = left + len(indices)
chunk_indices = eid_mask_indices[start: end]
dataset.set_orthogonal_selection((chunk_indices, slice(left, right)), fc.apply(pandas.to_numeric, errors='coerce').values)
# dataset[chunk_indices, left:right] = fc.apply(pandas.to_numeric, errors='coerce').values
col_array[left:right] = cols
if self.verbose:
print(f'converted float batch number {j} with {len(cols)} columns')
def _read_str(self, source_path: str, indices, str_dataset: zarr.Array, left, col_array, cols, eid_mask_indices):
for j, fc in enumerate(
pandas.read_csv(
source_path,
usecols=indices,
chunksize=self.str_batch_size,
nrows=self.rows_count,
encoding='cp1252',
low_memory=False)
):
start, end = j*self.str_batch_size, j*self.str_batch_size + fc.shape[0]
fc.fillna('', inplace=True)
right = left + len(indices)
chunk_indices = eid_mask_indices[start: end]
str_dataset.set_orthogonal_selection((chunk_indices, slice(left, right)), fc.astype(str).values)
# str_dataset[chunk_indices, left:right] = fc.astype(str).values
col_array[left:right] = cols
if self.verbose:
print(f'converted str batch number {j} with {len(cols)} columns')
def _calculate_str_float_array_len(self):
    return (sum(len(indices.str_col) for indices in self.index_dict.values()),
            sum(len(indices.float_col) for indices in self.index_dict.values()))
def convert(self):
with zarr.open_group(self.zarr_path, mode='w') as group:
str_len, float_len = self._calculate_str_float_array_len()
eid_array = group.zeros('eid', shape=(self.rows_count, 1), dtype='i4')
array = group.zeros('dataset', shape=(self.rows_count, float_len), chunks=(self.batch_size, float_len), dtype='f4')
col_array = group.create('columns', shape=(float_len, ), dtype='U16')
dates_array = group.create('dates', shape=(self.rows_count, len(DATE_COLUMNS)), dtype='M8[D]')
str_array = group.create('str_dataset', shape=(self.rows_count, str_len), dtype='U16', chunks=(self.batch_size, str_len))
str_col_array = group.create('str_columns', shape=(str_len, ), dtype='U16')
str_left, float_left = 0, 0
for path, indices in self.index_dict.items():
if self.verbose:
print()
print(f'Starting to convert dataset {path}')
eid_mask_indices = self.masks_indices[path]
if len(indices.date) > 0:
self._read_dates_eid(path, indices.date, dates_array, eid_array, eid_mask_indices)
if len(indices.str_col) > 0:
self._read_str(path, indices.str_col, str_array, str_left, str_col_array, indices.str_found, eid_mask_indices)
str_left += len(indices.str_col)
if len(indices.float_col) > 0:
self._read_float(path, indices.float_col, array, float_left, col_array, indices.float_found, eid_mask_indices)
float_left += len(indices.float_col)
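# Minimal usage sketch (file names and the requested column are assumptions
# for illustration, not part of this module):
# if __name__ == '__main__':
#     converter = Converter(['ukb_release_1.csv', 'ukb_release_2.csv'],
#                           'ukb.zarr', columns=['21003-0.0'], verbose=True)
#     converter.convert()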
|
|
import time
from dqo.datasets import QueriesDataset
from dqo.relational import SQLParser
from dqo.relational.tree.parser import parse_ast
from dqo.relational.query.parser import parse_tree
from tqdm.auto import tqdm
from multiprocessing import Pool
if __name__ == '__main__':
ds = QueriesDataset('imdb:small_uniform')
df = ds.load()
p = Pool(16)
start = time.time()
asts = [ast.root for ast in tqdm(p.imap(SQLParser.to_ast, df['query']), total=len(df))]
took = time.time() - start
print(f'to_ast: {took / len(df):.6f} s per query')
assert len(df) == len(asts)
start = time.time()
trees = list(tqdm(p.imap(parse_ast, asts), total=len(asts)))
took = time.time() - start
print(f'parse_ast: {took / len(df):.6f} s per query')
assert len(df) == len(trees)
start = time.time()
queries = list(tqdm(p.imap(parse_tree, trees), total=len(trees)))
took = time.time() - start
print(f'parse_tree: {took / len(df):.6f} s per query')
assert len(df) == len(queries)
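# Shut the worker pool down cleanly (the original script leaves the workers
# to be reaped at interpreter exit).
p.close()
p.join()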
|
|
import sys
sys.path.append("..")
from geneticAlgorithm.solver import *
chromosome = solve([1, 1, 1, 1, 0], 5, binCapacity=2)
position = chrom2position(chromosome)
print(position)
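# Note: solve and chrom2position are assumed to be provided by
# geneticAlgorithm.solver through the wildcard import above.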
|
|
# TODO: introduce better object naming system for logging
# e.g. <Sound object at 0x7fa535d23870> -> Sound X
import logging
import time
from typing import List
from .braillify import H_STEP, V_STEP
from .common import Vec2
from .events import AnimationEndedEvent, CollisionEvent, Event, CollisionTypes, PlayerShootEvent
from .graphics import Canvas, Camera
from .objects import Bullet, Enemy, ExplosionFactory, Player, Goal, \
ObjectManager, BulletFactory, Block, Explosion
from .physics import CollisionManager
from .sounds import SoundManager
from .textures import TextureManager
logger = logging.getLogger(__name__)
def terminal_size(stdscr) -> Vec2:
"""
Get terminal size in characters and multiply by V_STEP and H_STEP
"""
rows, cols = stdscr.getmaxyx()
return Vec2((cols - 1) * H_STEP, rows * V_STEP)
class Clock:
def __init__(self) -> None:
self.time = 0
def start(self) -> None:
self.time = time.time()
def get_elapsed(self) -> float:
now = time.time()
elapsed = now - self.time
self.time = now
return elapsed
class Profiler:
PROFILE_CAPTION = ['clear_canvas', 'update_objects', 'check_collisions',
'resolve_events', 'update_camera', 'update_canvas']
def __init__(self) -> None:
self.clock = Clock()
self.profile_times = []
def start(self):
self.clock.start()
def tick(self):
self.profile_times.append(self.clock.get_elapsed())
def dump(self):
result = ', '.join([f'{c}={dt:.5f}s'
for c, dt in zip(self.PROFILE_CAPTION, self.profile_times)])
self.profile_times = []
return result
class Game:
def __init__(self, window, config) -> None:
self.window = window
self.clock = Clock()
self.debug = config['debug']
if self.debug:
self.profiler = Profiler()
# Set up sounds
SoundManager.init(config['sounds'])
# Set up canvas
canvas_config = config['canvas']
size = list(terminal_size(window))
canvas_config['size'][1] = size[1]
canvas_config['window_size'] = size
self.canvas = Canvas(window, canvas_config)
# Set full height for goal texture
texture_config = config['textures']
assert 'goal' in texture_config
texture_config['goal']['height'] = size[1]
logger.info(texture_config)
# Load textures
TextureManager.init(texture_config)
# Set up camera
camera_mode = config['camera']['mode']
if camera_mode != 'follow':
raise ValueError("Unsupported camera mode")
camera_speed = config['objects']['player']['speed']
self.camera = Camera(x=0, mode=camera_mode, speed=camera_speed)
# Set up in-game objects
object_config = config['objects']
object_config['goal']['start_pos'][1] = size[1] // 2
self.object_manager = ObjectManager()
self.bullet_factory = BulletFactory(object_config['bullet'])
self.explosion_factory = ExplosionFactory(object_config['explosion'])
self.object_manager.add_object(Player(**object_config['player'],
ymax=self.canvas.height))
self.object_manager.add_object(Goal(object_config['goal']))
for block in object_config['block']:
self.object_manager.add_object(Block(block))
for enemy in object_config['enemy']:
self.object_manager.add_object(Enemy(enemy))
# Set up collision physics
self.collision_manager = CollisionManager(self.object_manager, [
(Bullet.kind, Block.kind),
(Bullet.kind, Enemy.kind),
(Player.kind, Block.kind),
(Player.kind, Enemy.kind),
(Player.kind, Goal.kind)
])
self.is_running = True
self.result = None
def process_input(self, key: int) -> List[Event]:
events = self.object_manager.process_input(key)
if key == ord('q'):
self.is_running = False
return events
def run(self):
self.clock.start()
if self.debug:
self.profiler.start()
while self.is_running:
delta = self.clock.get_elapsed()
input_events = self.process_input(self.window.getch())
self.update(delta, input_events)
if self.debug:
debug_str = 'FPS: {:.2f}'.format(1 / delta)
self.window.addstr(0, 0, debug_str)
self.window.refresh()
return self.result
def update(self, delta: float, input_events: List[Event]) -> None:
self.canvas.clear()
if self.debug:
self.profiler.tick()
events = self.object_manager.update(self.canvas, delta)
if self.debug:
self.profiler.tick()
collisions = list(self.collision_manager.update())
if self.debug:
self.profiler.tick()
self.resolve(input_events + events + collisions)
if self.debug:
self.profiler.tick()
self.camera.update(delta)
if self.debug:
self.profiler.tick()
self.canvas.update(self.camera)
if self.debug:
self.profiler.tick()
logger.info(self.profiler.dump())
def resolve(self, events: List[Event]):
for e in events:
logger.info(f'Resolving event {e}')
if isinstance(e, CollisionEvent):
self.resolve_collision(e)
elif isinstance(e, AnimationEndedEvent):
self.resolve_animation_end(e)
elif isinstance(e, PlayerShootEvent):
self.resolve_player_shoot(e)
else:
raise TypeError("Unsupported event type in resolve")
def resolve_animation_end(self, e: AnimationEndedEvent):
obj = e.sender
logger.info(f'Resolving animation end event from {type(obj)}')
if isinstance(obj, Explosion):
self.object_manager.remove_object(obj)
else:
raise TypeError("Unexpected object type in resolve_animation_end")
def resolve_collision(self, c: CollisionEvent):
obj1, obj2 = c.collider, c.collided
typ = (obj1.kind, obj2.kind)
# Spawn an explosion at the center of the collision overlap
# TODO: spawns a little higher than one would expect
explosion = self.explosion_factory.create(c.pos)
if typ == CollisionTypes.BULLET_BLOCK.value:
# when bullet hits block, remove bullet
self.object_manager.remove_object(obj1)
self.object_manager.add_object(explosion)
SoundManager.play('block_hit')
elif typ == CollisionTypes.BULLET_ENEMY.value:
# when bullet hits enemy, remove both enemy and bullet
self.object_manager.remove_object(obj1)
self.object_manager.remove_object(obj2)
self.object_manager.add_object(explosion)
SoundManager.play('player_hit')
elif typ == CollisionTypes.PLAYER_ENEMY.value or typ == CollisionTypes.PLAYER_BLOCK.value:
    # when the player hits a block or an enemy, remove the player and end the game as a loss
self.object_manager.remove_object(obj1)
self.object_manager.add_object(explosion)
self.result = False
self.is_running = False
SoundManager.play('player_hit')
elif typ == CollisionTypes.PLAYER_GOAL.value:
# when player hits goal, remove player and end the game as a win
self.object_manager.remove_object(obj1)
self.result = True
self.is_running = False
# TODO: add winning sound
# TODO: add a short delay after game ends to finish the animations and sound
else:
raise ValueError(f"bad collision type: {typ}")
def resolve_player_shoot(self, e):
bullet = self.bullet_factory.create(e.sender.bullet_spawn_pos())
self.object_manager.add_object(bullet)
SoundManager.play('shoot')
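# Minimal launch sketch (an assumption, not the project's entry point:
# `config` must supply the keys read in Game.__init__, e.g. 'debug',
# 'sounds', 'canvas', 'textures', 'camera', 'objects'):
# import curses
# def main(stdscr):
#     stdscr.nodelay(True)
#     return Game(stdscr, config).run()
# curses.wrapper(main)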
|
|
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import PreprocessingBase
class NormalizeRmZeroStd(PreprocessingBase):
    """
    Not exactly a PCA: z-scores each feature and removes features whose
    training standard deviation is zero.
    """
def fit(self, X):
self.means = np.empty((0,))
self.stds = np.empty((0,))
transformed = np.empty(X.shape)
transformed_i = 0
for c in range(X.shape[1]):
    col = X[:, c]
    mean_col = np.mean(col)
    std_col = np.std(col)
    self.means = np.append(self.means, mean_col)
    self.stds = np.append(self.stds, std_col)
    if std_col != 0.0:
        # avoid mutating X in place: col is a view into X
        transformed[:, transformed_i] = (col - mean_col) / std_col
        transformed_i += 1
self.num_features = transformed_i
return transformed[:,:transformed_i]
def transform(self, X):
if len(X) == 0:
# TODO: handle edge case nicely.
return np.zeros((len(X), np.count_nonzero(self.stds)))
transformed = np.empty(X.shape)
transformed_i = 0
non_zero_counter = 0
for c in range(X.shape[1]):
    col = X[:, c]
    mean_col = self.means[c]
    std_col = self.stds[c]
    x_std_col = np.std(col)
    if x_std_col != 0 and std_col == 0:
        non_zero_counter += 1
    if std_col != 0.0:
        # avoid mutating X in place: col is a view into X
        transformed[:, transformed_i] = (col - mean_col) / std_col
        transformed_i += 1
if non_zero_counter != 0:
    print('{0} features with non-zero std in the new data (but zero std '
          'during training) were removed by this transform.'.format(non_zero_counter))
assert transformed_i == np.count_nonzero(self.stds)
return transformed[:,:transformed_i]
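# Illustrative usage (numbers are made up); the second column has zero std
# on fit, so it is dropped:
# prep = NormalizeRmZeroStd()
# train = np.array([[1.0, 5.0], [2.0, 5.0], [3.0, 5.0]])
# print(prep.fit(train))        # z-scored first column only
# print(prep.transform(train))  # same single column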
|