repo_name: cquiroz/scalatest
path: scalactic-test/src/test/scala/org/scalactic/RecursiveConstraintsSpec.scala
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import java.text._
import org.scalatest._
import java.util.Date
import scala.collection.mutable
//
// Going to need to deal with Array more specially at the nested level. Would need to take the Array
// Equality for the nested one. I think I could do this in general: have special implicits when the
// contained type is Array, for any and all containers. I think that would fix List[Array[T]] too.
// Nope, not now that it is recursive, but the TODO is to write tests for that.
//
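// A sketch of the nested-Array tests that TODO calls for, assuming the recursive constraints
// now cover Array elements (not part of the original suite):
//
//   List(Array(1, 2, 3)) shouldEqual Vector(Array(1L, 2L, 3L))
//   Array(List(1, 2, 3)) shouldEqual List(Vector(1L, 2L, 3L))
//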
class RecursiveConstraintsSpec extends Spec with Matchers with CheckedEquality {
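// With CheckedEquality mixed in, shouldEqual (and ===) only compiles when an equality constraint
// exists between the two static types being compared. The specs below check that such constraints
// are derived recursively through containers: Seq, Array, Set, Map, Every, Or, Either, Option, and Try.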
object `Recursive constraints should enable equality comparisons` {
def `on Seqs and Arrays` {
List(1, 2, 3) shouldEqual Vector(1L, 2L, 3L)
List(1, 2, 3) shouldEqual List(1L, 2L, 3L)
Vector(1, 2, 3) shouldEqual List(1L, 2L, 3L)
List(1, 2, 3) shouldEqual Array(1L, 2L, 3L)
Array(1, 2, 3) shouldEqual Array(1L, 2L, 3L)
Array(1, 2, 3) shouldEqual List(1L, 2L, 3L)
}
def `on nested Seqs` {
Vector(List(1, 2, 3)) shouldEqual List(Vector(1L, 2L, 3L))
List(List(1, 2, 3)) shouldEqual List(List(1L, 2L, 3L))
List(Vector(1, 2, 3)) shouldEqual Vector(List(1L, 2L, 3L))
}
def `on Sets` {
Set(1, 2, 3) shouldEqual mutable.HashSet(1L, 2L, 3L)
Set(1, 2, 3) shouldEqual Set(1L, 2L, 3L)
mutable.HashSet(1, 2, 3) shouldEqual Set(1L, 2L, 3L)
}
def `on nested Sets` {
mutable.HashSet(Set(1, 2, 3)) shouldEqual Set(mutable.HashSet(1L, 2L, 3L))
Set(Set(1, 2, 3)) shouldEqual Set(Set(1L, 2L, 3L))
Set(mutable.HashSet(1, 2, 3)) shouldEqual mutable.HashSet(Set(1L, 2L, 3L))
}
def `on Maps` {
Map("1" -> 1, "2" -> 2, "3" -> 3) shouldEqual mutable.HashMap("1" -> 1L, "2" -> 2L, "3" -> 3L)
Map("1" -> 1, "2" -> 2, "3" -> 3) shouldEqual Map("1" -> 1L, "2" -> 2L, "3" -> 3L)
mutable.HashMap("1" -> 1, "2" -> 2, "3" -> 3) shouldEqual Map("1" -> 1L, "2" -> 2L, "3" -> 3L)
}
def `on nested Maps` {
mutable.HashMap(0 -> Map("1" -> 1, "2" -> 2, "3" -> 3)) shouldEqual Map(0 -> mutable.HashMap("1" -> 1L, "2" -> 2L, "3" -> 3L))
Map(0 -> Map("1" -> 1, "2" -> 2, "3" -> 3)) shouldEqual Map(0 -> Map("1" -> 1L, "2" -> 2L, "3" -> 3L))
Map(0 -> mutable.HashMap("1" -> 1, "2" -> 2, "3" -> 3)) shouldEqual mutable.HashMap(0 -> Map("1" -> 1L, "2" -> 2L, "3" -> 3L))
}
def `on Every` {
One(1) shouldEqual One(1L)
Many(1, 2) shouldEqual Many(1L, 2L)
Every(1) shouldEqual One(1L)
Every(1, 2) shouldEqual Many(1L, 2L)
One(1) shouldEqual Every(1L)
Many(1, 2) shouldEqual Every(1L, 2L)
Every(1) shouldEqual Every(1L)
Every(1, 2) shouldEqual Every(1L, 2L)
// But if a One and a Many are compared, they can never be equal, so the comparison should not be allowed
"""One(1) === Many(1, 2)""" shouldNot typeCheck
"""Many(1, 2) === One(1)""" shouldNot typeCheck
}
def `on nested Every` {
List(One(1)) shouldEqual Vector(One(1L))
List(Many(1, 2)) shouldEqual Vector(Many(1L, 2L))
List(Every(1)) shouldEqual Vector(One(1L))
List(Every(1, 2)) shouldEqual Vector(Many(1L, 2L))
List(One(1)) shouldEqual Vector(Every(1L))
List(Many(1, 2)) shouldEqual Vector(Every(1L, 2L))
List(Every(1)) shouldEqual Vector(Every(1L))
List(Every(1, 2)) shouldEqual Vector(Every(1L, 2L))
// But if a One and a Many are compared, they can never be equal, so the comparison should not be allowed
"""List(One(1)) === Vector(Many(1, 2))""" shouldNot typeCheck
"""List(Many(1, 2)) === Vector(One(1))""" shouldNot typeCheck
}
def `on Or` {
// Both sides Good
(Good(1): Good[Int]) shouldEqual (Good(1L): Good[Long])
(Good(1): Good[Int]) shouldEqual (Good(1): Good[Int])
(Good(1L): Good[Long]) shouldEqual (Good(1): Good[Int])
(Good(1): Good[Int]) shouldEqual (Good(1): Good[Int])
// Given both sides are Good, it shouldn't matter if the Bad type has no constraint
(Good(1): Good[Int]) shouldEqual (Good(1): Good[Int])
(Good(1L): Good[Long]) shouldEqual (Good(1): Good[Int])
// But if both sides are Good but without a constraint, it should not compile, even
// if the Bad type has a constraint.
"""(Good(1L): Good[Long]) shouldEqual (Good("one"): Good[String])""" shouldNot typeCheck
// Left side Good, right side Or
(Good(1): Good[Int]) shouldEqual (Good(1L): Long Or Int)
(Good(1): Good[Int]) shouldEqual (Good(1): Int Or Long)
(Good(1L): Good[Long]) shouldEqual (Good(1): Int Or Int)
(Good(1): Good[Int]) shouldEqual (Good(1): Int Or Int)
// Given left side is Good, it shouldn't matter if the Bad type has no constraint
(Good(1): Good[Int]) shouldEqual (Good(1): Int Or String)
(Good(1L): Good[Long]) shouldEqual (Good(1): Int Or String)
// But if left side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""(Good(1L): Good[Long]) shouldEqual (Good("one"): Good[String])""" shouldNot typeCheck
// Right side Good, left side Or
(Good(1): Int Or Int) shouldEqual (Good(1L): Good[Long])
(Good(1): Int Or Int) shouldEqual (Good(1): Good[Int])
(Good(1L): Long Or Int) shouldEqual (Good(1): Good[Int])
(Good(1): Int Or Long) shouldEqual (Good(1): Good[Int])
// Given right side is Good, it shouldn't matter if the Bad type has no constraint
(Good(1): Int Or Long) shouldEqual (Good(1): Good[Int])
(Good(1L): Long Or Int) shouldEqual (Good(1): Good[Int])
// But if right side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""(Good(1L): Long Or Int) shouldEqual (Good("one"): Good[String])""" shouldNot typeCheck
// Both sides Bad
(Bad(1): Bad[Int]) shouldEqual (Bad(1): Bad[Int])
(Bad(1): Bad[Int]) shouldEqual (Bad(1L): Bad[Long])
(Bad(1): Bad[Int]) shouldEqual (Bad(1): Bad[Int])
(Bad(1L): Bad[Long]) shouldEqual (Bad(1): Bad[Int])
// Given both sides are Bad, it shouldn't matter if the Good type has no constraint
(Bad(1): Bad[Int]) shouldEqual (Bad(1): Bad[Int])
(Bad(1L): Bad[Long]) shouldEqual (Bad(1): Bad[Int])
// But if both sides are Bad but without a constraint, it should not compile, even
// if the Good type has a constraint.
"""(Bad(1L): Bad[Long]) shouldEqual (Bad("one"): Bad[String])""" shouldNot typeCheck
// Left side Bad, right side Or
(Bad(1): Bad[Int]) shouldEqual (Bad(1): Long Or Int)
(Bad(1): Bad[Int]) shouldEqual (Bad(1L): Int Or Long)
(Bad(1): Bad[Int]) shouldEqual (Bad(1): Int Or Int)
(Bad(1L): Bad[Long]) shouldEqual (Bad(1): Int Or Int)
// Given left side is Bad, it shouldn't matter if the Good type has no constraint
(Bad(1): Bad[Int]) shouldEqual (Bad(1): String Or Int)
(Bad(1L): Bad[Long]) shouldEqual (Bad(1): String Or Int)
// But if left side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(Bad(1L): Bad[Long]) shouldEqual (Bad("one"): Int Or String)""" shouldNot typeCheck
// Right side Bad, left side Or
(Bad(1): Int Or Int) shouldEqual (Bad(1): Bad[Int])
(Bad(1): Int Or Int) shouldEqual (Bad(1L): Bad[Long])
(Bad(1): Long Or Int) shouldEqual (Bad(1): Bad[Int])
(Bad(1L): Int Or Long) shouldEqual (Bad(1): Bad[Int])
// Given right side is Bad, it shouldn't matter if the Good type has no constraint
(Bad(1): Long Or Int) shouldEqual (Bad(1): Bad[Int])
(Bad(1L): Int Or Long) shouldEqual (Bad(1): Bad[Int])
// But if right side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(Bad(1L): Int Or Long) shouldEqual (Bad("one"): Bad[String])""" shouldNot typeCheck
// Both sides Or
(Good(1): Int Or Int) shouldEqual (Good(1L): Long Or Int)
(Good(1): Int Or Int) shouldEqual (Good(1): Int Or Long)
(Good(1L): Long Or Int) shouldEqual (Good(1): Int Or Int)
(Good(1): Int Or Long) shouldEqual (Good(1): Int Or Int)
(Bad(1): Int Or Int) shouldEqual (Bad(1): Long Or Int)
(Bad(1): Int Or Int) shouldEqual (Bad(1L): Int Or Long)
(Bad(1): Long Or Int) shouldEqual (Bad(1): Int Or Int)
(Bad(1L): Int Or Long) shouldEqual (Bad(1): Int Or Int)
// So long as an equality constraint exists for either the Good or the Bad side of type Or,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Good[Int].orBad[String] can equal a Good[Int].orBad[java.util.Date]
//
// scala> Good(1).orBad[String] == Good(1L).orBad[java.util.Date]
// res0: Boolean = true
//
// Similarly, a Good[Int].orBad[String] can equal a Good[java.util.Date].orBad[String]
// scala> Good[Int].orBad("hi") == Good[java.util.Date].orBad("hi")
// res1: Boolean = true
(Good(1).orBad[String]: Int Or String) shouldEqual (Good(1L).orBad[Date]: Long Or Date)
(Good[Int].orBad("hi"): Int Or String) shouldEqual (Good[Date].orBad("hi"): Date Or String)
// The only way an equality comparison of two Ors will not be allowed to compile, therefore, is if
// no constraint exists between either the Good or Bad types:
"""(Good(1): Int Or String) shouldEqual (Good("one"): String Or Int)""" shouldNot typeCheck
// Much ado about Nothing
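// (In the cases below, the unspecified side of the Or is inferred as Nothing; asOr just widens a
// Good or Bad value to its Or supertype so the comparison is resolved against an Or.)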
// Both sides Good
Good(1) shouldEqual Good(1L)
Good(1) shouldEqual Good(1)
Good(1L) shouldEqual Good(1)
Good(1) shouldEqual Good(1)
// Left side Good, right side Or
Good(1) shouldEqual Good(1L).asOr
Good(1) shouldEqual Good(1).asOr
Good(1L) shouldEqual Good(1).asOr
// Right side Good, left side Or
Good(1).asOr shouldEqual Good(1L)
Good(1).asOr shouldEqual Good(1)
Good(1L).asOr shouldEqual Good(1)
// Both sides Bad
Bad(1) shouldEqual Bad(1)
Bad(1) shouldEqual Bad(1L)
Bad(1L) shouldEqual Bad(1)
// Left side Bad, right side Or
Bad(1) shouldEqual Bad(1).asOr
Bad(1) shouldEqual Bad(1L).asOr
Bad(1L) shouldEqual Bad(1).asOr
// Right side Bad, left side Or
Bad(1).asOr shouldEqual Bad(1)
Bad(1).asOr shouldEqual Bad(1L)
Bad(1L).asOr shouldEqual Bad(1)
// Both sides Or
Good(1).asOr shouldEqual Good(1L).asOr
Good(1).asOr shouldEqual Good(1).asOr
Good(1L).asOr shouldEqual Good(1).asOr
Bad(1).asOr shouldEqual Bad(1).asOr
Bad(1).asOr shouldEqual Bad(1L).asOr
Bad(1L).asOr shouldEqual Bad(1).asOr
}
object `on Nested Or` {
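// Both a covariant container (List) and an invariant one (Set) are exercised, because variance
// affects how element types are inferred and therefore which recursive constraint must be found.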
def `with List (which is covariant)` {
// Both sides Good
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1L)): List[Good[Long]])
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1L)): List[Good[Long]]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Good[Int]])
// Given both sides are Good, it shouldn't matter if the Bad type has no constraint
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1L)): List[Good[Long]]) shouldEqual (List(Good(1)): List[Good[Int]])
// But if both sides are Good but without a constraint, it should not compile, even
// if the Bad type has a constraint.
"""(List(Good(1L)): List[Good[Long]]) shouldEqual (List(Good("one")): List[Good[String]])""" shouldNot typeCheck
// Left side Good, right side Or
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1L)): List[Long Or Int])
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Int Or Long])
(List(Good(1L)): List[Good[Long]]) shouldEqual (List(Good(1)): List[Int Or Int])
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Int Or Int])
// Given left side is Good, it shouldn't matter if the Bad type has no constraint
(List(Good(1)): List[Good[Int]]) shouldEqual (List(Good(1)): List[Int Or String])
(List(Good(1L)): List[Good[Long]]) shouldEqual (List(Good(1)): List[Int Or String])
// But if left side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""List((Good(1L)): List[Good[Long]]) shouldEqual (List(Good("one")): List[Good[String]])""" shouldNot typeCheck
// Right side Good, left side Or
(List(Good(1)): List[Int Or Int]) shouldEqual (List(Good(1L)): List[Good[Long]])
(List(Good(1)): List[Int Or Int]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1L)): List[Long Or Int]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1)): List[Int Or Long]) shouldEqual (List(Good(1)): List[Good[Int]])
// Given right side is Good, it shouldn't matter if the Bad type has no constraint
(List(Good(1)): List[Int Or Long]) shouldEqual (List(Good(1)): List[Good[Int]])
(List(Good(1L)): List[Long Or Int]) shouldEqual (List(Good(1)): List[Good[Int]])
// But if right side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""(List(Good(1L)): List[Long Or Int]) shouldEqual (List(Good("one")): List[Good[String]])""" shouldNot typeCheck
// Both sides Bad
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1L)): List[Bad[Long]])
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad(1)): List[Bad[Int]])
// Given both sides are Bad, it shouldn't matter if the Good type has no constraint
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad(1)): List[Bad[Int]])
// But if both sides are Bad but without a constraint, it should not compile, even
// if the Good type has a constraint.
"""(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad("one")): List[Bad[String]])""" shouldNot typeCheck
// Left side Bad, right side Or
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[Long Or Int])
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1L)): List[Int Or Long])
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[Int Or Int])
(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad(1)): List[Int Or Int])
// Given left side is Bad, it shouldn't matter if the Good type has no constraint
(List(Bad(1)): List[Bad[Int]]) shouldEqual (List(Bad(1)): List[String Or Int])
(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad(1)): List[String Or Int])
// But if left side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(List(Bad(1L)): List[Bad[Long]]) shouldEqual (List(Bad("one")): List[Int Or String])""" shouldNot typeCheck
// Right side Bad, left side Or
(List(Bad(1)): List[Int Or Int]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1)): List[Int Or Int]) shouldEqual (List(Bad(1L)): List[Bad[Long]])
(List(Bad(1)): List[Long Or Int]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1L)): List[Int Or Long]) shouldEqual (List(Bad(1)): List[Bad[Int]])
// Given right side is Bad, it shouldn't matter if the Good type has no constraint
(List(Bad(1)): List[Long Or Int]) shouldEqual (List(Bad(1)): List[Bad[Int]])
(List(Bad(1L)): List[Int Or Long]) shouldEqual (List(Bad(1)): List[Bad[Int]])
// But if right side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(List(Bad(1L)): List[Int Or Long]) shouldEqual (List(Bad("one")): List[Bad[String]])""" shouldNot typeCheck
// Both sides Or
(List(Good(1)): List[Int Or Int]) shouldEqual (List(Good(1L)): List[Long Or Int])
(List(Good(1)): List[Int Or Int]) shouldEqual (List(Good(1)): List[Int Or Long])
(List(Good(1L)): List[Long Or Int]) shouldEqual (List(Good(1)): List[Int Or Int])
(List(Good(1)): List[Int Or Long]) shouldEqual (List(Good(1)): List[Int Or Int])
(List(Bad(1)): List[Int Or Int]) shouldEqual (List(Bad(1)): List[Long Or Int])
(List(Bad(1)): List[Int Or Int]) shouldEqual (List(Bad(1L)): List[Int Or Long])
(List(Bad(1)): List[Long Or Int]) shouldEqual (List(Bad(1)): List[Int Or Int])
(List(Bad(1L)): List[Int Or Long]) shouldEqual (List(Bad(1)): List[Int Or Int])
// So long as an equality constraint exists for either the Good or the Bad side of type Or,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Good[Int].orBad[String] can equal a Good[Int].orBad[java.util.Date]
//
// scala> Good(1).orBad[String] == Good(1L).orBad[java.util.Date]
// res0: Boolean = true
//
// Similarly, a Good[Int].orBad[String] can equal a Good[java.util.Date].orBad[String]
// scala> Good[Int].orBad("hi") == Good[java.util.Date].orBad("hi"])
// res1: Boolean = true
(List(Good(1).orBad[String]): List[Int Or String]) shouldEqual (List(Good(1L).orBad[Date]): List[Long Or Date])
(List(Good[Int].orBad("hi")): List[Int Or String]) shouldEqual (List(Good[Date].orBad("hi")): List[Date Or String])
// The only way an equality comparison of two Ors will not be allowed to compile, therefore, is if
// no constraint exists between either the Good or Bad types:
"""(List(Good(1)): List[Int Or String]) shouldEqual (List(Good("one")): List[String Or Int])""" shouldNot typeCheck
// Much ado about Nothing
// Both sides List[Good]
List(Good(1)) shouldEqual List(Good(1L))
List(Good(1)) shouldEqual List(Good(1))
List(Good(1L)) shouldEqual List(Good(1))
List(Good(1)) shouldEqual List(Good(1))
// Left side List[Good], right side List[Or]
List(Good(1)) shouldEqual List(Good(1L).asOr)
List(Good(1)) shouldEqual List(Good(1).asOr)
List(Good(1L)) shouldEqual List(Good(1).asOr)
// Right side List[Good], left side List[Or]
List(Good(1).asOr) shouldEqual List(Good(1L))
List(Good(1).asOr) shouldEqual List(Good(1))
List(Good(1L).asOr) shouldEqual List(Good(1))
// Both sides List[Bad]
List(Bad(1)) shouldEqual List(Bad(1))
List(Bad(1)) shouldEqual List(Bad(1L))
List(Bad(1L)) shouldEqual List(Bad(1))
// Left side List[Bad], right side List[Or]
List(Bad(1)) shouldEqual List(Bad(1).asOr)
List(Bad(1)) shouldEqual List(Bad(1L).asOr)
List(Bad(1L)) shouldEqual List(Bad(1).asOr)
// Right side List[Bad], left side List[Or]
List(Bad(1).asOr) shouldEqual List(Bad(1))
List(Bad(1).asOr) shouldEqual List(Bad(1L))
List(Bad(1L).asOr) shouldEqual List(Bad(1))
// Both sides List[Or]
List(Good(1).asOr) shouldEqual List(Good(1L).asOr)
List(Good(1).asOr) shouldEqual List(Good(1).asOr)
List(Good(1L).asOr) shouldEqual List(Good(1).asOr)
List(Bad(1).asOr) shouldEqual List(Bad(1).asOr)
List(Bad(1).asOr) shouldEqual List(Bad(1L).asOr)
List(Bad(1L).asOr) shouldEqual List(Bad(1).asOr)
}
def `with Set (which is invariant)` {
// Both sides Good
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1L)): Set[Good[Long]])
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Good[Int]])
// Given both sides are Good, it shouldn't matter if the Bad type has no constraint
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good(1)): Set[Good[Int]])
// But if both sides are Good but without a constraint, it should not compile, even
// if the Bad type has a constraint.
"""(Set(Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good("one")): Set[Good[String]])""" shouldNot typeCheck
// Left side Good, right side Or
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1L)): Set[Long Or Int])
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Int Or Long])
(Set(Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good(1)): Set[Int Or Int])
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Int Or Int])
// Given left side is Good, it shouldn't matter if the Bad type has no constraint
(Set(Good(1)): Set[Good[Int]]) shouldEqual (Set(Good(1)): Set[Int Or String])
(Set(Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good(1)): Set[Int Or String])
// But if left side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""Set((Good(1L)): Set[Good[Long]]) shouldEqual (Set(Good("one")): Set[Good[String]])""" shouldNot typeCheck
// Right side Good, left side Or
(Set(Good(1)): Set[Int Or Int]) shouldEqual (Set(Good(1L)): Set[Good[Long]])
(Set(Good(1)): Set[Int Or Int]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1L)): Set[Long Or Int]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1)): Set[Int Or Long]) shouldEqual (Set(Good(1)): Set[Good[Int]])
// Given right side is Good, it shouldn't matter if the Bad type has no constraint
(Set(Good(1)): Set[Int Or Long]) shouldEqual (Set(Good(1)): Set[Good[Int]])
(Set(Good(1L)): Set[Long Or Int]) shouldEqual (Set(Good(1)): Set[Good[Int]])
// But if right side is Good but without a constraint between left and right Good types, it should not compile, even
// if the Bad type has a constraint.
"""(Set(Good(1L)): Set[Long Or Int]) shouldEqual (Set(Good("one")): Set[Good[String]])""" shouldNot typeCheck
// Both sides Bad
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1L)): Set[Bad[Long]])
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
// Given both sides are Bad, it shouldn't matter if the Good type has no constraint
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
// But if both sides are Bad but without a constraint, it should not compile, even
// if the Good type has a constraint.
"""(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad("one")): Set[Bad[String]])""" shouldNot typeCheck
// Left side Bad, right side Or
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[Long Or Int])
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1L)): Set[Int Or Long])
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[Int Or Int])
(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad(1)): Set[Int Or Int])
// Given left side is Bad, it shouldn't matter if the Good type has no constraint
(Set(Bad(1)): Set[Bad[Int]]) shouldEqual (Set(Bad(1)): Set[String Or Int])
(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad(1)): Set[String Or Int])
// But if left side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(Set(Bad(1L)): Set[Bad[Long]]) shouldEqual (Set(Bad("one")): Set[Int Or String])""" shouldNot typeCheck
// Right side Bad, left side Or
(Set(Bad(1)): Set[Int Or Int]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1)): Set[Int Or Int]) shouldEqual (Set(Bad(1L)): Set[Bad[Long]])
(Set(Bad(1)): Set[Long Or Int]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1L)): Set[Int Or Long]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
// Given right side is Bad, it shouldn't matter if the Good type has no constraint
(Set(Bad(1)): Set[Long Or Int]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
(Set(Bad(1L)): Set[Int Or Long]) shouldEqual (Set(Bad(1)): Set[Bad[Int]])
// But if right side is Bad but without a constraint between left and right Bad types, it should not compile, even
// if the Good type has a constraint.
"""(Set(Bad(1L)): Set[Int Or Long]) shouldEqual (Set(Bad("one")): Set[Bad[String]])""" shouldNot typeCheck
// Both sides Or
(Set(Good(1)): Set[Int Or Int]) shouldEqual (Set(Good(1L)): Set[Long Or Int])
(Set(Good(1)): Set[Int Or Int]) shouldEqual (Set(Good(1)): Set[Int Or Long])
(Set(Good(1L)): Set[Long Or Int]) shouldEqual (Set(Good(1)): Set[Int Or Int])
(Set(Good(1)): Set[Int Or Long]) shouldEqual (Set(Good(1)): Set[Int Or Int])
(Set(Bad(1)): Set[Int Or Int]) shouldEqual (Set(Bad(1)): Set[Long Or Int])
(Set(Bad(1)): Set[Int Or Int]) shouldEqual (Set(Bad(1L)): Set[Int Or Long])
(Set(Bad(1)): Set[Long Or Int]) shouldEqual (Set(Bad(1)): Set[Int Or Int])
(Set(Bad(1L)): Set[Int Or Long]) shouldEqual (Set(Bad(1)): Set[Int Or Int])
// So long as an equality constraint exists for either the Good or the Bad side of type Or,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Good[Int].orBad[String] can equal a Good[Int].orBad[java.util.Date]
//
// scala> Good(1).orBad[String] == Good(1L).orBad[java.util.Date]
// res0: Boolean = true
//
// Similarly, a Good[Int].orBad[String] can equal a Good[java.util.Date].orBad[String]
// scala> Good[Int].orBad("hi") == Good[java.util.Date].orBad("hi"])
// res1: Boolean = true
(Set(Good(1).orBad[String]): Set[Int Or String]) shouldEqual (Set(Good(1L).orBad[Date]): Set[Long Or Date])
(Set(Good[Int].orBad("hi")): Set[Int Or String]) shouldEqual (Set(Good[Date].orBad("hi")): Set[Date Or String])
// The only way an equality comparison of two Ors will not be allowed to compile, therefore, is if
// no constraint exists between either the Good or Bad types:
"""(Set(Good(1)): Set[Int Or String]) shouldEqual (Set(Good("one")): Set[String Or Int])""" shouldNot typeCheck
// Much ado about Nothing
// Both sides Set[Good]
Set(Good(1)) shouldEqual Set(Good(1L))
Set(Good(1)) shouldEqual Set(Good(1))
Set(Good(1L)) shouldEqual Set(Good(1))
Set(Good(1)) shouldEqual Set(Good(1))
// Left side Set[Good], right side Set[Or]
Set(Good(1)) shouldEqual Set(Good(1L).asOr)
Set(Good(1)) shouldEqual Set(Good(1).asOr)
Set(Good(1L)) shouldEqual Set(Good(1).asOr)
// Right side Set[Good], left side Set[Or]
Set(Good(1).asOr) shouldEqual Set(Good(1L))
Set(Good(1).asOr) shouldEqual Set(Good(1))
Set(Good(1L).asOr) shouldEqual Set(Good(1))
// Both sides Set[Bad]
Set(Bad(1)) shouldEqual Set(Bad(1))
Set(Bad(1)) shouldEqual Set(Bad(1L))
Set(Bad(1L)) shouldEqual Set(Bad(1))
// Left side Set[Bad], right side Set[Or]
Set(Bad(1)) shouldEqual Set(Bad(1).asOr)
Set(Bad(1)) shouldEqual Set(Bad(1L).asOr)
Set(Bad(1L)) shouldEqual Set(Bad(1).asOr)
// Right side Set[Bad], left side Set[Or]
Set(Bad(1).asOr) shouldEqual Set(Bad(1))
Set(Bad(1).asOr) shouldEqual Set(Bad(1L))
Set(Bad(1L).asOr) shouldEqual Set(Bad(1))
// Both sides Set[Or]
Set(Good(1).asOr) shouldEqual Set(Good(1L).asOr)
Set(Good(1).asOr) shouldEqual Set(Good(1).asOr)
Set(Good(1L).asOr) shouldEqual Set(Good(1).asOr)
Set(Bad(1).asOr) shouldEqual Set(Bad(1).asOr)
Set(Bad(1).asOr) shouldEqual Set(Bad(1L).asOr)
Set(Bad(1L).asOr) shouldEqual Set(Bad(1).asOr)
}
}
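// Convenience upcasts used by the Either specs below, so a Left or Right value can be viewed
// statically as an Either, mirroring the asOr widening used above for Good and Bad.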
implicit class RightConvenience[L, R](either: Right[L, R]) {
def asEither: Either[L, R] = either
}
implicit class LeftConvenience[L, R](either: Left[L, R]) {
def asEither: Either[L, R] = either
}
implicit class RightConvenienceNothing[R](either: Right[Nothing, R]) {
def asEither: Either[Nothing, R] = either
}
implicit class LeftConvenienceNothing[L](either: Left[L, Nothing]) {
def asEither: Either[L, Nothing] = either
}
def `on Either` {
// Both sides Left
(Left(1): Left[Int, Int]) shouldEqual (Left(1L): Left[Long, Int])
(Left(1): Left[Int, Int]) shouldEqual (Left(1): Left[Int, Long])
(Left(1L): Left[Long, Int]) shouldEqual (Left(1): Left[Int, Int])
(Left(1): Left[Int, Long]) shouldEqual (Left(1): Left[Int, Int])
// Given both sides are Left, it shouldn't matter if the Right type has no constraint
(Left(1): Left[Int, Long]) shouldEqual (Left(1): Left[Int, String])
(Left(1L): Left[Long, Int]) shouldEqual (Left(1): Left[Int, String])
// But if both sides are Left but without a constraint, it should not compile, even
// if the Right type has a constraint.
"""(Left(1L): Left[Long, Int]) shouldEqual (Left("one"): Left[String, Int])""" shouldNot typeCheck
// Left side Left, right side Either
(Left(1): Left[Int, Int]) shouldEqual (Left(1L): Either[Long, Int])
(Left(1): Left[Int, Int]) shouldEqual (Left(1): Either[Int, Long])
(Left(1L): Left[Long, Int]) shouldEqual (Left(1): Either[Int, Int])
(Left(1): Left[Int, Long]) shouldEqual (Left(1): Either[Int, Int])
// Given left side is Left, it shouldn't matter if the Right type has no constraint
(Left(1): Left[Int, Long]) shouldEqual (Left(1): Either[Int, String])
(Left(1L): Left[Long, Int]) shouldEqual (Left(1): Either[Int, String])
// But if left side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""(Left(1L): Left[Long, Int]) shouldEqual (Left("one"): Left[String, Int])""" shouldNot typeCheck
// Right side Left, left side Either
(Left(1): Either[Int, Int]) shouldEqual (Left(1L): Left[Long, Int])
(Left(1): Either[Int, Int]) shouldEqual (Left(1): Left[Int, Long])
(Left(1L): Either[Long, Int]) shouldEqual (Left(1): Left[Int, Int])
(Left(1): Either[Int, Long]) shouldEqual (Left(1): Left[Int, Int])
// Given right side is Left, it shouldn't matter if the Right type has no constraint
(Left(1): Either[Int, Long]) shouldEqual (Left(1): Left[Int, String])
(Left(1L): Either[Long, Int]) shouldEqual (Left(1): Left[Int, String])
// But if right side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""(Left(1L): Either[Long, Int]) shouldEqual (Left("one"): Left[String, Int])""" shouldNot typeCheck
// Both sides Right
(Right(1): Right[Int, Int]) shouldEqual (Right(1): Right[Long, Int])
(Right(1): Right[Int, Int]) shouldEqual (Right(1L): Right[Int, Long])
(Right(1): Right[Long, Int]) shouldEqual (Right(1): Right[Int, Int])
(Right(1L): Right[Int, Long]) shouldEqual (Right(1): Right[Int, Int])
// Given both sides are Right, it shouldn't matter if the Left type has no constraint
(Right(1): Right[Long, Int]) shouldEqual (Right(1): Right[String, Int])
(Right(1L): Right[Int, Long]) shouldEqual (Right(1): Right[String, Int])
// But if both sides are Right but without a constraint, it should not compile, even
// if the Left type has a constraint.
"""(Right(1L): Right[Int, Long]) shouldEqual (Right("one"): Right[Int, String])""" shouldNot typeCheck
// Left side Right, right side Either
(Right(1): Right[Int, Int]) shouldEqual (Right(1): Either[Long, Int])
(Right(1): Right[Int, Int]) shouldEqual (Right(1L): Either[Int, Long])
(Right(1): Right[Long, Int]) shouldEqual (Right(1): Either[Int, Int])
(Right(1L): Right[Int, Long]) shouldEqual (Right(1): Either[Int, Int])
// Given left side is Right, it shouldn't matter if the Left type has no constraint
(Right(1): Right[Long, Int]) shouldEqual (Right(1): Either[String, Int])
(Right(1L): Right[Int, Long]) shouldEqual (Right(1): Either[String, Int])
// But if left side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(Right(1L): Right[Int, Long]) shouldEqual (Right("one"): Either[Int, String])""" shouldNot typeCheck
// Right side Right, left side Either
(Right(1): Either[Int, Int]) shouldEqual (Right(1): Right[Long, Int])
(Right(1): Either[Int, Int]) shouldEqual (Right(1L): Right[Int, Long])
(Right(1): Either[Long, Int]) shouldEqual (Right(1): Right[Int, Int])
(Right(1L): Either[Int, Long]) shouldEqual (Right(1): Right[Int, Int])
// Given right side is Right, it shouldn't matter if the Left type has no constraint
(Right(1): Either[Long, Int]) shouldEqual (Right(1): Right[String, Int])
(Right(1L): Either[Int, Long]) shouldEqual (Right(1): Right[String, Int])
// But if right side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(Right(1L): Either[Int, Long]) shouldEqual (Right("one"): Right[Int, String])""" shouldNot typeCheck
// Both sides Either
(Left(1): Either[Int, Int]) shouldEqual (Left(1L): Either[Long, Int])
(Left(1): Either[Int, Int]) shouldEqual (Left(1): Either[Int, Long])
(Left(1L): Either[Long, Int]) shouldEqual (Left(1): Either[Int, Int])
(Left(1): Either[Int, Long]) shouldEqual (Left(1): Either[Int, Int])
(Right(1): Either[Int, Int]) shouldEqual (Right(1): Either[Long, Int])
(Right(1): Either[Int, Int]) shouldEqual (Right(1L): Either[Int, Long])
(Right(1): Either[Long, Int]) shouldEqual (Right(1): Either[Int, Int])
(Right(1L): Either[Int, Long]) shouldEqual (Right(1): Either[Int, Int])
// So long as an equality constraint exists for either the Left or the Right side of type Either,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Left[Int, String] can equal a Left[Int, java.util.Date]
//
// scala> Left[Int, String](1) == Left[Long, java.util.Date](1L)
// res0: Boolean = true
//
// Similarly, a Right[Int, String] can equal a Right[java.util.Date, String]
// scala> Right[Int, String]("hi") == Right[java.util.Date, String]("hi")
// res1: Boolean = true
(Left[Int, String](1): Either[Int, String]) shouldEqual (Left[Long, Date](1L): Either[Long, Date])
(Right[Int, String]("hi"): Either[Int, String]) shouldEqual (Right[Date, String]("hi"): Either[Date, String])
// The only way an equality comparison of two Eithers will not be allowed to compile, therefore, is if
// no constraint exists between either the Left or Right types:
"""(Left[Int, String](1): Either[Int, String]) shouldEqual (Left[String, Int]("one"): Either[String, Int])""" shouldNot typeCheck
// Much ado about Nothing
// Both sides Left
Left(1) shouldEqual Left(1L)
Left(1) shouldEqual Left(1)
Left(1L) shouldEqual Left(1)
Left(1) shouldEqual Left(1)
// Target side Left, parameter side Either
Left(1) shouldEqual Left(1L).asEither
Left(1) shouldEqual Left(1).asEither
Left(1L) shouldEqual Left(1).asEither
// Parameter side Left, target side Either
Left(1).asEither shouldEqual Left(1L)
Left(1).asEither shouldEqual Left(1)
Left(1L).asEither shouldEqual Left(1)
// Both sides Right
Right(1) shouldEqual Right(1)
Right(1) shouldEqual Right(1L)
Right(1L) shouldEqual Right(1)
// Target side Right, parameter side Either
Right(1) shouldEqual Right(1).asEither
Right(1) shouldEqual Right(1L).asEither
Right(1L) shouldEqual Right(1).asEither
// Parameter side Right, target side Either
Right(1).asEither shouldEqual Right(1)
Right(1).asEither shouldEqual Right(1L)
Right(1L).asEither shouldEqual Right(1)
// Both sides Either
Left(1).asEither shouldEqual Left(1L).asEither
Left(1).asEither shouldEqual Left(1).asEither
Left(1L).asEither shouldEqual Left(1).asEither
Right(1).asEither shouldEqual Right(1).asEither
Right(1).asEither shouldEqual Right(1L).asEither
Right(1L).asEither shouldEqual Right(1).asEither
}
object `on Nested Either` {
def `with List (which is covariant)` {
// Both sides Left
(List(Left(1)): List[Left[Int, Int]]) shouldEqual (List(Left(1L)): List[Left[Long, Int]])
(List(Left(1)): List[Left[Int, Int]]) shouldEqual (List(Left(1)): List[Left[Int, Long]])
(List(Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left(1)): List[Left[Int, Int]])
(List(Left(1)): List[Left[Int, Long]]) shouldEqual (List(Left(1)): List[Left[Int, Int]])
// Given both sides are Left, it shouldn't matter if the Right type has no constraint
(List(Left(1)): List[Left[Int, Long]]) shouldEqual (List(Left(1)): List[Left[Int, String]])
(List(Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left(1)): List[Left[Int, String]])
// But if both sides are Left but without a constraint, it should not compile, even
// if the Right type has a constraint.
"""(List(Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left("one")): List[Left[String, Int]])""" shouldNot typeCheck
// Left side Left, right side Either
(List(Left(1)): List[Left[Int, Int]]) shouldEqual (List(Left(1L)): List[Either[Long, Int]])
(List(Left(1)): List[Left[Int, Int]]) shouldEqual (List(Left(1)): List[Either[Int, Long]])
(List(Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left(1)): List[Either[Int, Int]])
(List(Left(1)): List[Left[Int, Long]]) shouldEqual (List(Left(1)): List[Either[Int, Int]])
// Given left side is Left, it shouldn't matter if the Right type has no constraint
(List(Left(1)): List[Left[Int, Long]]) shouldEqual (List(Left(1)): List[Either[Int, String]])
(List(Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left(1)): List[Either[Int, String]])
// But if left side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""List((Left(1L)): List[Left[Long, Int]]) shouldEqual (List(Left("one")): List[Left[String, Int]])""" shouldNot typeCheck
// Right side Left, left side Either
(List(Left(1)): List[Either[Int, Int]]) shouldEqual (List(Left(1L)): List[Left[Long, Int]])
(List(Left(1)): List[Either[Int, Int]]) shouldEqual (List(Left(1)): List[Left[Int, Long]])
(List(Left(1L)): List[Either[Long, Int]]) shouldEqual (List(Left(1)): List[Left[Int, Int]])
(List(Left(1)): List[Either[Int, Long]]) shouldEqual (List(Left(1)): List[Left[Int, Int]])
// Given right side is Left, it shouldn't matter if the Right type has no constraint
(List(Left(1)): List[Either[Int, Long]]) shouldEqual (List(Left(1)): List[Left[Int, String]])
(List(Left(1L)): List[Either[Long, Int]]) shouldEqual (List(Left(1)): List[Left[Int, String]])
// But if right side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""(List(Left(1L)): List[Either[Long, Int]]) shouldEqual (List(Left("one")): List[Left[String, Int]])""" shouldNot typeCheck
// Both sides Right
(List(Right(1)): List[Right[Int, Int]]) shouldEqual (List(Right(1)): List[Right[Long, Int]])
(List(Right(1)): List[Right[Int, Int]]) shouldEqual (List(Right(1L)): List[Right[Int, Long]])
(List(Right(1)): List[Right[Long, Int]]) shouldEqual (List(Right(1)): List[Right[Int, Int]])
(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right(1)): List[Right[Int, Int]])
// Given both sides are Right, it shouldn't matter if the Left type has no constraint
(List(Right(1)): List[Right[Long, Int]]) shouldEqual (List(Right(1)): List[Right[String, Int]])
(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right(1)): List[Right[String, Int]])
// But if both sides are Right but without a constraint, it should not compile, even
// if the Left type has a constraint.
"""(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right("one")): List[Right[Int, String]])""" shouldNot typeCheck
// Left side Right, right side Either
(List(Right(1)): List[Right[Int, Int]]) shouldEqual (List(Right(1)): List[Either[Long, Int]])
(List(Right(1)): List[Right[Int, Int]]) shouldEqual (List(Right(1L)): List[Either[Int, Long]])
(List(Right(1)): List[Right[Long, Int]]) shouldEqual (List(Right(1)): List[Either[Int, Int]])
(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right(1)): List[Either[Int, Int]])
// Given left side is Right, it shouldn't matter if the Left type has no constraint
(List(Right(1)): List[Right[Long, Int]]) shouldEqual (List(Right(1)): List[Either[String, Int]])
(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right(1)): List[Either[String, Int]])
// But if left side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(List(Right(1L)): List[Right[Int, Long]]) shouldEqual (List(Right("one")): List[Either[Int, String]])""" shouldNot typeCheck
// Right side Right, left side Either
(List(Right(1)): List[Either[Int, Int]]) shouldEqual (List(Right(1)): List[Right[Long, Int]])
(List(Right(1)): List[Either[Int, Int]]) shouldEqual (List(Right(1L)): List[Right[Int, Long]])
(List(Right(1)): List[Either[Long, Int]]) shouldEqual (List(Right(1)): List[Right[Int, Int]])
(List(Right(1L)): List[Either[Int, Long]]) shouldEqual (List(Right(1)): List[Right[Int, Int]])
// Given right side is Right, it shouldn't matter if the Left type has no constraint
(List(Right(1)): List[Either[Long, Int]]) shouldEqual (List(Right(1)): List[Right[String, Int]])
(List(Right(1L)): List[Either[Int, Long]]) shouldEqual (List(Right(1)): List[Right[String, Int]])
// But if right side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(List(Right(1L)): List[Either[Int, Long]]) shouldEqual (List(Right("one")): List[Right[Int, String]])""" shouldNot typeCheck
// Both sides Either
(List(Left(1)): List[Either[Int, Int]]) shouldEqual (List(Left(1L)): List[Either[Long, Int]])
(List(Left(1)): List[Either[Int, Int]]) shouldEqual (List(Left(1)): List[Either[Int, Long]])
(List(Left(1L)): List[Either[Long, Int]]) shouldEqual (List(Left(1)): List[Either[Int, Int]])
(List(Left(1)): List[Either[Int, Long]]) shouldEqual (List(Left(1)): List[Either[Int, Int]])
(List(Right(1)): List[Either[Int, Int]]) shouldEqual (List(Right(1)): List[Either[Long, Int]])
(List(Right(1)): List[Either[Int, Int]]) shouldEqual (List(Right(1L)): List[Either[Int, Long]])
(List(Right(1)): List[Either[Long, Int]]) shouldEqual (List(Right(1)): List[Either[Int, Int]])
(List(Right(1L)): List[Either[Int, Long]]) shouldEqual (List(Right(1)): List[Either[Int, Int]])
// So long as an equality constraint exists for either the Left or the Right side of type Either,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Left[Int, String] can equal a Left[Int, java.util.Date]
//
// scala> Left[Int, String](1) == Left[Long, java.util.Date](1L)
// res0: Boolean = true
//
// Similarly, a Right[Int, String] can equal a Right[java.util.Date, String]
// scala> Right[Int, String]("hi") == Right[java.util.Date, String]("hi")
// res1: Boolean = true
(List(Left[Int, String](1)): List[Either[Int, String]]) shouldEqual (List(Left[Long, Date](1L)): List[Either[Long, Date]])
(List(Right[Int, String]("hi")): List[Either[Int, String]]) shouldEqual (List(Right[Date, String]("hi")): List[Either[Date, String]])
// The only way an equality comparison of two Eithers will not be allowed to compile, therefore, is if
// no constraint exists between either the Left or Right types:
"""(List(Left(1)): List[Either[Int, String]]) shouldEqual (List(Left("one")): List[Either[String, Int]])""" shouldNot typeCheck
// Much ado about Nothing
// Both sides List[Left]
List(Left(1)) shouldEqual List(Left(1L))
List(Left(1)) shouldEqual List(Left(1))
List(Left(1L)) shouldEqual List(Left(1))
List(Left(1)) shouldEqual List(Left(1))
// Target side List[Left], parameter side List[Either]
List(Left(1)) shouldEqual List(Left(1L).asEither)
List(Left(1)) shouldEqual List(Left(1).asEither)
List(Left(1L)) shouldEqual List(Left(1).asEither)
// Parameter side List[Left], target side List[Either]
List(Left(1).asEither) shouldEqual List(Left(1L))
List(Left(1).asEither) shouldEqual List(Left(1))
List(Left(1L).asEither) shouldEqual List(Left(1))
// Both sides List[Right]
List(Right(1)) shouldEqual List(Right(1))
List(Right(1)) shouldEqual List(Right(1L))
List(Right(1L)) shouldEqual List(Right(1))
// Target side List[Right], parameter side List[Either]
List(Right(1)) shouldEqual List(Right(1).asEither)
List(Right(1)) shouldEqual List(Right(1L).asEither)
List(Right(1L)) shouldEqual List(Right(1).asEither)
// Parameter side List[Right], target side List[Either]
List(Right(1).asEither) shouldEqual List(Right(1))
List(Right(1).asEither) shouldEqual List(Right(1L))
List(Right(1L).asEither) shouldEqual List(Right(1))
// Both sides List[Either]
List(Left(1).asEither) shouldEqual List(Left(1L).asEither)
List(Left(1).asEither) shouldEqual List(Left(1).asEither)
List(Left(1L).asEither) shouldEqual List(Left(1).asEither)
List(Right(1).asEither) shouldEqual List(Right(1).asEither)
List(Right(1).asEither) shouldEqual List(Right(1L).asEither)
List(Right(1L).asEither) shouldEqual List(Right(1).asEither)
}
def `with Set (which is invariant)` {
// Both sides Left
(Set(Left(1)): Set[Left[Int, Int]]) shouldEqual (Set(Left(1L)): Set[Left[Long, Int]])
(Set(Left(1)): Set[Left[Int, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, Long]])
(Set(Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, Int]])
(Set(Left(1)): Set[Left[Int, Long]]) shouldEqual (Set(Left(1)): Set[Left[Int, Int]])
// Given both sides are Left, it shouldn't matter if the Right type has no constraint
(Set(Left(1)): Set[Left[Int, Long]]) shouldEqual (Set(Left(1)): Set[Left[Int, String]])
(Set(Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, String]])
// But if both sides are Left but without a constraint, it should not compile, even
// if the Right type has a constraint.
"""(Set(Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left("one")): Set[Left[String, Int]])""" shouldNot typeCheck
// Left side Left, right side Either
(Set(Left(1)): Set[Left[Int, Int]]) shouldEqual (Set(Left(1L)): Set[Either[Long, Int]])
(Set(Left(1)): Set[Left[Int, Int]]) shouldEqual (Set(Left(1)): Set[Either[Int, Long]])
(Set(Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left(1)): Set[Either[Int, Int]])
(Set(Left(1)): Set[Left[Int, Long]]) shouldEqual (Set(Left(1)): Set[Either[Int, Int]])
// Given left side is Left, it shouldn't matter if the Right type has no constraint
(Set(Left(1)): Set[Left[Int, Long]]) shouldEqual (Set(Left(1)): Set[Either[Int, String]])
(Set(Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left(1)): Set[Either[Int, String]])
// But if left side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""Set((Left(1L)): Set[Left[Long, Int]]) shouldEqual (Set(Left("one")): Set[Left[String, Int]])""" shouldNot typeCheck
// Right side Left, left side Either
(Set(Left(1)): Set[Either[Int, Int]]) shouldEqual (Set(Left(1L)): Set[Left[Long, Int]])
(Set(Left(1)): Set[Either[Int, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, Long]])
(Set(Left(1L)): Set[Either[Long, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, Int]])
(Set(Left(1)): Set[Either[Int, Long]]) shouldEqual (Set(Left(1)): Set[Left[Int, Int]])
// Given right side is Left, it shouldn't matter if the Right type has no constraint
(Set(Left(1)): Set[Either[Int, Long]]) shouldEqual (Set(Left(1)): Set[Left[Int, String]])
(Set(Left(1L)): Set[Either[Long, Int]]) shouldEqual (Set(Left(1)): Set[Left[Int, String]])
// But if right side is Left but without a constraint between left and right Left types, it should not compile, even
// if the Right type has a constraint.
"""(Set(Left(1L)): Set[Either[Long, Int]]) shouldEqual (Set(Left("one")): Set[Left[String, Int]])""" shouldNot typeCheck
// Both sides Right
(Set(Right(1)): Set[Right[Int, Int]]) shouldEqual (Set(Right(1)): Set[Right[Long, Int]])
(Set(Right(1)): Set[Right[Int, Int]]) shouldEqual (Set(Right(1L)): Set[Right[Int, Long]])
(Set(Right(1)): Set[Right[Long, Int]]) shouldEqual (Set(Right(1)): Set[Right[Int, Int]])
(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right(1)): Set[Right[Int, Int]])
// Given both sides are Right, it shouldn't matter if the Left type has no constraint
(Set(Right(1)): Set[Right[Long, Int]]) shouldEqual (Set(Right(1)): Set[Right[String, Int]])
(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right(1)): Set[Right[String, Int]])
// But if both sides are Right but without a constraint, it should not compile, even
// if the Left type has a constraint.
"""(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right("one")): Set[Right[Int, String]])""" shouldNot typeCheck
// Left side Right, right side Either
(Set(Right(1)): Set[Right[Int, Int]]) shouldEqual (Set(Right(1)): Set[Either[Long, Int]])
(Set(Right(1)): Set[Right[Int, Int]]) shouldEqual (Set(Right(1L)): Set[Either[Int, Long]])
(Set(Right(1)): Set[Right[Long, Int]]) shouldEqual (Set(Right(1)): Set[Either[Int, Int]])
(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right(1)): Set[Either[Int, Int]])
// Given left side is Right, it shouldn't matter if the Left type has no constraint
(Set(Right(1)): Set[Right[Long, Int]]) shouldEqual (Set(Right(1)): Set[Either[String, Int]])
(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right(1)): Set[Either[String, Int]])
// But if left side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(Set(Right(1L)): Set[Right[Int, Long]]) shouldEqual (Set(Right("one")): Set[Either[Int, String]])""" shouldNot typeCheck
// Right side Right, left side Either
(Set(Right(1)): Set[Either[Int, Int]]) shouldEqual (Set(Right(1)): Set[Right[Long, Int]])
(Set(Right(1)): Set[Either[Int, Int]]) shouldEqual (Set(Right(1L)): Set[Right[Int, Long]])
(Set(Right(1)): Set[Either[Long, Int]]) shouldEqual (Set(Right(1)): Set[Right[Int, Int]])
(Set(Right(1L)): Set[Either[Int, Long]]) shouldEqual (Set(Right(1)): Set[Right[Int, Int]])
// Given right side is Right, it shouldn't matter if the Left type has no constraint
(Set(Right(1)): Set[Either[Long, Int]]) shouldEqual (Set(Right(1)): Set[Right[String, Int]])
(Set(Right(1L)): Set[Either[Int, Long]]) shouldEqual (Set(Right(1)): Set[Right[String, Int]])
// But if right side is Right but without a constraint between left and right Right types, it should not compile, even
// if the Left type has a constraint.
"""(Set(Right(1L)): Set[Either[Int, Long]]) shouldEqual (Set(Right("one")): Set[Right[Int, String]])""" shouldNot typeCheck
// Both sides Either
(Set(Left(1)): Set[Either[Int, Int]]) shouldEqual (Set(Left(1L)): Set[Either[Long, Int]])
(Set(Left(1)): Set[Either[Int, Int]]) shouldEqual (Set(Left(1)): Set[Either[Int, Long]])
(Set(Left(1L)): Set[Either[Long, Int]]) shouldEqual (Set(Left(1)): Set[Either[Int, Int]])
(Set(Left(1)): Set[Either[Int, Long]]) shouldEqual (Set(Left(1)): Set[Either[Int, Int]])
(Set(Right(1)): Set[Either[Int, Int]]) shouldEqual (Set(Right(1)): Set[Either[Long, Int]])
(Set(Right(1)): Set[Either[Int, Int]]) shouldEqual (Set(Right(1L)): Set[Either[Int, Long]])
(Set(Right(1)): Set[Either[Long, Int]]) shouldEqual (Set(Right(1)): Set[Either[Int, Int]])
(Set(Right(1L)): Set[Either[Int, Long]]) shouldEqual (Set(Right(1)): Set[Either[Int, Int]])
// So long as an equality constraint exists for either the Left or the Right side of type Either,
// the comparison will be allowed. This is because it may be true. At the
// end of the day, a Left[Int, String] can equal a Left[Int, java.util.Date]
//
// scala> Left[Int, String](1) == Left[Long, java.util.Date](1L)
// res0: Boolean = true
//
// Similarly, a Right[Int, String] can equal a Right[java.util.Date, String]
// scala> Right[Int, String]("hi") == Right[java.util.Date, String]("hi")
// res1: Boolean = true
(Set(Left[Int, String](1)): Set[Either[Int, String]]) shouldEqual (Set(Left[Long, Date](1L)): Set[Either[Long, Date]])
(Set(Right[Int, String]("hi")): Set[Either[Int, String]]) shouldEqual (Set(Right[Date, String]("hi")): Set[Either[Date, String]])
// The only way an equality comparison of two Eithers will not be allowed to compile, therefore, is if
// no constraint exists between either the Left or Right types:
"""(Set(Left(1)): Set[Either[Int, String]]) shouldEqual (Set(Left("one")): Set[Either[String, Int]])""" shouldNot typeCheck
// Much ado about Nothing
// Both sides Set[Left]
Set(Left(1)) shouldEqual Set(Left(1L))
Set(Left(1)) shouldEqual Set(Left(1))
Set(Left(1L)) shouldEqual Set(Left(1))
Set(Left(1)) shouldEqual Set(Left(1))
// Target side Set[Left], parameter side Set[Either]
Set(Left(1)) shouldEqual Set(Left(1L).asEither)
Set(Left(1)) shouldEqual Set(Left(1).asEither)
Set(Left(1L)) shouldEqual Set(Left(1).asEither)
// Parameter side Set[Left], target side Set[Either]
Set(Left(1).asEither) shouldEqual Set(Left(1L))
Set(Left(1).asEither) shouldEqual Set(Left(1))
Set(Left(1L).asEither) shouldEqual Set(Left(1))
// Both sides Set[Right]
Set(Right(1)) shouldEqual Set(Right(1))
Set(Right(1)) shouldEqual Set(Right(1L))
Set(Right(1L)) shouldEqual Set(Right(1))
// Target side Set[Right], parameter side Set[Either]
Set(Right(1)) shouldEqual Set(Right(1).asEither)
Set(Right(1)) shouldEqual Set(Right(1L).asEither)
Set(Right(1L)) shouldEqual Set(Right(1).asEither)
// Parameter side Set[Right], target side Set[Either]
Set(Right(1).asEither) shouldEqual Set(Right(1))
Set(Right(1).asEither) shouldEqual Set(Right(1L))
Set(Right(1L).asEither) shouldEqual Set(Right(1))
// Both sides Set[Either]
Set(Left(1).asEither) shouldEqual Set(Left(1L).asEither)
Set(Left(1).asEither) shouldEqual Set(Left(1).asEither)
Set(Left(1L).asEither) shouldEqual Set(Left(1).asEither)
Set(Right(1).asEither) shouldEqual Set(Right(1).asEither)
Set(Right(1).asEither) shouldEqual Set(Right(1L).asEither)
Set(Right(1L).asEither) shouldEqual Set(Right(1).asEither)
}
}
def `on Options` {
// Both sides Option
Option(1) shouldEqual Option(1L)
Option(1L) shouldEqual Option(1L)
// Both sides Some
Some(1) shouldEqual Some(1L)
Some(1L) shouldEqual Some(1L)
// Both sides None
/*
"None shouldEqual None" shouldNot typeCheck
*/
None shouldEqual None // For now at least, will allow this.
// Left side Some, right side Option
Some(1) shouldEqual Option(1L)
Some(1L) shouldEqual Option(1)
// Left side Option, right side Some
Option(1) shouldEqual Some(1L)
Option(1L) shouldEqual Some(1)
// Left side None, right side Option
None should not equal Option(1L)
None should not equal Option(1)
None shouldEqual (None: Option[Int])
// Left side Option, right side None
Option(1) should not equal None
(None: Option[Int]) shouldEqual None
Option(1L) should not equal None
(None: Option[Long]) shouldEqual None
// Left side None, right side Some
"None shouldEqual Some(1L)" shouldNot typeCheck
"None shouldEqual Some(1)" shouldNot typeCheck
// Left side Some, right side None
"Some(1) shouldEqual None" shouldNot typeCheck
"Some(1L) shouldEqual None" shouldNot typeCheck
}
def `on nested Options` {
// Both sides Option
Option(Option(1)) shouldEqual Option(Option(1L))
Option(Option(1L)) shouldEqual Option(Option(1))
// Both sides Some
Option(Some(1)) shouldEqual Option(Some(1L))
Some(Some(1L)) shouldEqual Some(Some(1L))
// Both sides None
/*
"Some(None) shouldEqual Some(None)" shouldNot typeCheck
"Option(None) shouldEqual Option(None)" shouldNot typeCheck
*/
Some(None) shouldEqual Some(None)
Option(None) shouldEqual Option(None)
// Left side Some, right side Option
Option(Some(1)) shouldEqual Option(Option(1L))
Some(Some(1)) shouldEqual Some(Option(1L))
Option(Some(1L)) shouldEqual Option(Option(1))
Some(Some(1L)) shouldEqual Some(Option(1))
// Left side Option, right side Some
Option(Option(1)) shouldEqual Option(Some(1L))
Some(Option(1)) shouldEqual Some(Some(1L))
Option(Option(1L)) shouldEqual Option(Some(1))
Some(Option(1L)) shouldEqual Some(Some(1))
// Left side None, right side Option
Option(None) should not equal Option(Option(1L))
Option(None) shouldEqual Option((None: Option[Long]))
Option(None) should not equal Option(Option(1))
Option(None) shouldEqual Option((None: Option[Int]))
// Left side Option, right side None
Option(Option(1)) should not equal Option(None)
Option((None: Option[Int])) shouldEqual Option(None)
Option(Option(1L)) should not equal Option(None)
Option((None: Option[Long])) shouldEqual Option(None)
// Left side None, right side Some
"Some(None) should not equal Some(Some(1L))" shouldNot typeCheck
"Option(None) should not equal Option(Some(1))" shouldNot typeCheck
// Left side Some, right side None
"Some(Some(1)) should not equal Some(None)" shouldNot typeCheck
"Option(Some(1L)) should not equal Option(None)" shouldNot typeCheck
}
import scala.util.{Try, Success, Failure}
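// A single exception instance is shared so that the Failure values built from it compare equal.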
val ex = new Exception("oops")
def `on Try` {
// Both sides Try
Try(1) shouldEqual Try(1L)
Try(1L) shouldEqual Try(1L)
// Both sides Success
Success(1) shouldEqual Success(1L)
Success(1L) shouldEqual Success(1L)
// Both sides Failure(ex)
Failure(ex) shouldEqual Failure(ex)
// Left side Success, right side Try
Success(1) shouldEqual Try(1L)
Success(1L) shouldEqual Try(1)
// Left side Try, right side Success
Try(1) shouldEqual Success(1L)
Try(1L) shouldEqual Success(1)
// Left side Failure(ex), right side Try
Failure(ex) should not equal Try(1L)
Failure(ex) should not equal Try(1)
Failure(ex) shouldEqual (Failure(ex): Try[Int])
// Left side Try, right side Failure(ex)
Try(1) should not equal Failure(ex)
(Failure(ex): Try[Int]) shouldEqual Failure(ex)
Try(1L) should not equal Failure(ex)
(Failure(ex): Try[Long]) shouldEqual Failure(ex)
// Left side Failure(ex), right side Success
"Failure(ex) shouldEqual Success(1L)" shouldNot typeCheck
"Failure(ex) shouldEqual Success(1)" shouldNot typeCheck
// Left side Success, right side Failure(ex)
"Success(1) shouldEqual Failure(ex)" shouldNot typeCheck
"Success(1L) shouldEqual Failure(ex)" shouldNot typeCheck
}
def `on nested Try` {
// Both sides Try
Try(Try(1)) shouldEqual Try(Try(1L))
Try(Try(1L)) shouldEqual Try(Try(1))
// Both sides Success
Try(Success(1)) shouldEqual Try(Success(1L))
Success(Success(1L)) shouldEqual Success(Success(1L))
// Both sides Failure(ex)
Success(Failure(ex)) shouldEqual Success(Failure(ex))
Try(Failure(ex)) shouldEqual Try(Failure(ex))
// Left side Success, right side Try
Try(Success(1)) shouldEqual Try(Try(1L))
Success(Success(1)) shouldEqual Success(Try(1L))
Try(Success(1L)) shouldEqual Try(Try(1))
Success(Success(1L)) shouldEqual Success(Try(1))
// Left side Try, right side Success
Try(Try(1)) shouldEqual Try(Success(1L))
Success(Try(1)) shouldEqual Success(Success(1L))
Try(Try(1L)) shouldEqual Try(Success(1))
Success(Try(1L)) shouldEqual Success(Success(1))
// Left side Failure(ex), right side Try
Try(Failure(ex)) should not equal Try(Try(1L))
Try(Failure(ex)) shouldEqual Try((Failure(ex): Try[Long]))
Try(Failure(ex)) should not equal Try(Try(1))
Try(Failure(ex)) shouldEqual Try((Failure(ex): Try[Int]))
// Left side Try, right side Failure(ex)
Try(Try(1)) should not equal Try(Failure(ex))
Try((Failure(ex): Try[Int])) shouldEqual Try(Failure(ex))
Try(Try(1L)) should not equal Try(Failure(ex))
Try((Failure(ex): Try[Long])) shouldEqual Try(Failure(ex))
// Left side Failure(ex), right side Success
"Success(Failure(ex)) should not equal Success(Success(1L))" shouldNot typeCheck
"Try(Failure(ex)) should not equal Try(Success(1))" shouldNot typeCheck
// Left side Success, right side Failure(ex)
"Success(Success(1)) should not equal Success(Failure(ex))" shouldNot typeCheck
"Try(Success(1L)) should not equal Try(Failure(ex))" shouldNot typeCheck
}
}
}
|
cquiroz/scalatest
|
common-test/src/main/scala/org/scalatest/NyayaGeneratorDrivenPropertyChecks.scala
|
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
import com.nicta.rng.Rng
import japgolly.nyaya.test.PTest._
import org.scalatest.exceptions.DiscardedEvaluationException
import japgolly.nyaya._
import japgolly.nyaya.test._
import japgolly.nyaya.test.PropTest._
import japgolly.nyaya.test.Executor._
import org.scalatest.Assertions
import scalaz.EphemeralStream
import japgolly.nyaya.test.Settings
trait NyayaGeneratorDrivenPropertyChecks extends /*Whenever with Configuration with*/ Assertions {
implicit def settings = new Settings
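  // Builds a Data[A] that, when run, yields the stream of samples for a generator. When a
  // size distribution is configured, the sample budget is split across its weighted buckets
  // (each bucket getting a SampleSize proportional to its weight and either an explicit
  // GenSize or a fraction of genSize), and the per-bucket streams are concatenated;
  // otherwise a single stream of sampleSize samples at genSize is produced. An optional
  // seed re-seeds the RNG before sampling.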
def prepareData[A](gen: Gen[A], sizeDist: Settings.SizeDist, genSize: GenSize, debug: Boolean): Data[A] =
(sampleSize, seedo, debugPrefix) => {
val samples: (SampleSize, GenSize) => Rng[EphemeralStream[A]] = (s, g) => {
if (debug) println(s"${debugPrefix}Generating ${s.value} samples @ sz ${g.value}...")
gen.data(g, s).map(_ take s.value)
}
val rng =
if (sizeDist.isEmpty)
samples(sampleSize, genSize)
else {
var total = sizeDist.foldLeft(0)(_ + _._1)
var rem = sampleSize.value
val plan = sizeDist.map { case (si, gg) =>
val gs = gg.fold[GenSize](p => genSize.map(v => (v * p + 0.5).toInt max 0), identity)
val ss = SampleSize((si.toDouble / total * rem + 0.5).toInt)
total -= si
rem -= ss.value
(ss, gs)
}
plan.toStream
.map(samples.tupled)
.foldLeft(Rng insert EphemeralStream[A])((a, b) => b.flatMap(c => a.map(_ ++ c)))
}
seedo.fold(rng)(seed => Rng.setseed(seed).flatMap(_ => rng))
.run
}
def test1[A](p: Prop[A], a: A): Result[A] =
try {
Result(a, p(a))
} catch {
case e: Throwable => Error(a, e)
}
def testN[A](p: Prop[A], data: EphemeralStream[A], runInc: () => Int, S: Settings): RunState[A] = {
val it = EphemeralStream.toIterable(data).iterator
var rs = RunState.empty[A]
while (rs.success && it.hasNext) {
val a = it.next()
rs = RunState(runInc(), test1(p, a))
}
rs
}
def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
val data = g(S.sampleSize, S.seed, "").unsafePerformIO()
var i = 0
testN(p, data, () => {i+=1; i}, S)
}
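  // Illustrative usage (a sketch; assumes implicit japgolly.nyaya.test.Gen instances and a
  // Settings value are in scope, as in the specs that mix this trait in):
  //
  //   forAll { (n: Int) => assert(n + 0 == n) }
  //   forAll { (a: Int, b: Int) => assert(a + b == b + a) }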
def forAll[A](fun: (A) => Unit)(implicit genA: Gen[A], S: Settings) {
def propF = { (a: A) =>
val (unmetCondition, exception) =
try {
fun(a)
(false, None)
}
catch {
case e: DiscardedEvaluationException => (true, None)
case e: Throwable => (false, Some(e))
}
!unmetCondition && exception.isEmpty
}
val prop = Prop.test[A]("test", propF)
val runStatus = run(prop, prepareData(genA, S.sizeDist, S.genSize, S.debug), S) //PTest.test(prop, genA, S)
runStatus match {
case RunState(_, Satisfied) | RunState(_, Proved) => () // test passed
case RunState(runs, Falsified(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Test failed after $runs runs, failing input value: " + e.input.show)
case RunState(runs, Error(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Error occurred after $runs runs, error message: " + e.getMessage)
}
}
def forAll[A, B](fun: (A, B) => Unit)(implicit genA: Gen[A], genB: Gen[B], S: Settings) {
def propF = { (a: A, b: B) =>
val (unmetCondition, exception) =
try {
fun(a, b)
(false, None)
}
catch {
case e: DiscardedEvaluationException => (true, None)
case e: Throwable => (false, Some(e))
}
!unmetCondition && exception.isEmpty
}
val prop = Prop.test[(A, B)]("test", (tuple2: Tuple2[A, B]) => propF(tuple2._1, tuple2._2))
val runStatus = run(prop, prepareData(genA *** genB, S.sizeDist, S.genSize, S.debug), S)
runStatus match {
case RunState(_, Satisfied) | RunState(_, Proved) => () // test passed
case RunState(runs, Falsified(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Test failed after $runs runs, failing input value: " + e.input.show)
case RunState(runs, Error(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Error occurred after $runs runs, error message: " + e.getMessage)
}
}
def forAll[A, B, C](fun: (A, B, C) => Unit)(implicit genA: Gen[A], genB: Gen[B], genC: Gen[C], S: Settings) {
def propF = { (a: A, b: B, c: C) =>
val (unmetCondition, exception) =
try {
fun(a, b, c)
(false, None)
}
catch {
case e: DiscardedEvaluationException => (true, None)
case e: Throwable => (false, Some(e))
}
!unmetCondition && exception.isEmpty
}
val prop = Prop.test[((A, B), C)]("test", (tuple2: Tuple2[(A, B), C]) => propF(tuple2._1._1, tuple2._1._2, tuple2._2))
val runStatus = run(prop, prepareData(genA *** genB *** genC, S.sizeDist, S.genSize, S.debug), S)
runStatus match {
case RunState(_, Satisfied) | RunState(_, Proved) => () // test passed
case RunState(runs, Falsified(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Test failed after $runs runs, failing input value: " + e.input.show)
case RunState(runs, Error(a, e)) =>
// TODO: should throw GeneratorDrivenPropertyCheckFailedException here
fail(s"Error occurred after $runs runs, error message: " + e.getMessage)
}
}
}
object NyayaGeneratorDrivenPropertyChecks extends NyayaGeneratorDrivenPropertyChecks
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/LowPriorityEqualityConstraints.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import annotation.implicitNotFound
import scala.language.higherKinds
import scala.util.{Try,Success,Failure}
trait LowPriorityEqualityConstraints {
import EqualityPolicy.BasicEqualityConstraint
// ELG Element Left Good
// ELB Element Left Bad
// ERG Element Right Good
// ERB Element Right Bad
// This one will provide an equality constraint if the Bad types have an inner constraint. It doesn't matter
// in this case what the Good type does. If there was a constraint available for the Good types, then it would
// use the higher priority implicit Constraint.orEqualityConstraint and never get here.
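  // For example (illustrative): a value typed as String Or Int can be compared with one typed
  // as Char Or Long through this implicit, because an Int ~ Long constraint exists even
  // though no String ~ Char constraint does.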
implicit def lowPriorityOrEqualityConstraint[ELG, ELB, ERG, ERB](implicit equalityOfL: Equality[Or[ELG, ELB]], ev: EqualityConstraint[ELB, ERB] with NativeSupport): EqualityConstraint[Or[ELG, ELB], Or[ERG, ERB]] with NativeSupport = new BasicEqualityConstraint[Or[ELG, ELB], Or[ERG, ERB]](equalityOfL)
implicit def lowPriorityOrOnBothSidesWithGoodNothingConstraint[ELB, ERB](implicit equalityOfL: Equality[Or[Nothing, ELB]], ev: EqualityConstraint[ELB, ERB] with NativeSupport): EqualityConstraint[Or[Nothing, ELB], Or[Nothing, ERB]] with NativeSupport = new BasicEqualityConstraint[Or[Nothing, ELB], Or[Nothing, ERB]](equalityOfL)
// This must be low priority to allow Every on both sides
implicit def everyOnRightEqualityConstraint[EA, CA[ea] <: Every[ea], EB](implicit equalityOfA: Equality[CA[EA]], ev: EqualityConstraint[EA, EB] with NativeSupport): EqualityConstraint[CA[EA], Every[EB]] with NativeSupport = new BasicEqualityConstraint[CA[EA], Every[EB]](equalityOfA)
// Either (in x === y, x is the "target" of the === invocation, y is the "parameter")
// ETL Element Target Left
// ETR Element Target Right
// EPL Element Parameter Left
// EPR Element Parameter Right
  // This one will provide an equality constraint if the Right types have an inner constraint. It doesn't matter
  // in this case what the Left types do. If there was a constraint available for the Left types, then it would
  // use the higher priority Either constraint and never get here.
implicit def lowPriorityEitherEqualityConstraint[ETL, ETR, EPL, EPR](implicit equalityOfT: Equality[Either[ETL, ETR]], ev: EqualityConstraint[ETR, EPR] with NativeSupport): EqualityConstraint[Either[ETL, ETR], Either[EPL, EPR]] with NativeSupport = new BasicEqualityConstraint[Either[ETL, ETR], Either[EPL, EPR]](equalityOfT)
implicit def lowPriorityEitherNothingConstraint[ETR, EPR](implicit equalityOfT: Equality[Either[Nothing, ETR]], ev: EqualityConstraint[ETR, EPR] with NativeSupport): EqualityConstraint[Either[Nothing, ETR], Either[Nothing, EPR]] with NativeSupport = new BasicEqualityConstraint[Either[Nothing, ETR], Either[Nothing, EPR]](equalityOfT)
// This must be low priority to allow Option on both sides
implicit def optionOnRightEqualityConstraint[EA, CA[ea] <: Option[ea], EB](implicit equalityOfA: Equality[CA[EA]], ev: EqualityConstraint[EA, EB] with NativeSupport): EqualityConstraint[CA[EA], Option[EB]] with NativeSupport = new BasicEqualityConstraint[CA[EA], Option[EB]](equalityOfA)
// This must be low priority to allow Try on both sides
implicit def tryOnRightEqualityConstraint[EA, CA[ea] <: Try[ea], EB](implicit equalityOfA: Equality[CA[EA]], ev: EqualityConstraint[EA, EB] with NativeSupport): EqualityConstraint[CA[EA], Try[EB]] with NativeSupport = new BasicEqualityConstraint[CA[EA], Try[EB]](equalityOfA)
}
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/algebra/Monad.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.algebra
import scala.language.higherKinds
import scala.language.implicitConversions
/**
* Typeclass trait for algebraic structure containing <em>insertion</em> and <em>flat-mapping</em> methods that obey
* laws of <em>identity</em> and <em>associativity</em>.
*
* <p>
* A <code>Monad</code> instance wraps an object that in some way behaves as a <code>Monad</code>.
* </p>
*/
trait Monad[Context[_]] extends Applicative[Context] {
/**
* Applies the given function to the value contained in this context, returning the result
* of the function, which is a value wrapped in another context.
*/
def flatMap[A, B](ca: Context[A])(f: A => Context[B]): Context[B]
// TODO: Flesh out the scaladoc, explaining the implementation here.
/**
* Applies the given function in context to the given value in context, returning the result in
* the context.
*/
def applying[A, B](ca: Context[A])(cab: Context[A => B]): Context[B] = flatMap(ca)(a => map(cab)(ab => ab(a)))
/**
* Flattens a nested context into a single context.
*/
def flatten[A](cca: Context[Context[A]]): Context[A] = flatMap(cca)(a => a)
}
/**
* Companion object for <code>Monad</code> typeclass.
*/
object Monad {
/**
* Monad adapter class for any Context[A].
*/
class Adapter[Context[_], A](val underlying: Context[A])(implicit val monad: Monad[Context]) {
def map[B](f: A => B) = monad.map(underlying)(f)
def flatMap[B](f: A => Context[B]) = monad.flatMap(underlying)(f)
def flatten[B](implicit ev: A <:< Context[B]) = monad.flatMap(underlying)(ev)
}
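  // Illustrative sketch: given an implicit Monad[Option] in scope (such as the one defined in
  // MonadSpec), the adapter lets map, flatMap, and flatten be called through the typeclass:
  //
  //   Monad.adapters(Option(2)).map(_ + 1)        // Some(3)
  //   Monad.adapters(Option(Option(2))).flatten   // Some(2)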
/**
* Implicit conversion from a Context[A] to a Monad.Adapter.
*/
implicit def adapters[Context[_], A](ca: Context[A])(implicit ev: Monad[Context]): Adapter[Context, A] =
new Adapter(ca)(ev)
/**
* Summons an implicitly available <code>Monad[Context]</code>.
*
   * @param ev Evidence (implicit typeclass) that Context is a Monad.
* @tparam Context The type of the <code>Monad[Context]</code> to summon.
* @return The <code>Monad[Context]</code> instance.
*/
def apply[Context[_]](implicit ev: Monad[Context]): Monad[Context] = ev
}
|
cquiroz/scalatest
|
examples/src/main/scala/org/scalatest/examples/propspec/multi/ExampleSpec.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.fixture.propspec.multi
import org.scalatest._
import prop.PropertyChecks
import scala.collection.mutable.ListBuffer
class ExampleSpec extends fixture.PropSpec with PropertyChecks with ShouldMatchers {
case class FixtureParam(builder: StringBuilder, buffer: ListBuffer[String])
def withFixture(test: OneArgTest) = {
// Create needed mutable objects
val stringBuilder = new StringBuilder("ScalaTest is ")
val listBuffer = new ListBuffer[String]
val theFixture = FixtureParam(stringBuilder, listBuffer)
// Invoke the test function, passing in the mutable objects
withFixture(test.toNoArgTest(theFixture))
}
property("testing should be easy") { f =>
f.builder.append("easy!")
assert(f.builder.toString === "ScalaTest is easy!")
assert(f.buffer.isEmpty)
val firstChar = f.builder(0)
forAll { (c: Char) =>
whenever (c != 'S') {
c should not equal firstChar
}
}
f.buffer += "sweet"
}
property("testing should be fun") { f =>
f.builder.append("fun!")
assert(f.builder.toString === "ScalaTest is fun!")
assert(f.buffer.isEmpty)
val firstChar = f.builder(0)
forAll { (c: Char) =>
whenever (c != 'S') {
c should not equal firstChar
}
}
}
}
|
cquiroz/scalatest
|
scalatest-test/src/test/scala/org/scalatest/junit/JUnitWrapperSuiteSuite.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.junit {
import org.scalatest._
import org.scalatest.events._
class JUnitWrapperSuiteSuite extends FunSuite {
class MyReporter extends Reporter {
def apply(event: Event) {
event match {
case event: TestStarting =>
testStartingEvents += event
case event: TestIgnored =>
testIgnoredEvent = Some(event)
case event: TestSucceeded =>
testSucceededEvents += event
case event: TestFailed =>
testFailedEvent = Some(event)
case _ =>
}
}
var testStartingEvents = Set[TestStarting]()
var testSucceededEvents = Set[TestSucceeded]()
var testFailedEvent: Option[TestFailed] = None
var testIgnoredEvent: Option[TestIgnored] = None
}
test("A JUnitWrapperSuite runs a JUnit3 TestCase class successfully") {
val jRap =
new JUnitWrapperSuite("org.scalatest.junit.JUnit3TestCase",
this.getClass.getClassLoader)
val repA = new MyReporter
jRap.run(None, Args(repA))
//
// verify one of the TestStarting events
//
    val startingEventsTestA =
      repA.testStartingEvents.filter(_.testName == "testA")
assert(startingEventsTestA.size === 1)
val startingEventTestA = startingEventsTestA.toArray.apply(0) // For 2.8
assert(startingEventTestA.suiteName === "JUnit3TestCase")
assert(startingEventTestA.suiteClassName.get ===
"org.scalatest.junit.JUnit3TestCase")
assert(repA.testStartingEvents.size === 3)
//
// verify one of the TestSucceeded events
//
val successEventsTestA =
repA.testSucceededEvents.filter(_.testName == "testA")
assert(successEventsTestA.size === 1)
val successEventTestA = successEventsTestA.toArray.apply(0) // For 2.8
assert(successEventTestA.suiteName === "JUnit3TestCase")
assert(successEventTestA.suiteClassName.get ===
"org.scalatest.junit.JUnit3TestCase")
assert(repA.testSucceededEvents.size === 2)
//
// verify a TestFailed event
//
assert(repA.testFailedEvent.isDefined)
assert(repA.testFailedEvent.get.testName === "testC")
assert(repA.testFailedEvent.get.suiteName === "JUnit3TestCase")
assert(repA.testFailedEvent.get.suiteClassName.get ===
"org.scalatest.junit.JUnit3TestCase")
}
test("A JUnitWrapperSuite runs a JUnit4 class successfully") {
val jRap =
new JUnitWrapperSuite("org.scalatest.junit.JHappySuite",
this.getClass.getClassLoader)
val repA = new MyReporter
jRap.run(None, Args(repA))
//
// verify the TestStarting event
//
assert(repA.testStartingEvents.size === 1)
val startingEvent = repA.testStartingEvents.toArray.apply(0) // For 2.8
assert(startingEvent.testName === "verifySomething")
assert(startingEvent.suiteName === "JHappySuite")
assert(startingEvent.suiteClassName.get ===
"org.scalatest.junit.JHappySuite")
//
// verify the TestSucceeded event
//
assert(repA.testSucceededEvents.size === 1)
val succeededEvent = repA.testSucceededEvents.toArray.apply(0) // For 2.8
assert(succeededEvent.testName === "verifySomething")
assert(succeededEvent.suiteName === "JHappySuite")
assert(succeededEvent.suiteClassName.get ===
"org.scalatest.junit.JHappySuite")
}
test("A JUnitWrapperSuite runs a failing JUnit4 class successfully") {
val jRap =
new JUnitWrapperSuite("org.scalatest.junit.JBitterSuite",
this.getClass.getClassLoader)
val repA = new MyReporter
jRap.run(None, Args(repA))
//
// verify the TestStarting event
//
assert(repA.testStartingEvents.size === 1)
val startingEvent = repA.testStartingEvents.toArray.apply(0) // For 2.8
assert(startingEvent.testName === "verifySomething")
assert(startingEvent.suiteName === "JBitterSuite")
assert(startingEvent.suiteClassName.get ===
"org.scalatest.junit.JBitterSuite")
//
// verify a TestFailed event
//
assert(repA.testFailedEvent.isDefined)
assert(repA.testFailedEvent.get.testName === "verifySomething")
assert(repA.testFailedEvent.get.suiteName === "JBitterSuite")
assert(repA.testFailedEvent.get.suiteClassName.get ===
"org.scalatest.junit.JBitterSuite")
assert(repA.testSucceededEvents.size === 0)
}
test("A JUnitWrapperSuite runs a JUnit3 TestSuite class successfully") {
val jRap = new JUnitWrapperSuite("org.scalatest.junit.JUnit3TestSuite",
this.getClass.getClassLoader)
val repA = new MyReporter
jRap.run(None, Args(repA))
//
// verify one of the TestStarting events
//
val startingEventsTestB =
repA.testStartingEvents.filter(_.testName == "testB")
assert(startingEventsTestB.size === 1)
val startingEventTestB = startingEventsTestB.toArray.apply(0) // For 2.8
assert(startingEventTestB.testName === "testB")
assert(startingEventTestB.suiteName === "JUnit3TestCase")
assert(startingEventTestB.suiteClassName.get ===
"org.scalatest.junit.JUnit3TestCase")
assert(repA.testStartingEvents.size === 2)
//
// verify one of the TestSucceeded events
//
val successEventsTestB =
repA.testSucceededEvents.filter(_.testName == "testB")
assert(successEventsTestB.size === 1)
val successEventTestB = successEventsTestB.toArray.apply(0) // For 2.8
assert(successEventTestB.suiteName === "JUnit3TestCase")
assert(successEventTestB.suiteClassName.get ===
"org.scalatest.junit.JUnit3TestCase")
assert(repA.testSucceededEvents.size === 2)
}
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/tools/ReporterConfiguration.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import java.net.URL
/**
* This file has types that are used in parsing command line arguments to Runner.
*
* @author <NAME>
*/
private[tools] sealed abstract class ReporterConfiguration
private[tools] case class GraphicReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class StandardOutReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class StandardErrReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class FileReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class MemoryReporterConfiguration(fileName: String) extends ReporterConfiguration
private[tools] case class JunitXmlReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class DashboardReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String, numOldFilesToKeep: Int) extends ReporterConfiguration
private[tools] case class XmlReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class HtmlReporterConfiguration(configSet: Set[ReporterConfigParam], directory: String, cssFileName: Option[URL]) extends ReporterConfiguration
private[tools] case class CustomReporterConfiguration(configSet: Set[ReporterConfigParam], reporterClass: String) extends ReporterConfiguration
private[tools] case class XmlSocketReporterConfiguration(host: String, port: Int) extends ReporterConfiguration
private[tools] case class SocketReporterConfiguration(host: String, port: Int) extends ReporterConfiguration
// If there were no fileReporterSpecList or customReporterSpecList specified, you get Nil
// If there were no graphicReporterSpec, standardOutReporterSpec, or standardErrReporterSpec, you get None
private[tools] case class ReporterConfigurations(
val graphicReporterConfiguration: Option[GraphicReporterConfiguration],
val fileReporterConfigurationList: List[FileReporterConfiguration],
val memoryReporterConfigurationList: List[MemoryReporterConfiguration],
val junitXmlReporterConfigurationList: List[JunitXmlReporterConfiguration],
//val dashboardReporterConfigurationList: List[DashboardReporterConfiguration],
//val xmlReporterConfigurationList: List[XmlReporterConfiguration],
val standardOutReporterConfiguration: Option[StandardOutReporterConfiguration],
val standardErrReporterConfiguration: Option[StandardErrReporterConfiguration],
val htmlReporterConfigurationList: List[HtmlReporterConfiguration],
val customReporterConfigurationList: List[CustomReporterConfiguration],
val xmlSocketReporterConfigurationList: List[XmlSocketReporterConfiguration],
val socketReporterConfigurationList: List[SocketReporterConfiguration]
) extends Seq[ReporterConfiguration] {
val reporterConfigurationList =
List.concat[ReporterConfiguration](
graphicReporterConfiguration.toList,
fileReporterConfigurationList,
memoryReporterConfigurationList,
junitXmlReporterConfigurationList,
//dashboardReporterConfigurationList,
//xmlReporterConfigurationList,
standardOutReporterConfiguration.toList,
standardErrReporterConfiguration.toList,
htmlReporterConfigurationList,
customReporterConfigurationList,
xmlSocketReporterConfigurationList,
socketReporterConfigurationList
)
// Need to add the null pointer checks, or later, NotNull
override def length = reporterConfigurationList.length
// override def elements = reporterConfigurationList.iterator
override def iterator = reporterConfigurationList.iterator // For 2.8
override def apply(i: Int) = reporterConfigurationList(i)
}
|
cquiroz/scalatest
|
scalactic-macro/src/main/scala/org/scalactic/anyvals/GuessANumberMacros.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import reflect.macros.Context
import org.scalactic.Resources
private[scalactic] object GuessANumberMacro extends CompileTimeAssertions {
def apply(c: Context)(value: c.Expr[Int]): c.Expr[GuessANumber] = {
val notValidMsg =
"GuessANumber.apply can only be invoked on Int literals between 1 and "+
"10, inclusive, like GuessANumber(8)."
val notLiteralMsg =
"GuessANumber.apply can only be invoked on Int literals, like "+
"GuessANumber(8). Please use GuessANumber.from instead."
ensureValidIntLiteral(c)(value, notValidMsg, notLiteralMsg) { i =>
i >= 1 && i <= 10
}
c.universe.reify { GuessANumber.from(value.splice).get }
}
}
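// Illustrative compile-time behavior of the macro above (a sketch, not a test):
//   GuessANumber(8)   // compiles, expanding to GuessANumber.from(8).get
//   GuessANumber(11)  // rejected at compile time with notValidMsg
//   GuessANumber(n)   // rejected with notLiteralMsg when n is not an Int literal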
import CompileTimeAssertions._
private[scalactic] object PercentMacro {
def apply(c: Context)(value: c.Expr[Int]): c.Expr[Percent] = {
val notValidMsg =
"Percent.apply can only be invoked on Int literals between 0 and 100, "+
"inclusive, like Percent(8)."
val notLiteralMsg =
"Percent.apply can only be invoked on Int literals, like Percent(8)."+
" Please use Percent.from instead."
ensureValidIntLiteral(c)(value, notValidMsg, notLiteralMsg) { i =>
i >= 0 && i <= 100
}
c.universe.reify { Percent.from(value.splice).get }
}
}
private[scalactic] object TLAMacro {
def apply(c: Context)(value: c.Expr[String]): c.Expr[TLA] = {
val notValidMsg =
"TLA.apply can only be invoked on String literals of length 3,"+
"like \"LOL\"."
val notLiteralMsg =
"TLA.apply can only be invoked on String literals, like \"LOL\""+
" Please use TLA.from instead."
ensureValidStringLiteral(c)(value, notValidMsg, notLiteralMsg) { s =>
s.length == 3
}
c.universe.reify { TLA.from(value.splice).get }
}
}
private[scalactic] object DigitMacro {
def apply(c: Context)(value: c.Expr[Char]): c.Expr[Digit] = {
val notValidMsg =
"Digit.apply can only be invoked on Char literals that are digits," +
"like '8'."
val notLiteralMsg =
"Digit.apply can only be invoked on Char literals that are digits, like '8'" +
" Please use Digit.from instead."
ensureValidCharLiteral(c)(value, notValidMsg, notLiteralMsg) { c =>
c >= '0' && c <= '9'
}
c.universe.reify { Digit.from(value.splice).get }
}
}
|
cquiroz/scalatest
|
scalactic-test/src/test/scala/org/scalactic/algebra/MonadSpec.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.algebra
import org.scalacheck.Arbitrary
import org.scalactic.Or.B
import org.scalactic.{Good, Or, UnitSpec}
import org.scalatest.laws.MonadLaws
import scala.language.implicitConversions
class MonadSpec extends UnitSpec {
"List" should "obey the monad laws" in {
class ListMonad extends Monad[List] {
override def flatMap[A, B](ca: List[A])(f: (A) => List[B]): List[B] = ca.flatMap(f)
override def insert[A](a: A): List[A] = List(a)
}
implicit val listMonad = new ListMonad
new MonadLaws[List].assert()
}
"Option" should "obey the monad laws" in {
class OptionMonad extends Monad[Option] {
override def flatMap[A, B](ca: Option[A])(f: (A) => Option[B]): Option[B] = ca.flatMap(f)
override def insert[A](a: A): Option[A] = Option(a)
}
implicit val optionMonad = new OptionMonad
new MonadLaws[Option].assert()
}
"The good nature of Or" should "obey the monad laws" in {
class OrMonad[BAD] extends Monad[Or.B[BAD]#G] {
override def flatMap[A, B](ca: Or.B[BAD]#G[A])(f: (A) => Or.B[BAD]#G[B]): Or.B[BAD]#G[B] =
ca.flatMap(f)
override def insert[A](a: A): B[BAD]#G[A] = Good(a)
}
implicit val orMonad = new OrMonad[Int]
implicit def orArbGood[G, B](implicit arbG: Arbitrary[G]): Arbitrary[G Or B] = Arbitrary(for (g <- Arbitrary.arbitrary[G]) yield Good(g))
new MonadLaws[Or.B[Int]#G].assert()
}
"A Monad" should "offer a flatten method" in {
class ListMonad extends Monad[List] {
override def flatMap[A, B](ca: List[A])(f: (A) => List[B]): List[B] = ca.flatMap(f)
override def insert[A](a: A): List[A] = List(a)
}
implicit val listMonad = new ListMonad
Monad[List].flatten(List(List(1, 2), List(3, 4), List(5, 6))) shouldEqual List(1, 2, 3, 4, 5, 6)
}
"A Monad Adapter" should "offer a flatten method" in {
class ListMonad extends Monad[List] {
override def flatMap[A, B](ca: List[A])(f: (A) => List[B]): List[B] = ca.flatMap(f)
override def insert[A](a: A): List[A] = List(a)
}
implicit val listMonad = new ListMonad
val adapted = new Monad.Adapter[List, List[Int]]((List(List(1, 2), List(3, 4), List(5, 6))))
adapted.flatten shouldEqual List(1, 2, 3, 4, 5, 6)
}
}
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/enablers/Length.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.enablers
/**
* Supertrait for <code>Length</code> typeclasses.
*
* <p>
* Trait <code>Length</code> is a typeclass trait for objects that can be queried for length.
* Objects of type T for which an implicit <code>Length[T]</code> is available can be used
* with the <code>should have length</code> syntax.
* In other words, this trait enables you to use the length checking
* syntax with arbitrary objects. As an example, consider
* <code>java.net.DatagramPacket</code>, which has a <code>getLength</code> method. By default, this
* can't be used with ScalaTest's <code>have length</code> syntax.
* </p>
*
* <pre>
* scala> import java.net.DatagramPacket
* import java.net.DatagramPacket
*
* scala> import org.scalatest.Matchers._
* import org.scalatest.Matchers._
*
* scala> val dp = new DatagramPacket(Array(0x0, 0x1, 0x2, 0x3), 4)
* dp: java.net.DatagramPacket = java.net.DatagramPacket@54906181
*
* scala> dp.getLength
* res0: Int = 4
*
* scala> dp should have length 4
* <console>:13: error: could not find implicit value for parameter ev: org.scalatest.matchers.ShouldMatchers.Extent[java.net.DatagramPacket]
* dp should have length 4
* ^
*
* scala> implicit val lengthOfDatagramPacket =
* | new Length[DatagramPacket] {
* | def lengthOf(dp: DatagramPacket): Long = dp.getLength
* | }
* lengthOfDatagramPacket: java.lang.Object with org.scalatest.matchers.ShouldMatchers.Length[java.net.DatagramPacket] = $anon$1@550c6b37
*
* scala> dp should have length 4
*
* scala> dp should have length 3
* org.scalatest.exceptions.TestFailedException: java.net.DatagramPacket@54906181 had length 4, not length 3
* </pre>
*
* @author <NAME>
*/
trait Length[T] {
/**
* Returns the length of the passed object.
*
* @param obj the object whose length to return
* @return the length of the passed object
*/
def lengthOf(obj: T): Long
}
/**
* Companion object for <code>Length</code> that provides implicit implementations for the following types:
*
* <ul>
* <li><code>scala.collection.GenSeq</code></li>
* <li><code>String</code></li>
* <li><code>Array</code></li>
* <li><code>java.util.Collection</code></li>
 * <li>arbitrary object with a <code>length()</code> method that returns <code>Int</code></li>
 * <li>arbitrary object with a parameterless <code>length</code> method that returns <code>Int</code></li>
 * <li>arbitrary object with a <code>getLength()</code> method that returns <code>Int</code></li>
 * <li>arbitrary object with a parameterless <code>getLength</code> method that returns <code>Int</code></li>
 * <li>arbitrary object with a <code>length()</code> method that returns <code>Long</code></li>
 * <li>arbitrary object with a parameterless <code>length</code> method that returns <code>Long</code></li>
 * <li>arbitrary object with a <code>getLength()</code> method that returns <code>Long</code></li>
 * <li>arbitrary object with a parameterless <code>getLength</code> method that returns <code>Long</code></li>
* </ul>
*/
object Length {
/**
* Enable <code>Length</code> implementation for <code>java.util.List</code>
*
* @tparam JLIST any subtype of <code>java.util.List</code>
* @return <code>Length[JLIST]</code> that supports <code>java.util.List</code> in <code>have length</code> syntax
*/
implicit def lengthOfJavaList[JLIST <: java.util.List[_]]: Length[JLIST] =
new Length[JLIST] {
def lengthOf(javaList: JLIST): Long = javaList.size
}
/**
* Enable <code>Length</code> implementation for <code>scala.collection.GenSeq</code>
*
* @tparam SEQ any subtype of <code>scala.collection.GenSeq</code>
* @return <code>Length[SEQ]</code> that supports <code>scala.collection.GenSeq</code> in <code>have length</code> syntax
*/
implicit def lengthOfGenSeq[SEQ <: scala.collection.GenSeq[_]]: Length[SEQ] =
new Length[SEQ] {
def lengthOf(seq: SEQ): Long = seq.length
}
/**
* Enable <code>Length</code> implementation for <code>Array</code>
*
* @tparam E the type of the element in the <code>Array</code>
* @return <code>Length[Array[E]]</code> that supports <code>Array</code> in <code>have length</code> syntax
*/
implicit def lengthOfArray[E]: Length[Array[E]] =
new Length[Array[E]] {
def lengthOf(arr: Array[E]): Long = arr.length
}
/**
* Enable <code>Length</code> implementation for <code>String</code>
*
* @return <code>Length[String]</code> that supports <code>String</code> in <code>have length</code> syntax
*/
implicit val lengthOfString: Length[String] =
new Length[String] {
def lengthOf(str: String): Long = str.length
}
import scala.language.reflectiveCalls
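  // The implicits below accept any object that merely declares a suitable length or getLength
  // member, expressed as structural types, which is why reflectiveCalls is imported here.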
/**
   * Enable <code>Length</code> implementation for arbitrary object with <code>length()</code> method that returns <code>Int</code>.
*
* @tparam T any type with <code>length()</code> method that returns <code>Int</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithLengthMethodForInt[T <: AnyRef { def length(): Int}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.length
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with parameterless <code>length</code> method that returns <code>Int</code>.
*
* @tparam T any type with parameterless <code>length</code> method that returns <code>Int</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithParameterlessLengthMethodForInt[T <: AnyRef { def length: Int}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.length
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with <code>getLength()</code> method that returns <code>Int</code>.
*
* @tparam T any type with <code>getLength()</code> method that returns <code>Int</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithGetLengthMethodForInt[T <: AnyRef { def getLength(): Int}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.getLength
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with parameterless <code>getLength</code> method that returns <code>Int</code>.
*
* @tparam T any type with parameterless <code>getLength</code> method that returns <code>Int</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithParameterlessGetLengthMethodForInt[T <: AnyRef { def getLength: Int}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.getLength
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with <code>length()</code> method that returns <code>Long</code>.
*
* @tparam T any type with <code>length()</code> method that returns <code>Long</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithLengthMethodForLong[T <: AnyRef { def length(): Long}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.length
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with parameterless <code>length</code> method that returns <code>Long</code>.
*
* @tparam T any type with parameterless <code>length</code> method that returns <code>Long</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithParameterlessLengthMethodForLong[T <: AnyRef { def length: Long}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.length
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with <code>getLength()</code> method that returns <code>Long</code>.
*
* @tparam T any type with <code>getLength()</code> method that returns <code>Long</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithGetLengthMethodForLong[T <: AnyRef { def getLength(): Long}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.getLength
}
/**
   * Enable <code>Length</code> implementation for arbitrary object with parameterless <code>getLength</code> method that returns <code>Long</code>.
*
* @tparam T any type with parameterless <code>getLength</code> method that returns <code>Long</code>
* @return <code>Length[T]</code> that supports <code>T</code> in <code>have length</code> syntax
*/
implicit def lengthOfAnyRefWithParameterlessGetLengthMethodForLong[T <: AnyRef { def getLength: Long}]: Length[T] =
new Length[T] {
def lengthOf(obj: T): Long = obj.getLength
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/AsyncFreeSpecLike.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.concurrent.{ExecutionContext, Future}
@Finders(Array("org.scalatest.finders.FreeSpecFinder"))
trait AsyncFreeSpecLike extends FreeSpecRegistration with AsyncTests with OneInstancePerTest { thisSuite =>
implicit def executionContext: ExecutionContext
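  // Converts the Future[Unit] produced by a test body into an AsyncOutcome: a successful
  // future becomes Succeeded; TestCanceledException, TestPendingException, and
  // TestFailedException are recovered into Canceled, Pending, and Failed respectively; and
  // any other exception that should not abort the suite also becomes Failed.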
override private[scalatest] def transformToOutcome(testFun: => Future[Unit]): () => AsyncOutcome =
() => {
val futureUnit = testFun
FutureOutcome(
futureUnit.map(u => Succeeded).recover {
case ex: exceptions.TestCanceledException => Canceled(ex)
case _: exceptions.TestPendingException => Pending
case tfe: exceptions.TestFailedException => Failed(tfe)
case ex: Throwable if !Suite.anExceptionThatShouldCauseAnAbort(ex) => Failed(ex)
}
)
}
private final val engine: Engine = getEngine
import engine._
protected override def runTest(testName: String, args: Args): Status = {
if (args.runTestInNewInstance) {
// In initial instance, so create a new test-specific instance for this test and invoke run on it.
val oneInstance = newInstance
oneInstance.run(Some(testName), args)
}
else {
// Therefore, in test-specific instance, so run the test.
def invokeWithAsyncFixture(theTest: TestLeaf): AsyncOutcome = {
val theConfigMap = args.configMap
val testData = testDataFor(testName, theConfigMap)
FutureOutcome(
withAsyncFixture(
new NoArgAsyncTest {
val name = testData.name
def apply(): Future[Outcome] = { theTest.testFun().toFutureOutcome }
val configMap = testData.configMap
val scopes = testData.scopes
val text = testData.text
val tags = testData.tags
}
)
)
}
runTestImpl(thisSuite, testName, args, true, invokeWithAsyncFixture)
}
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/tools/JUnitXmlReporter.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import org.scalatest.events._
import Suite.unparsedXml
import java.io.PrintWriter
import java.text.SimpleDateFormat
import java.util.Enumeration
import java.util.Properties
import java.net.UnknownHostException
import java.net.InetAddress
import scala.collection.mutable.Set
import scala.collection.mutable.ListBuffer
import scala.xml
/**
* A <code>Reporter</code> that writes test status information in XML format
* using the same format as is generated by the xml formatting option of the
* ant <junit> task.
*
* A separate file is written for each test suite, named TEST-[classname].xml,
* to the directory specified.
*
* @exception IOException if unable to open the file for writing
*
* @author <NAME>
*/
private[scalatest] class JUnitXmlReporter(directory: String) extends Reporter {
private val events = Set.empty[Event]
private val propertiesXml = genPropertiesXml
//
// Records events in 'events' set. Generates xml from events upon receipt
// of SuiteCompleted or SuiteAborted events.
//
def apply(event: Event) {
events += event
event match {
case e: SuiteCompleted =>
writeSuiteFile(e, e.suiteId)
case e: SuiteAborted =>
writeSuiteFile(e, e.suiteId)
case _ =>
}
}
//
// Writes the xml file for a single test suite. Removes processed
// events from the events Set as they are used.
//
private def writeSuiteFile(endEvent: Event, suiteId: String) {
require(endEvent.isInstanceOf[SuiteCompleted] ||
endEvent.isInstanceOf[SuiteAborted])
val testsuite = getTestsuite(endEvent, suiteId)
val xmlStr = xmlify(testsuite)
val filespec = directory + "/TEST-" + suiteId + ".xml"
val out = new PrintWriter(filespec, "UTF-8")
out.print(xmlStr)
out.close()
}
//
// Constructs a Testsuite object corresponding to a specified
// SuiteCompleted or SuiteAborted event.
//
// Scans events reported so far and builds the Testsuite from events
// associated with the specified suite. Removes events from
// the class's events Set as they are consumed.
//
// Only looks at events that have the same ordinal prefix as the
// end event being processed (where an event's ordinal prefix is its
// ordinal list with last element removed). Events with the same
// prefix get processed sequentially, so filtering this way eliminates
// events from any nested suites being processed concurrently
// that have not yet completed when the parent's SuiteCompleted or
// SuiteAborted event is processed.
//
private def getTestsuite(endEvent: Event, suiteId: String): Testsuite = {
require(endEvent.isInstanceOf[SuiteCompleted] ||
endEvent.isInstanceOf[SuiteAborted])
val orderedEvents = events.toList.filter { e =>
e match {
case e: TestStarting => e.suiteId == suiteId
case e: TestSucceeded => e.suiteId == suiteId
case e: TestIgnored => e.suiteId == suiteId
case e: TestFailed => e.suiteId == suiteId
case e: TestPending => e.suiteId == suiteId
case e: TestCanceled => e.suiteId == suiteId
case e: InfoProvided =>
e.nameInfo match {
case Some(nameInfo) =>
nameInfo.suiteId == suiteId
case None => false
}
case e: AlertProvided =>
e.nameInfo match {
case Some(nameInfo) =>
nameInfo.suiteId == suiteId
case None => false
}
case e: NoteProvided =>
e.nameInfo match {
case Some(nameInfo) =>
nameInfo.suiteId == suiteId
case None => false
}
case e: MarkupProvided =>
e.nameInfo match {
case Some(nameInfo) =>
nameInfo.suiteId == suiteId
case None => false
}
case e: ScopeOpened => e.nameInfo.suiteId == suiteId
case e: ScopeClosed => e.nameInfo.suiteId == suiteId
case e: SuiteStarting => e.suiteId == suiteId
case e: SuiteAborted => e.suiteId == suiteId
case e: SuiteCompleted => e.suiteId == suiteId
case _ => false
}
}.sortWith((a, b) => a < b).toArray
val (startIndex, endIndex) = locateSuite(orderedEvents, endEvent)
val startEvent = orderedEvents(startIndex).asInstanceOf[SuiteStarting]
events -= startEvent
val name =
startEvent.suiteClassName match {
case Some(className) => className
case None => startEvent.suiteName
}
val testsuite = Testsuite(name, startEvent.timeStamp)
var idx = startIndex + 1
while (idx <= endIndex) {
val event = orderedEvents(idx)
events -= event
event match {
case e: TestStarting =>
val (testEndIndex, testcase) = processTest(orderedEvents, e, idx)
testsuite.testcases += testcase
if (testcase.failure != None) testsuite.failures += 1
idx = testEndIndex + 1
case e: SuiteAborted =>
assert(endIndex == idx)
testsuite.errors += 1
testsuite.time = e.timeStamp - testsuite.timeStamp
idx += 1
case e: SuiteCompleted =>
assert(endIndex == idx)
testsuite.time = e.timeStamp - testsuite.timeStamp
idx += 1
case e: TestIgnored =>
val testcase = Testcase(e.testName, e.suiteClassName, e.timeStamp)
testcase.ignored = true
testsuite.testcases += testcase
idx += 1
case e: InfoProvided => idx += 1
case e: AlertProvided => idx += 1
case e: NoteProvided => idx += 1
case e: MarkupProvided => idx += 1
case e: ScopeOpened => idx += 1
case e: ScopeClosed => idx += 1
case e: ScopePending => idx += 1
case e: TestPending => unexpected(e)
case e: TestCanceled => unexpected(e)
case e: RunStarting => unexpected(e)
case e: RunCompleted => unexpected(e)
case e: RunStopped => unexpected(e)
case e: RunAborted => unexpected(e)
case e: TestSucceeded => unexpected(e)
case e: TestFailed => unexpected(e)
case e: SuiteStarting => unexpected(e)
case e: DiscoveryStarting => unexpected(e)
case e: DiscoveryCompleted => unexpected(e)
}
}
testsuite
}
//
// Finds the indexes for the SuiteStarted and SuiteCompleted or
// SuiteAborted endpoints of a test suite within an ordered array of
// events, given the terminating SuiteCompleted or SuiteAborted event.
//
// Searches sequentially through the array to find the specified
// SuiteCompleted event and its preceding SuiteStarting event.
//
// (The orderedEvents array does not contain any SuiteStarting events
// from nested suites running concurrently because of the ordinal-prefix
// filtering performed in getTestsuite(). It does not contain any from
// nested suites running sequentially because those get removed when they
// are processed upon occurrence of their corresponding SuiteCompleted
// events.)
//
private def locateSuite(orderedEvents: Array[Event],
endEvent: Event):
(Int, Int) = {
require(orderedEvents.size > 0)
require(endEvent.isInstanceOf[SuiteCompleted] ||
endEvent.isInstanceOf[SuiteAborted])
var startIndex = 0
var endIndex = 0
var idx = 0
while ((idx < orderedEvents.size) && (endIndex == 0)) {
val event = orderedEvents(idx)
event match {
case e: SuiteStarting =>
startIndex = idx
case e: SuiteCompleted =>
if (event == endEvent) {
endIndex = idx
assert(
e.suiteName ==
orderedEvents(startIndex).asInstanceOf[SuiteStarting].
suiteName)
}
case e: SuiteAborted =>
if (event == endEvent) {
endIndex = idx
assert(
e.suiteName ==
orderedEvents(startIndex).asInstanceOf[SuiteStarting].
suiteName)
}
case _ =>
}
idx += 1
}
assert(endIndex > 0)
assert(orderedEvents(startIndex).isInstanceOf[SuiteStarting])
(startIndex, endIndex)
}
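  //
  // Returns the number of InfoProvided and MarkupProvided events among a test's recorded
  // events; processTest uses it to advance its index past those events.
  //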
private def idxAdjustmentForRecordedEvents(recordedEvents: collection.immutable.IndexedSeq[RecordableEvent]) =
recordedEvents.filter(e => e.isInstanceOf[InfoProvided] || e.isInstanceOf[MarkupProvided]).size
//
// Constructs a Testcase object from events in orderedEvents array.
//
// Accepts a TestStarting event and its index within orderedEvents.
// Returns a Testcase object plus the index to its corresponding
// test completion event. Removes events from class's events Set
// as they are processed.
//
private def processTest(orderedEvents: Array[Event],
startEvent: TestStarting, startIndex: Int):
(Int, Testcase) = {
val testcase = Testcase(startEvent.testName, startEvent.suiteClassName,
startEvent.timeStamp)
var endIndex = 0
var idx = startIndex + 1
while ((idx < orderedEvents.size) && (endIndex == 0)) {
val event = orderedEvents(idx)
events -= event
event match {
case e: TestSucceeded =>
endIndex = idx
testcase.time = e.timeStamp - testcase.timeStamp
idx += idxAdjustmentForRecordedEvents(e.recordedEvents)
case e: TestFailed =>
endIndex = idx
testcase.failure = Some(e)
testcase.time = e.timeStamp - testcase.timeStamp
idx += idxAdjustmentForRecordedEvents(e.recordedEvents)
case e: TestPending =>
endIndex = idx
testcase.pending = true
idx += idxAdjustmentForRecordedEvents(e.recordedEvents)
case e: TestCanceled =>
endIndex = idx
testcase.canceled = true
idx += idxAdjustmentForRecordedEvents(e.recordedEvents)
case e: ScopeOpened => idx += 1
case e: ScopeClosed => idx += 1
case e: ScopePending => idx += 1
case e: InfoProvided => idx += 1
case e: MarkupProvided => idx += 1
case e: AlertProvided => idx += 1
case e: NoteProvided => idx += 1
case e: SuiteCompleted => unexpected(e)
case e: TestStarting => unexpected(e)
case e: TestIgnored => unexpected(e)
case e: SuiteStarting => unexpected(e)
case e: RunStarting => unexpected(e)
case e: RunCompleted => unexpected(e)
case e: RunStopped => unexpected(e)
case e: RunAborted => unexpected(e)
case e: SuiteAborted => unexpected(e)
case e: DiscoveryStarting => unexpected(e)
case e: DiscoveryCompleted => unexpected(e)
}
}
(endIndex, testcase)
}
//
// Creates an xml string describing a run of a test suite.
//
def xmlify(testsuite: Testsuite): String = {
val xmlVal =
<testsuite
errors = { "" + testsuite.errors }
failures = { "" + testsuite.failures }
hostname = { "" + hostname }
name = { "" + testsuite.name }
tests = { "" + testsuite.testcases.size }
time = { "" + testsuite.time / 1000.0 }
timestamp = { "" + formatTimeStamp(testsuite.timeStamp) }>
{ propertiesXml }
{
for (testcase <- testsuite.testcases) yield {
<testcase
name = { "" + testcase.name }
classname = { "" + strVal(testcase.className) }
time = { "" + testcase.time / 1000.0 }
>
{
if (testcase.ignored || testcase.pending || testcase.canceled)
<skipped/>
else
failureXml(testcase.failure)
}
</testcase>
}
}
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
val prettified = (new xml.PrettyPrinter(76, 2)).format(xmlVal)
// scala xml strips out the <![CDATA[]]> elements, so restore them here
val withCDATA =
prettified.
replace("<system-out></system-out>",
"<system-out><![CDATA[]]></system-out>").
replace("<system-err></system-err>",
"<system-err><![CDATA[]]></system-err>")
"<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" + withCDATA
}
//
// Returns string representation of stack trace for specified Throwable,
// including any nested exceptions.
//
def getStackTrace(throwable: Throwable): String = {
"" + throwable +
Array.concat(throwable.getStackTrace).mkString("\n at ",
"\n at ", "\n") +
{
if (throwable.getCause != null) {
" Cause: " +
getStackTrace(throwable.getCause)
}
else ""
}
}
//
// Generates <failure> xml for TestFailed event, if specified Option
// contains one.
//
private def failureXml(failureOption: Option[TestFailed]): xml.NodeSeq = {
failureOption match {
case None =>
xml.NodeSeq.Empty
case Some(failure) =>
val (throwableType, throwableText) =
failure.throwable match {
case None => ("", "")
case Some(throwable) =>
val throwableType = "" + throwable.getClass
val throwableText = getStackTrace(throwable)
(throwableType, throwableText)
}
        <failure message = { failure.message.replaceAll("\n", "
") }
type = { throwableType } >
{ throwableText }
</failure>
}
}
//
// Returns toString value of option contents if Some, or empty string if
// None.
//
private def strVal(option: Option[Any]): String = {
option match {
case Some(x) => "" + x
case None => ""
}
}
//
// Determines hostname of local machine.
//
lazy val hostname: String =
try {
val localMachine = InetAddress.getLocalHost();
localMachine.getHostName
} catch {
case e: UnknownHostException => "unknown"
}
//
// Generates <properties> element of xml.
//
private def genPropertiesXml: xml.Elem = {
val sysprops = System.getProperties
<properties> {
for (name <- propertyNames(sysprops))
yield
<property name={ name } value = { sysprops.getProperty(name) }>
</property>
}
</properties>
}
//
// Returns a list of the names of properties in a Properties object.
//
private def propertyNames(props: Properties): List[String] = {
val listBuf = new ListBuffer[String]
val enumeration = props.propertyNames
while (enumeration.hasMoreElements)
listBuf += "" + enumeration.nextElement
listBuf.toList
}
//
// Formats timestamp into a string for display, e.g. "2009-08-31T14:59:37"
//
private def formatTimeStamp(timeStamp: Long): String = {
val dateFmt = new SimpleDateFormat("yyyy-MM-dd")
val timeFmt = new SimpleDateFormat("HH:mm:ss")
dateFmt.format(timeStamp) + "T" + timeFmt.format(timeStamp)
}
//
// Throws an exception if an unexpected Event is encountered.
//
def unexpected(event: Event) {
throw new RuntimeException("unexpected event [" + event + "]")
}
//
// Class to hold information about an execution of a test suite.
//
private[scalatest] case class Testsuite(name: String, timeStamp: Long) {
var errors = 0
var failures = 0
var time = 0L
val testcases = new ListBuffer[Testcase]
}
//
// Class to hold information about an execution of a testcase.
//
private[scalatest] case class Testcase(name: String, className: Option[String],
timeStamp: Long) {
var time = 0L
var pending = false
var canceled = false
var ignored = false
var failure: Option[TestFailed] = None
}
}
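//
// For reference, a generated report has roughly this shape (values and attributes shown
// here are illustrative, not exhaustive):
//
//   <?xml version="1.0" encoding="UTF-8" ?>
//   <testsuite name="com.example.MySuite" ... timestamp="2009-08-31T14:59:37" ...>
//     <properties>
//       <property name="java.version" value="..."></property>
//     </properties>
//     <testcase name="an example test" classname="com.example.MySuite" time="0.012">
//       <failure message="..." type="class org.scalatest.exceptions.TestFailedException">
//         ...stack trace...
//       </failure>
//     </testcase>
//     <system-out><![CDATA[]]></system-out>
//     <system-err><![CDATA[]]></system-err>
//   </testsuite>
//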
|
cquiroz/scalatest
|
scalactic-test/src/test/scala/org/scalactic/anyvals/PosZDoubleSpec.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import org.scalatest._
import org.scalatest.prop.NyayaGeneratorDrivenPropertyChecks._
import japgolly.nyaya.test.Gen
// SKIP-SCALATESTJS-START
import scala.collection.immutable.NumericRange
// SKIP-SCALATESTJS-END
import scala.collection.mutable.WrappedArray
import OptionValues._
class PosZDoubleSpec extends FunSpec with Matchers {
implicit val posZDoubleGen: Gen[PosZDouble] =
for {i <- Gen.choosedouble(1, Double.MaxValue)} yield PosZDouble.from(i).get
implicit val intGen: Gen[Int] = Gen.int
implicit val longGen: Gen[Long] = Gen.long
implicit val shortGen: Gen[Short] = Gen.short
implicit val charGen: Gen[Char] = Gen.char
implicit val floatGen: Gen[Float] = Gen.float
implicit val doubleGen: Gen[Double] = Gen.double
implicit val byteGen: Gen[Byte] = Gen.byte
describe("A PosZDouble") {
describe("should offer a from factory method that") {
it("returns Some[PosZDouble] if the passed Double is greater than or equal to 0") {
PosZDouble.from(0.0).value.value shouldBe 0.0
PosZDouble.from(50.23).value.value shouldBe 50.23
PosZDouble.from(100.0).value.value shouldBe 100.0
}
it("returns None if the passed Double is NOT greater than or equal to 0") {
PosZDouble.from(-0.00001) shouldBe None
PosZDouble.from(-99.9) shouldBe None
}
}
it("should have a pretty toString") {
// SKIP-SCALATESTJS-START
PosZDouble.from(42.0).value.toString shouldBe "PosZDouble(42.0)"
// SKIP-SCALATESTJS-END
//SCALATESTJS-ONLY PosZDouble.from(42.0).value.toString shouldBe "PosZDouble(42)"
}
it("should return the same type from its unary_+ method") {
+PosZDouble(3.0) shouldEqual PosZDouble(3.0)
}
it("should be automatically widened to compatible AnyVal targets") {
"PosZDouble(3.0): Int" shouldNot typeCheck
"PosZDouble(3.0): Long" shouldNot typeCheck
"PosZDouble(3.0): Float" shouldNot typeCheck
(PosZDouble(3.0): Double) shouldEqual 3.0
"PosZDouble(3.0): PosInt" shouldNot typeCheck
"PosZDouble(3.0): PosLong" shouldNot typeCheck
"PosZDouble(3.0): PosFloat" shouldNot typeCheck
"PosZDouble(3.0): PosDouble" shouldNot typeCheck
"PosZDouble(3.0): PosZInt" shouldNot typeCheck
"PosZDouble(3.0): PosZLong" shouldNot typeCheck
"PosZDouble(3.0): PosZFloat" shouldNot typeCheck
(PosZDouble(3.0): PosZDouble) shouldEqual PosZDouble(3.0)
}
describe("when a compatible AnyVal is passed to a + method invoked on it") {
it("should give the same AnyVal type back at compile time, and correct value at runtime") {
// When adding a "primitive"
val opInt = PosZDouble(3.0) + 3
opInt shouldEqual 6.0
val opLong = PosZDouble(3.0) + 3L
opLong shouldEqual 6.0
val opFloat = PosZDouble(3.0) + 3.0F
opFloat shouldEqual 6.0
val opDouble = PosZDouble(3.0) + 3.0
opDouble shouldEqual 6.0
// When adding a Pos*
val opPosInt = PosZDouble(3.0) + PosInt(3)
opPosInt shouldEqual 6.0
val opPosLong = PosZDouble(3.0) + PosLong(3L)
opPosLong shouldEqual 6.0
val opPosFloat = PosZDouble(3.0) + PosFloat(3.0F)
opPosFloat shouldEqual 6.0
val opPosDouble = PosZDouble(3.0) + PosDouble(3.0)
opPosDouble shouldEqual 6.0
// When adding a *PosZ
val opPosZ = PosZDouble(3.0) + PosZInt(3)
opPosZ shouldEqual 6.0
val opPosZLong = PosZDouble(3.0) + PosZLong(3L)
opPosZLong shouldEqual 6.0
val opPosZFloat = PosZDouble(3.0) + PosZFloat(3.0F)
opPosZFloat shouldEqual 6.0
val opPosZDouble = PosZDouble(3.0) + PosZDouble(3.0)
opPosZDouble shouldEqual 6.0
}
}
describe("when created with apply method") {
it("should compile when 8 is passed in") {
"PosZDouble(8)" should compile
PosZDouble(8).value shouldEqual 8.0
"PosZDouble(8L)" should compile
PosZDouble(8L).value shouldEqual 8.0
"PosZDouble(8.0F)" should compile
PosZDouble(8.0F).value shouldEqual 8.0
"PosZDouble(8.0)" should compile
PosZDouble(8.0).value shouldEqual 8.0
}
it("should compile when 0 is passed in") {
"PosZDouble(0)" should compile
PosZDouble(0).value shouldEqual 0.0
"PosZDouble(0L)" should compile
PosZDouble(0L).value shouldEqual 0.0
"PosZDouble(0.0F)" should compile
PosZDouble(0.0F).value shouldEqual 0.0
"PosZDouble(0.0)" should compile
PosZDouble(0.0).value shouldEqual 0.0
}
it("should not compile when -8 is passed in") {
"PosZDouble(-8)" shouldNot compile
"PosZDouble(-8L)" shouldNot compile
"PosZDouble(-8.0F)" shouldNot compile
"PosZDouble(-8.0)" shouldNot compile
}
it("should not compile when x is passed in") {
val a: Int = -8
"PosZDouble(a)" shouldNot compile
val b: Long = -8L
"PosZDouble(b)" shouldNot compile
val c: Float = -8.0F
"PosZDouble(c)" shouldNot compile
val d: Double = -8.0
"PosZDouble(d)" shouldNot compile
}
}
describe("when specified as a plain-old Double") {
def takesPosZDouble(poz: PosZDouble): Double = poz.value
it("should compile when 8 is passed in") {
"takesPosZDouble(8)" should compile
takesPosZDouble(8) shouldEqual 8.0
"takesPosZDouble(8L)" should compile
takesPosZDouble(8L) shouldEqual 8.0
"takesPosZDouble(8.0F)" should compile
takesPosZDouble(8.0F) shouldEqual 8.0
"takesPosZDouble(8.0)" should compile
takesPosZDouble(8.0) shouldEqual 8.0
}
it("should compile when 0 is passed in") {
"takesPosZDouble(0)" should compile
takesPosZDouble(0) shouldEqual 0.0
"takesPosZDouble(0L)" should compile
takesPosZDouble(0L) shouldEqual 0.0
"takesPosZDouble(0.0F)" should compile
takesPosZDouble(0.0F) shouldEqual 0.0
"takesPosZDouble(0.0)" should compile
takesPosZDouble(0.0) shouldEqual 0.0
}
it("should not compile when -8 is passed in") {
"takesPosZDouble(-8)" shouldNot compile
"takesPosZDouble(-8L)" shouldNot compile
"takesPosZDouble(-8.0F)" shouldNot compile
"takesPosZDouble(-8.0)" shouldNot compile
}
it("should not compile when x is passed in") {
val x: Int = -8
"takesPosZDouble(x)" shouldNot compile
val b: Long = -8L
"takesPosZDouble(b)" shouldNot compile
val c: Float = -8.0F
"takesPosZDouble(c)" shouldNot compile
val d: Double = -8.0
"takesPosZDouble(d)" shouldNot compile
}
}
it("should offer a unary + method that is consistent with Double") {
forAll { (pzdouble: PosZDouble) =>
(+pzdouble).toDouble shouldEqual (+(pzdouble.toDouble))
}
}
it("should offer a unary - method that is consistent with Double") {
forAll { (pzdouble: PosZDouble) =>
(-pzdouble) shouldEqual (-(pzdouble.toDouble))
}
}
it("should offer '<' comparison that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble < byte) shouldEqual (pzdouble.toDouble < byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble < short) shouldEqual (pzdouble.toDouble < short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble < char) shouldEqual (pzdouble.toDouble < char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble < int) shouldEqual (pzdouble.toDouble < int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble < long) shouldEqual (pzdouble.toDouble < long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble < float) shouldEqual (pzdouble.toDouble < float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble < double) shouldEqual (pzdouble.toDouble < double)
}
}
it("should offer '<=' comparison that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble <= byte) shouldEqual (pzdouble.toDouble <= byte)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble <= char) shouldEqual (pzdouble.toDouble <= char)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble <= short) shouldEqual (pzdouble.toDouble <= short)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble <= int) shouldEqual (pzdouble.toDouble <= int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble <= long) shouldEqual (pzdouble.toDouble <= long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble <= float) shouldEqual (pzdouble.toDouble <= float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble <= double) shouldEqual (pzdouble.toDouble <= double)
}
}
it("should offer '>' comparison that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble > byte) shouldEqual (pzdouble.toDouble > byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble > short) shouldEqual (pzdouble.toDouble > short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble > char) shouldEqual (pzdouble.toDouble > char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble > int) shouldEqual (pzdouble.toDouble > int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble > long) shouldEqual (pzdouble.toDouble > long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble > float) shouldEqual (pzdouble.toDouble > float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble > double) shouldEqual (pzdouble.toDouble > double)
}
}
it("should offer '>=' comparison that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble >= byte) shouldEqual (pzdouble.toDouble >= byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble >= short) shouldEqual (pzdouble.toDouble >= short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble >= char) shouldEqual (pzdouble.toDouble >= char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble >= int) shouldEqual (pzdouble.toDouble >= int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble >= long) shouldEqual (pzdouble.toDouble >= long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble >= float) shouldEqual (pzdouble.toDouble >= float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble >= double) shouldEqual (pzdouble.toDouble >= double)
}
}
it("should offer a '+' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble + byte) shouldEqual (pzdouble.toDouble + byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble + short) shouldEqual (pzdouble.toDouble + short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble + char) shouldEqual (pzdouble.toDouble + char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble + int) shouldEqual (pzdouble.toDouble + int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble + long) shouldEqual (pzdouble.toDouble + long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble + float) shouldEqual (pzdouble.toDouble + float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble + double) shouldEqual (pzdouble.toDouble + double)
}
}
it("should offer a '-' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble - byte) shouldEqual (pzdouble.toDouble - byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble - short) shouldEqual (pzdouble.toDouble - short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble - char) shouldEqual (pzdouble.toDouble - char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble - int) shouldEqual (pzdouble.toDouble - int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble - long) shouldEqual (pzdouble.toDouble - long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble - float) shouldEqual (pzdouble.toDouble - float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble - double) shouldEqual (pzdouble.toDouble - double)
}
}
it("should offer a '*' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
(pzdouble * byte) shouldEqual (pzdouble.toDouble * byte)
}
forAll { (pzdouble: PosZDouble, short: Short) =>
(pzdouble * short) shouldEqual (pzdouble.toDouble * short)
}
forAll { (pzdouble: PosZDouble, char: Char) =>
(pzdouble * char) shouldEqual (pzdouble.toDouble * char)
}
forAll { (pzdouble: PosZDouble, int: Int) =>
(pzdouble * int) shouldEqual (pzdouble.toDouble * int)
}
forAll { (pzdouble: PosZDouble, long: Long) =>
(pzdouble * long) shouldEqual (pzdouble.toDouble * long)
}
forAll { (pzdouble: PosZDouble, float: Float) =>
(pzdouble * float) shouldEqual (pzdouble.toDouble * float)
}
forAll { (pzdouble: PosZDouble, double: Double) =>
(pzdouble * double) shouldEqual (pzdouble.toDouble * double)
}
}
it("should offer a '/' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
pzdouble / byte shouldEqual pzdouble.toDouble / byte
}
forAll { (pzdouble: PosZDouble, short: Short) =>
pzdouble / short shouldEqual pzdouble.toDouble / short
}
forAll { (pzdouble: PosZDouble, char: Char) =>
pzdouble / char shouldEqual pzdouble.toDouble / char
}
forAll { (pzdouble: PosZDouble, int: Int) =>
pzdouble / int shouldEqual pzdouble.toDouble / int
}
forAll { (pzdouble: PosZDouble, long: Long) =>
pzdouble / long shouldEqual pzdouble.toDouble / long
}
forAll { (pzdouble: PosZDouble, float: Float) =>
pzdouble / float shouldEqual pzdouble.toDouble / float
}
forAll { (pzdouble: PosZDouble, double: Double) =>
pzdouble / double shouldEqual pzdouble.toDouble / double
}
}
    // note: since a PosZDouble % 0 is NaN (as opposed to PosZDouble / 0, which is Infinity)
    // extra logic is needed to convert to a comparable type (boolean, in this case)
it("should offer a '%' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble, byte: Byte) =>
val res = pzdouble % byte
if (res.isNaN)
(pzdouble.toDouble % byte).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % byte
}
forAll { (pzdouble: PosZDouble, short: Short) =>
val res = pzdouble % short
if (res.isNaN)
(pzdouble.toDouble % short).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % short
}
forAll { (pzdouble: PosZDouble, char: Char) =>
val res = pzdouble % char
if (res.isNaN)
(pzdouble.toDouble % char).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % char
}
forAll { (pzdouble: PosZDouble, int: Int) =>
val res = pzdouble % int
if (res.isNaN)
(pzdouble.toDouble % int).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % int
}
forAll { (pzdouble: PosZDouble, long: Long) =>
val res = pzdouble % long
if (res.isNaN)
(pzdouble.toDouble % long).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % long
}
forAll { (pzdouble: PosZDouble, float: Float) =>
val res = pzdouble % float
if (res.isNaN)
(pzdouble.toDouble % float).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % float
}
forAll { (pzdouble: PosZDouble, double: Double) =>
val res = pzdouble % double
if (res.isNaN)
(pzdouble.toDouble % double).isNaN shouldBe true
else
res shouldEqual pzdouble.toDouble % double
}
}
it("should offer 'min' and 'max' methods that are consistent with Double") {
forAll { (pzdouble1: PosZDouble, pzdouble2: PosZDouble) =>
pzdouble1.max(pzdouble2).toDouble shouldEqual pzdouble1.toDouble.max(pzdouble2.toDouble)
pzdouble1.min(pzdouble2).toDouble shouldEqual pzdouble1.toDouble.min(pzdouble2.toDouble)
}
}
it("should offer an 'isWhole' method that is consistent with Double") {
forAll { (pzdouble: PosZDouble) =>
pzdouble.isWhole shouldEqual pzdouble.toDouble.isWhole
}
}
it("should offer 'round', 'ceil', and 'floor' methods that are consistent with Double") {
forAll { (pzdouble: PosZDouble) =>
pzdouble.round.toDouble shouldEqual pzdouble.toDouble.round
pzdouble.ceil.toDouble shouldEqual pzdouble.toDouble.ceil
pzdouble.floor.toDouble shouldEqual pzdouble.toDouble.floor
}
}
it("should offer 'toRadians' and 'toDegrees' methods that are consistent with Double") {
      forAll { (pzdouble: PosZDouble) =>
        pzdouble.toRadians shouldEqual pzdouble.toDouble.toRadians
        pzdouble.toDegrees shouldEqual pzdouble.toDouble.toDegrees
      }
}
// SKIP-SCALATESTJS-START
it("should offer 'to' and 'until' method that is consistent with Double") {
def rangeEqual[T](a: NumericRange[T], b: NumericRange[T]): Boolean =
a.start == b.start && a.end == b.end && a.step == b.step
forAll { (pzdouble: PosZDouble, end: Double, step: Double) =>
rangeEqual(pzdouble.until(end).by(1f), pzdouble.toDouble.until(end).by(1f)) shouldBe true
rangeEqual(pzdouble.until(end, step), pzdouble.toDouble.until(end, step)) shouldBe true
rangeEqual(pzdouble.to(end).by(1f), pzdouble.toDouble.to(end).by(1f)) shouldBe true
rangeEqual(pzdouble.to(end, step), pzdouble.toDouble.to(end, step)) shouldBe true
}
}
// SKIP-SCALATESTJS-END
it("should offer widening methods for basic types that are consistent with Double") {
forAll { (pzdouble: PosZDouble) =>
def widen(value: Double): Double = value
widen(pzdouble) shouldEqual widen(pzdouble.toDouble)
}
}
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/exceptions/StackDepthException.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.exceptions
/**
* Exception class that encapsulates information about the stack depth at which the line of code that failed resides,
 * so that this information can be presented to the user in a way that makes it quick to find the failing line of code. (In other
* words, the user need not scan through the stack trace to find the correct filename and line number of the problem code.)
* Having a stack depth is more useful in a testing environment in which test failures are implemented as
* thrown exceptions, as is the case in ScalaTest's built-in suite traits.
*
* @param messageFun a function that produces an optional detail message for this <code>StackDepthException</code>.
* @param cause an optional cause, the <code>Throwable</code> that caused this <code>StackDepthException</code> to be thrown.
* @param failedCodeStackDepthFun a function that produces the depth in the stack trace of this exception at which the line of test code that failed resides.
*
 * @throws NullPointerException if <code>messageFun</code>, <code>cause</code>, or <code>failedCodeStackDepthFun</code> is <code>null</code>, or if <code>cause</code> is <code>Some(null)</code>.
*
* @author <NAME>
*/
abstract class StackDepthException(
val messageFun: StackDepthException => Option[String],
val cause: Option[Throwable],
val failedCodeStackDepthFun: StackDepthException => Int
) extends RuntimeException(cause.orNull) with StackDepth {
if (messageFun == null) throw new NullPointerException("messageFun was null")
if (cause == null) throw new NullPointerException("cause was null")
cause match {
case Some(null) => throw new NullPointerException("cause was a Some(null)")
case _ =>
}
if (failedCodeStackDepthFun == null) throw new NullPointerException("failedCodeStackDepthFun was null")
/**
* Constructs a <code>StackDepthException</code> with an optional pre-determined <code>message</code>, optional cause, and
* a <code>failedCodeStackDepth</code> function.
*
* @param message an optional detail message for this <code>StackDepthException</code>.
* @param cause an optional cause, the <code>Throwable</code> that caused this <code>StackDepthException</code> to be thrown.
 * @param failedCodeStackDepthFun a function that returns the depth in the stack trace of this exception at which the line of test code that failed resides.
*
* @throws NullPointerException if either <code>message</code> or <code>cause</code> is <code>null</code> or <code>Some(null)</code>, or <code>failedCodeStackDepthFun</code> is <code>null</code>.
*/
def this(message: Option[String], cause: Option[Throwable], failedCodeStackDepthFun: StackDepthException => Int) =
this(
message match {
case null => throw new NullPointerException("message was null")
case Some(null) => throw new NullPointerException("message was a Some(null)")
case _ => (e: StackDepthException) => message
},
cause,
failedCodeStackDepthFun
)
/**
* Constructs a <code>StackDepthException</code> with an optional pre-determined <code>message</code>,
 * optional <code>cause</code>, and <code>failedCodeStackDepth</code>. (This was
* the primary constructor form prior to ScalaTest 1.5.)
*
* @param message an optional detail message for this <code>StackDepthException</code>.
* @param cause an optional cause, the <code>Throwable</code> that caused this <code>StackDepthException</code> to be thrown.
* @param failedCodeStackDepth the depth in the stack trace of this exception at which the line of test code that failed resides.
*
 * @throws NullPointerException if either <code>message</code> or <code>cause</code> is <code>null</code>, or <code>Some(null)</code>.
*/
def this(message: Option[String], cause: Option[Throwable], failedCodeStackDepth: Int) =
this(
message match {
case null => throw new NullPointerException("message was null")
case Some(null) => throw new NullPointerException("message was a Some(null)")
case _ => (e: StackDepthException) => message
},
cause,
(e: StackDepthException) => failedCodeStackDepth
)
/**
* An optional detail message for this <code>StackDepth</code> exception.
*
* <p>
* One reason this is lazy is to delay any searching of the stack trace until it is actually needed. It will
* usually be needed, but not always. For example, exceptions thrown during a shrink phase of a failed property
 * will often be <code>StackDepthException</code>s whose <code>message</code> is never used. Another related reason is to remove the need
* to create a different exception before creating this one just for the purpose of searching through its stack
* trace for the proper stack depth. Still one more reason is to allow the message to contain information about the
* stack depth, such as the failed file name and line number.
* </p>
*/
lazy val message: Option[String] = messageFun(this)
/**
* The depth in the stack trace of this exception at which the line of test code that failed resides.
*
* <p>
* One reason this is lazy is to delay any searching of the stack trace until it is actually needed. It will
* usually be needed, but not always. For example, exceptions thrown during a shrink phase of a failed property
 * will often be <code>StackDepthException</code>s whose <code>failedCodeStackDepth</code> is never used. Another reason is to remove the need
* to create a different exception before creating this one just for the purpose of searching through its stack
* trace for the proper stack depth. Still one more reason is to allow the message to contain information about the
* stack depth, such as the failed file name and line number.
* </p>
*/
lazy val failedCodeStackDepth: Int = failedCodeStackDepthFun(this)
/**
* Returns the detail message string of this <code>StackDepthException</code>.
*
* @return the detail message string of this <code>StackDepthException</code> instance (which may be <code>null</code>).
*/
override def getMessage: String = message.orNull
/*
* Throws <code>IllegalStateException</code>, because <code>StackDepthException</code>s are
 * always initialized with a cause passed to the constructor of superclass <code>RuntimeException</code>.
 */
override final def initCause(throwable: Throwable): Throwable = { throw new IllegalStateException }
/**
* Indicates whether this object can be equal to the passed object.
*/
def canEqual(other: Any): Boolean = other.isInstanceOf[StackDepthException]
/**
* Indicates whether this object is equal to the passed object. If the passed object is
* a <code>StackDepthException</code>, equality requires equal <code>message</code>,
* <code>cause</code>, and <code>failedCodeStackDepth</code> fields, as well as equal
* return values of <code>getStackTrace</code>.
*/
override def equals(other: Any): Boolean =
other match {
case that: StackDepthException =>
(that canEqual this) &&
message == that.message &&
cause == that.cause &&
failedCodeStackDepth == that.failedCodeStackDepth &&
getStackTrace.deep == that.getStackTrace.deep
case _ => false
}
/**
* Returns a hash code value for this object.
*/
override def hashCode: Int =
41 * (
41 * (
41 * (
41 + message.hashCode
) + cause.hashCode
) + failedCodeStackDepth.hashCode
) + getStackTrace.hashCode
}
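// An illustrative sketch (not part of this file): concrete subclasses such as
// org.scalatest.exceptions.TestFailedException carry the stack depth information described
// above, so tools can report the failing line without scanning the full stack trace:
//
//   val caught = intercept[TestFailedException] { assert(1 == 2) }
//   caught.failedCodeFileName   // e.g. Some("MySpec.scala")
//   caught.failedCodeLineNumber // e.g. Some(42)
//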
private[scalatest] object StackDepthException {
/**
 * If the message or the message contents are null, throw a NullPointerException; otherwise
* create a function that returns the option.
*/
def toExceptionFunction(message: Option[String]): StackDepthException => Option[String] = {
message match {
case null => throw new NullPointerException("message was null")
case Some(null) => throw new NullPointerException("message was a Some(null)")
case _ => { e => message }
}
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/exceptions/TableDrivenPropertyCheckFailedException.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
package exceptions
/**
* Exception that indicates a table-driven property check failed.
*
* <p>
* For an introduction to using tables, see the documentation for trait
* <a href="../prop/TableDrivenPropertyChecks.html">TableDrivenPropertyChecks</a>.
* </p>
*
 * @param messageFun a function that returns a detail message (not optional) for this <code>TableDrivenPropertyCheckFailedException</code>.
* @param cause an optional cause, the <code>Throwable</code> that caused this <code>TableDrivenPropertyCheckFailedException</code> to be thrown.
* @param failedCodeStackDepthFun a function that returns the depth in the stack trace of this exception at which the line of test code that failed resides.
* @param payload an optional payload, which ScalaTest will include in a resulting <code>TestFailed</code> event
* @param undecoratedMessage just a short message that has no redundancy with args, labels, etc. The regular "message" has everything in it
* @param args the argument values
* @param namesOfArgs a list of string names for the arguments
* @param row the index of the table row that failed the property check, causing this exception to be thrown
*
* @throws NullPointerException if any parameter is <code>null</code> or <code>Some(null)</code>.
*
* @author <NAME>
*/
class TableDrivenPropertyCheckFailedException(
messageFun: StackDepthException => String,
cause: Option[Throwable],
failedCodeStackDepthFun: StackDepthException => Int,
payload: Option[Any],
undecoratedMessage: String,
args: List[Any],
namesOfArgs: List[String],
val row: Int
) extends PropertyCheckFailedException(
messageFun, cause, failedCodeStackDepthFun, payload, undecoratedMessage, args, Some(namesOfArgs)
) {
/**
* Returns an instance of this exception's class, identical to this exception,
* except with the detail message option string replaced with the result of passing
* the current detail message to the passed function, <code>fun</code>.
*
* @param fun A function that, given the current optional detail message, will produce
 * the modified optional detail message for the result instance of <code>TableDrivenPropertyCheckFailedException</code>.
*/
override def modifyMessage(fun: Option[String] => Option[String]): TableDrivenPropertyCheckFailedException = {
val mod =
new TableDrivenPropertyCheckFailedException(
sde => fun(message).getOrElse(messageFun(this)),
cause,
failedCodeStackDepthFun,
payload,
undecoratedMessage,
args,
namesOfArgs,
row
)
mod.setStackTrace(getStackTrace)
mod
}
/**
* Returns an instance of this exception's class, identical to this exception,
* except with the payload option replaced with the result of passing
* the current payload option to the passed function, <code>fun</code>.
*
* @param fun A function that, given the current optional payload, will produce
* the modified optional payload for the result instance of <code>TableDrivenPropertyCheckFailedException</code>.
*/
override def modifyPayload(fun: Option[Any] => Option[Any]): TableDrivenPropertyCheckFailedException = {
val currentPayload: Option[Any] = payload
val mod =
new TableDrivenPropertyCheckFailedException(
messageFun,
cause,
failedCodeStackDepthFun,
fun(currentPayload),
undecoratedMessage,
args,
namesOfArgs,
row
)
mod.setStackTrace(getStackTrace)
mod
}
}
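// Usage sketch (illustrative): given a caught TableDrivenPropertyCheckFailedException,
// modifyMessage appends a clue to the detail message while preserving the original stack
// trace, and modifyPayload works the same way for the payload:
//
//   val withClue = caught.modifyMessage(opt => opt.map(_ + " (failed at table row " + caught.row + ")"))
//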
|
cquiroz/scalatest
|
scalactic-test/src/test/scala/org/scalactic/algebra/FunctorSpec.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.algebra
import org.scalacheck.Arbitrary
import org.scalactic._
import org.scalatest.laws._
import scala.language.implicitConversions
class FunctorSpec extends UnitSpec {
class ListFunctor extends Functor[List] {
override def map[A, B](ca: List[A])(f: (A) => B): List[B] = ca.map(f)
}
class OptionFunctor extends Functor[Option] {
override def map[A, B](ca: Option[A])(f: (A) => B): Option[B] = ca.map(f)
}
"Option" should "obey the functor laws via its map method" in {
implicit val optionIntFunctor = new OptionFunctor
implicit def arbOptionSome[G](implicit arbG: Arbitrary[G]): Arbitrary[Option[G]] =
Arbitrary(for (g <- Arbitrary.arbitrary[G]) yield Some(g))
new FunctorLaws[Option]().assert()
}
"List" should "obey the functor laws via its map method" in {
implicit val listIntFunctor = new ListFunctor
new FunctorLaws[List]().assert()
}
"Or" should "obey the functor laws (for its 'good' type) via its map method" in {
class OrFunctor[BAD] extends Functor[Or.B[BAD]#G] {
override def map[G, H](ca: G Or BAD)(f: G => H): H Or BAD = ca.map(f)
}
implicit val orFunctor = new OrFunctor[Int]
implicit def orArbGood[G, B](implicit arbG: Arbitrary[G]): Arbitrary[G Or B] =
Arbitrary(for (g <- Arbitrary.arbitrary[G]) yield Good(g))
new FunctorLaws[Or.B[Int]#G]().assert()
}
"Or" should "obey the functor laws (for its 'bad' type) via its badMap method" in {
class BadOrFunctor[GOOD] extends Functor[Or.G[GOOD]#B] {
override def map[B, C](ca: GOOD Or B)(f: B => C): GOOD Or C = ca.badMap(f)
}
implicit val badOrFunctor = new BadOrFunctor[Int]
implicit def orArbBad[G, B](implicit arbG: Arbitrary[B]): Arbitrary[G Or B] =
Arbitrary(for (b <- Arbitrary.arbitrary[B]) yield Bad(b))
new FunctorLaws[Or.G[Int]#B]().assert()
}
}
|
cquiroz/scalatest
|
examples/src/main/scala/org/scalatest/examples/suite/nested/ASCIISuite.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.suite.nested
import org.scalatest._
class ASuite extends Suite {
def `test: A should have ASCII value 41 hex` {
assert('A' === 0x41)
}
def `test: a should have ASCII value 61 hex` {
assert('a' === 0x61)
}
}
class BSuite extends Suite {
def `test: B should have ASCII value 42 hex` {
assert('B' === 0x42)
}
def `test: b should have ASCII value 62 hex` {
assert('b' === 0x62)
}
}
class CSuite extends Suite {
def `test: C should have ASCII value 43 hex` {
assert('C' === 0x43)
}
def `test: c should have ASCII value 63 hex` {
assert('c' === 0x63)
}
}
class ASCIISuite extends Suites(
new ASuite,
new BSuite,
new CSuite
)
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/algebra/Associative.scala
|
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.algebra
import scala.language.higherKinds
import scala.language.implicitConversions
/**
* Typeclass trait representing a binary operation that obeys the associative law.
*
* <p>
* The associative law states that given values <code>a</code>, <code>b</code>, and <code>c</code>
* of type <code>A</code> (and implicit <code>Associative.adapters</code> imported):
* </p>
*
* <pre>
* ((a op b) op c) === (a op (b op c))
* </pre>
*
* <p>
* Note: In mathematics, the algebraic structure consisting of a set along with an associative binary operation
* is known as a <em>semigroup</em>.
* </p>
*/
trait Associative[A] {
/**
* A binary operation that obeys the associative law.
*
* See the main documentation for this trait for more detail.
*/
def op(a1: A, a2: A): A
}
/**
 * Companion object for <code>Associative</code> that provides
 * an implicit conversion method from <code>A</code> to <code>Associative.Adapter[A]</code>.
*/
object Associative {
/**
* Adapter class for <a href="Associative.html"><code>Associative</code></a>
* that wraps a value of type <code>A</code> given an
* implicit <code>Associative[A]</code>.
*
* @param underlying The value of type <code>A</code> to wrap.
* @param associative The captured <code>Associative[A]</code> whose behavior
* is used to implement this class's methods.
*/
class Adapter[A](val underlying: A)(implicit val associative: Associative[A]) {
/**
* A binary operation that obeys the associative law.
*
* See the main documentation for trait <a href="Associative.html"><code>Associative</code></a> for more detail.
*/
def op(a2: A): A = associative.op(underlying, a2)
}
/**
* Implicitly wraps an object in an <code>Associative.Adapter[A]</code>
* so long as an implicit <code>Associative[A]</code> is available.
*/
implicit def adapters[A](a: A)(implicit ev: Associative[A]): Associative.Adapter[A] = new Adapter(a)(ev)
/**
* Summons an implicitly available <code>Associative[A]</code>.
*
* <p>
* This method allows you to write expressions like <code>Associative[String]</code> instead of
* <code>implicitly[Associative[String]]</code>.
* </p>
*/
def apply[A](implicit ev: Associative[A]): Associative[A] = ev
}
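// Example sketch (not part of the library): an Associative[Int] under addition, used via the
// implicit adapter conversion defined above:
//
//   implicit val intAddition: Associative[Int] =
//     new Associative[Int] { def op(a1: Int, a2: Int): Int = a1 + a2 }
//
//   import Associative.adapters
//   ((1 op 2) op 3) == (1 op (2 op 3))  // the associative law holds for addition
//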
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/enablers/Existence.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.enablers
/**
* Supertrait for typeclasses that enable the <code>exist</code> matcher syntax.
*
* <p>
* An <code>Existence[S]</code> provides access to the "existence nature" of type <code>S</code> in such
* a way that <code>exist</code> matcher syntax can be used with type <code>S</code>. A <code>S</code>
* can be any type for which the concept of existence makes sense, such as <code>java.io.File</code>. ScalaTest provides
* implicit implementations for <code>java.io.File</code>. You can enable the <code>exist</code> matcher syntax on your own
* type <code>U</code> by defining a <code>Existence[U]</code> for the type and making it available implicitly.
*
* <p>
* ScalaTest provides an implicit <code>Existence</code> instance for <code>java.io.File</code>
* in the <code>Existence</code> companion object.
* </p>
*/
trait Existence[-S] {
/**
   * Determines whether the passed thing exists, <em>e.g.</em>, whether a passed <code>java.io.File</code> exists.
*
* @param thing the thing to check for existence
* @return <code>true</code> if passed thing exists, <code>false</code> otherwise
*/
def exists(thing: S): Boolean
}
/**
* Companion object for <code>Existence</code> that provides implicit implementations for <code>java.io.File</code>.
*/
object Existence {
/**
* Enable <code>Existence</code> implementation for <code>java.io.File</code>
*
* @tparam FILE any subtype of <code>java.io.File</code>
* @return <code>Existence[FILE]</code> that supports <code>java.io.File</code> in <code>exist</code> syntax
*/
implicit def existenceOfFile[FILE <: java.io.File]: Existence[FILE] =
new Existence[FILE] {
def exists(file: FILE): Boolean = file.exists
}
}
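// Example sketch (illustrative only): enabling `exist` syntax for a hypothetical Resource type
// by defining an Existence[Resource] and making it available implicitly, mirroring the
// java.io.File support above:
//
//   case class Resource(name: String, present: Boolean)
//
//   implicit val existenceOfResource: Existence[Resource] =
//     new Existence[Resource] {
//       def exists(resource: Resource): Boolean = resource.present
//     }
//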
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/EnabledEqualityBetween.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
class EnabledEqualityBetween[A, B]
object EnabledEqualityBetween {
def apply[A, B]: EnabledEqualityBetween[A, B] = new EnabledEqualityBetween[A, B]
  // This is needed to allow, under EnabledEquality, an implicit like
  // enablers.ContainingConstraint.containingNatureOfJavaMap, which needs an implicit
  // EqualityConstraint[java.util.Map.Entry[K, V], R]. R could be org.scalatest.Entry,
  // which is a subtype. So this implicit always allows, under EnabledEquality, comparison of
  // any subtype of java.util.Map.Entry with any other subtype of java.util.Map.Entry.
implicit def enabledEqualityBetweenJavaMapEntries[A, B, ENTRYA[a, b] <: java.util.Map.Entry[a, b], ENTRYB[a, b] <: java.util.Map.Entry[a, b]]: EnabledEqualityBetween[ENTRYA[A, B], ENTRYB[A, B]] = EnabledEqualityBetween[ENTRYA[A, B], ENTRYB[A, B]]
}
|
cquiroz/scalatest
|
scalactic-test/src/test/scala/org/scalactic/LazySeqSpec.scala
|
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
class LazySeqSpec extends UnitSpec {
"LazySeq" should "offer a size method" in {
LazySeq(1, 2, 3).size shouldBe 3
LazySeq(1, 1, 3, 2).size shouldBe 4
LazySeq(1, 1, 1, 1).size shouldBe 4
}
it should "have a pretty toString" in {
def assertPretty[T](lazySeq: LazySeq[T]) = {
val lss = lazySeq.toString
lss should startWith ("LazySeq(")
lss should endWith (")")
/*
scala> val lss = "LazySeq(1, 2, 3)"
lss: String = LazySeq()
scala> lss.replaceAll(""".*\((.*)\).*""", "$1")
res0: String = 1,2,3
scala> res0.split(',')
res1: Array[String] = Array(1, 2, 3)
scala> val lss = "LazySeq()"
lss: String = LazySeq()
scala> lss.replaceAll(""".*\((.*)\).*""", "$1")
res2: String = ""
scala> res2.split(',')
res3: Array[String] = Array("")
*/
val elemStrings = lss.replaceAll(""".*\((.*)\).*""", "$1")
val elemStrArr = if (elemStrings.size != 0) elemStrings.split(',') else Array.empty[String]
elemStrArr.size should equal (lazySeq.size)
elemStrArr should contain theSameElementsAs lazySeq.toList.map(_.toString)
}
// Test BasicLazySeq
assertPretty(LazySeq(1, 2, 3))
assertPretty(LazySeq(1, 2, 3, 4))
assertPretty(LazySeq(1))
assertPretty(LazySeq())
assertPretty(LazySeq("one", "two", "three", "four", "five"))
// Test FlatMappedLazySeq
val trimmed = SortedEquaPath[String](StringNormalizations.trimmed.toOrderingEquality)
val lazySeq = trimmed.SortedEquaSet("1", "2", "01", "3").toLazy
val flatMapped = lazySeq.flatMap { (digit: String) =>
LazySeq(digit.toInt)
}
assertPretty(flatMapped)
val mapped = flatMapped.map(_ + 1)
assertPretty(mapped)
}
it should "have a zip method" in {
val seq1 = LazySeq(1,2,3)
val seq2 = LazySeq("a", "b", "c")
val zipped = seq1.zip(seq2)
val (b1, b2) = zipped.toList.unzip
b1 shouldBe seq1.toList
b2 shouldBe seq2.toList
}
it should "have a zipAll method" in {
val shortSeq1 = LazySeq(1,2,3)
val longSeq1 = LazySeq(1,2,3,4)
val shortSeq2 = LazySeq("a", "b", "c")
val longSeq2 = LazySeq("a", "b", "c", "d")
def assertSameElements(thisSeq: LazySeq[_], thatSeq: LazySeq[_]): Unit = {
val zipped = thisSeq.zipAll(thatSeq, 4, "d")
val (unzip1, unzip2) = zipped.toList.unzip
unzip1 shouldBe longSeq1.toList
unzip2 shouldBe longSeq2.toList
}
assertSameElements(shortSeq1, longSeq2)
assertSameElements(longSeq1, shortSeq2)
assertSameElements(longSeq1, longSeq2)
}
it should "have a zipWithIndex method" in {
val bag = LazySeq("a", "b", "c")
val zipped = bag.zipWithIndex
val (b1, b2) = zipped.toList.unzip
b1 shouldBe bag.toList
b2 shouldBe List(0, 1, 2)
}
}
|
cquiroz/scalatest
|
scalactic-test/src/test/scala/org/scalactic/ComplexSpec.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
class ComplexSpec extends Spec with Matchers {
object `A Complex ` {
def `should equal another Complex if both the real and imaginary parts are equal` {
Complex(2.0, 3.0) should equal (Complex(2.0, 3.0))
Complex(2.0, 3.0) should not equal (Complex(1.0, 3.0))
Complex(2.0, 3.0) should not equal (Complex(2.0, 1.0))
Complex(2.0, 3.0) should not equal (Complex(1.0, 1.0))
}
def `should have implicit conversions from DigitString, Int, and Double` {
def iKnowYouAreButWhatAmI(c: Complex): Complex = c
iKnowYouAreButWhatAmI(Complex(2.0, 3.0)) should equal (Complex(2.0, 3.0))
iKnowYouAreButWhatAmI(3.0) should equal (Complex(3.0, 0.0))
iKnowYouAreButWhatAmI(4) should equal (Complex(4.0, 0.0))
iKnowYouAreButWhatAmI(DigitString("007")) should equal (Complex(7.0, 0.0))
}
}
}
|
cquiroz/scalatest
|
scalactic/src/main/scala/org/scalactic/AsMethods.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
/*
// New policies
// Both sides Some
new UncheckedEquality { Some(42) should not equal Some(Complex(42.0, 0.0)) }
"new CheckedEquality { Some(42) shouldEqual Some(Complex(42.0, 0.0)) }" shouldNot typeCheck
"new EnabledEquality { Some(42) shouldEqual Some(Complex(42.0, 0.0)) }" shouldNot typeCheck
// Both sides Option
new UncheckedEquality { Option(42) should not equal Option(Complex(42.0, 0.0)) }
"new CheckedEquality { Option(42) shouldEqual Option(Complex(42.0, 0.0)) }" shouldNot typeCheck
"new EnabledEquality { Option(42) shouldEqual Option(Complex(42.0, 0.0)) }" shouldNot typeCheck
// Left side Some, right side Option
new UncheckedEquality { Some(42) should not equal Option(Complex(42.0, 0.0)) }
"new CheckedEquality { Some(42) shouldEqual Option(Complex(42.0, 0.0)) }" shouldNot typeCheck
"new EnabledEquality { Some(42) shouldEqual Option(Complex(42.0, 0.0)) }" shouldNot typeCheck
// Left side Option, right side Some
new UncheckedEquality { Option(42) should not equal Some(Complex(42.0, 0.0)) }
"new CheckedEquality { Option(42) shouldEqual Some(Complex(42.0, 0.0)) }" shouldNot typeCheck
"new EnabledEquality { Option(42) shouldEqual Some(Complex(42.0, 0.0)) }" shouldNot typeCheck
import AsMethods._
new CheckedEquality { Option(42.as[Complex]) shouldEqual Some(Complex(42.0, 0.0)) }
"new EnabledEquality { Option(42.as[Complex]) shouldEqual Some(Complex(42.0, 0.0)) }" shouldNot typeCheck
implicit val enableComplexComparisons = EnabledEqualityFor[Complex]
new EnabledEquality { Option(42.as[Complex]) shouldEqual Some(Complex(42.0, 0.0)) }
actually normally you'd do (Given opt, an Option[Int]):
opt.map(_.as[Complex])
Although you can do this:
(42 : Complex)
You can't do this:
opt.map(_: Complex)
Because the compiler gets confused. You could do this:
opt.map(a => a : Complex)
So as just makes that a bit more obvious maybe:
opt.map(_.as[Complex])
Maybe I won't include this as the former isn't too bad.
*/
trait AsMethods {
implicit final class Asifier[T](o: T) {
def as[U](implicit cnv: T => U): U = cnv(o)
}
}
/**
 * Companion object for <code>AsMethods</code> enabling its members to be imported as an alternative to mixing them in.
*/
object AsMethods extends AsMethods
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/laws/MonadLaws.scala
|
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.laws
import org.scalacheck.{Arbitrary, Shrink}
import org.scalactic.Every
import org.scalactic.algebra._
import org.scalatest.Matchers._
import org.scalatest.prop.GeneratorDrivenPropertyChecks._
import Monad.adapters
import scala.language.higherKinds
/**
* Represents the laws that should hold true for an algebraic structure (a Monad) which
* contains a "flatMap" operation and obeys the laws of associativity, right identity,
* and left identity.
*/
class MonadLaws[Context[_]](implicit monad: Monad[Context],
arbCa: Arbitrary[Context[Int]],
shrCa: Shrink[Context[Int]],
arbCab: Arbitrary[Int => Context[String]],
shrCab: Shrink[Int => Context[String]],
arbCbc: Arbitrary[String => Context[Double]],
shrCbc: Shrink[String => Context[Double]]) extends Laws("monad") {
override val laws = Every (
law("associativity") { () =>
forAll { (ca: Context[Int], f: Int => Context[String], g: String => Context[Double]) =>
((ca flatMap f) flatMap g) shouldEqual (ca flatMap (a => f(a) flatMap g))
}
},
law("left identity") { () =>
forAll { (ca: Context[Int]) =>
ca.flatMap(a => monad.insert(a)) shouldEqual ca
}
},
law("right identity") { () =>
forAll { (a: Int, f: Int => Context[String]) =>
(monad.insert(a) flatMap f) shouldEqual f(a)
}
}
)
}
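// Usage sketch (illustrative): with an implicit Monad[Option] and the required Arbitrary and
// Shrink instances in scope, the laws above can be asserted in a spec the same way the
// functor laws are elsewhere in this repository:
//
//   new MonadLaws[Option]().assert()
//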
|
cquiroz/scalatest
|
scalatest-test/src/test/scala/org/scalatest/ShouldBeReadableLogicalAndImplicitSpec.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.{thisLineNumber, createTempDirectory}
import enablers.Readability
import Matchers._
import exceptions.TestFailedException
class ShouldBeReadableLogicalAndImplicitSpec extends Spec {
val fileName: String = "ShouldBeReadableLogicalAndImplicitSpec.scala"
def wasEqualTo(left: Any, right: Any): String =
FailureMessages.wasEqualTo(left, right)
def wasNotEqualTo(left: Any, right: Any): String =
FailureMessages.wasNotEqualTo(left, right)
def equaled(left: Any, right: Any): String =
FailureMessages.equaled(left, right)
def didNotEqual(left: Any, right: Any): String =
FailureMessages.didNotEqual(left, right)
def wasNotReadable(left: Any): String =
FailureMessages.wasNotReadable(left)
def wasReadable(left: Any): String =
FailureMessages.wasReadable(left)
def allError(message: String, lineNumber: Int, left: Any): String = {
val messageWithIndex = UnquotedString(" " + FailureMessages.forAssertionsGenTraversableMessageWithStackDepth(0, UnquotedString(message), UnquotedString(fileName + ":" + lineNumber)))
FailureMessages.allShorthandFailed(messageWithIndex, left)
}
trait Thing {
def canRead: Boolean
}
val book = new Thing {
val canRead = true
}
val stone = new Thing {
val canRead = false
}
implicit def readabilityOfThing[T <: Thing]: Readability[T] =
new Readability[T] {
def isReadable(thing: T): Boolean = thing.canRead
}
object `Readability matcher` {
object `when work with 'file should be (readable)'` {
def `should do nothing when file is readable` {
book should (equal (book) and be (readable))
book should (be (readable) and equal (book))
book should (be_== (book) and be (readable))
book should (be (readable) and be_== (book))
}
def `should throw TestFailedException with correct stack depth when file is not readable` {
val caught1 = intercept[TestFailedException] {
stone should (equal (stone) and be (readable))
}
assert(caught1.message === Some(equaled(stone, stone) + ", but " + wasNotReadable(stone)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
stone should (be (readable) and equal (stone))
}
assert(caught2.message === Some(wasNotReadable(stone)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
stone should (be_== (stone) and be (readable))
}
assert(caught3.message === Some(wasEqualTo(stone, stone) + ", but " + wasNotReadable(stone)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
stone should (be (readable) and be_== (stone))
}
assert(caught4.message === Some(wasNotReadable(stone)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
  object `when work with 'file should not be readable'` {
def `should do nothing when file is not readable` {
stone should (not equal book and not be readable)
stone should (not be readable and not equal book)
stone should (not be_== book and not be readable)
stone should (not be readable and not be_== book)
}
    def `should throw TestFailedException with correct stack depth when file is readable` {
val caught1 = intercept[TestFailedException] {
book should (not equal stone and not be readable)
}
assert(caught1.message === Some(didNotEqual(book, stone) + ", but " + wasReadable(book)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
book should (not be readable and not equal stone)
}
assert(caught2.message === Some(wasReadable(book)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
book should (not be_== stone and not be readable)
}
assert(caught3.message === Some(wasNotEqualTo(book, stone) + ", but " + wasReadable(book)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
book should (not be readable and not be_== stone)
}
assert(caught4.message === Some(wasReadable(book)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should be (readable)'` {
def `should do nothing when all(xs) is readable` {
all(List(book)) should (be_== (book) and be (readable))
all(List(book)) should (be (readable) and be_== (book))
all(List(book)) should (equal (book) and be (readable))
all(List(book)) should (be (readable) and equal (book))
}
def `should throw TestFailedException with correct stack depth when all(xs) is not readable` {
val left1 = List(stone)
val caught1 = intercept[TestFailedException] {
all(left1) should (be_== (stone) and be (readable))
}
assert(caught1.message === Some(allError(wasEqualTo(stone, stone) + ", but " + wasNotReadable(stone), thisLineNumber - 2, left1)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(stone)
val caught2 = intercept[TestFailedException] {
all(left2) should (be (readable) and be_== (stone))
}
assert(caught2.message === Some(allError(wasNotReadable(stone), thisLineNumber - 2, left2)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(stone)
val caught3 = intercept[TestFailedException] {
all(left3) should (equal (stone) and be (readable))
}
assert(caught3.message === Some(allError(equaled(stone, stone) + ", but " + wasNotReadable(stone), thisLineNumber - 2, left3)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(stone)
val caught4 = intercept[TestFailedException] {
all(left4) should (be (readable) and equal (stone))
}
assert(caught4.message === Some(allError(wasNotReadable(stone), thisLineNumber - 2, left4)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should not be readable'` {
def `should do nothing when all(xs) is not readable` {
all(List(stone)) should (not be readable and not be_== book)
all(List(stone)) should (not be_== book and not be readable)
all(List(stone)) should (not be readable and not equal book)
all(List(stone)) should (not equal book and not be readable)
}
def `should throw TestFailedException with correct stack depth when all(xs) is readable` {
val left1 = List(book)
val caught1 = intercept[TestFailedException] {
all(left1) should (not be_== stone and not be readable)
}
assert(caught1.message === Some(allError(wasNotEqualTo(book, stone) + ", but " + wasReadable(book), thisLineNumber - 2, left1)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(book)
val caught2 = intercept[TestFailedException] {
all(left2) should (not be readable and not be_== stone)
}
assert(caught2.message === Some(allError(wasReadable(book), thisLineNumber - 2, left2)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(book)
val caught3 = intercept[TestFailedException] {
all(left3) should (not equal stone and not be readable)
}
assert(caught3.message === Some(allError(didNotEqual(book, stone) + ", but " + wasReadable(book), thisLineNumber - 2, left3)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(book)
val caught4 = intercept[TestFailedException] {
all(left4) should (not be readable and not equal stone)
}
assert(caught4.message === Some(allError(wasReadable(book), thisLineNumber - 2, left4)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
}
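// A minimal fixture sketch (an assumption for illustration, not code from this file):
// the `readable` matcher used above accepts any value that exposes a parameterless
// `isReadable` member, so the `book` and `stone` fixtures could be defined roughly as:
//
//   case class Thing(name: String, isReadable: Boolean)
//   val book = Thing("book", isReadable = true)
//   val stone = Thing("stone", isReadable = false)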
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/JavaClassesWrappers.scala
|
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import collection.JavaConverters._
import scala.collection.GenTraversable
// Thin Scala-facing wrappers around java.util.concurrent classes, used internally by ScalaTest.
private[scalatest] class ConcurrentLinkedQueue[T] extends Serializable {
private final val queue = new java.util.concurrent.ConcurrentLinkedQueue[T]
def add(ele: T): Unit = {
queue.add(ele)
}
def iterator: Iterator[T] = queue.iterator.asScala
def isEmpty: Boolean = queue.isEmpty
def asScala: GenTraversable[T] = queue.asScala
}
private[scalatest] class CountDownLatch(count: Int) {
@transient private final val latch = new java.util.concurrent.CountDownLatch(count)
def countDown(): Unit = latch.countDown()
def getCount: Long = latch.getCount
def await(): Unit = latch.await()
}
private[scalatest] object NameTransformer {
def decode(encoded: String): String = scala.reflect.NameTransformer.decode(encoded)
}
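// A minimal usage sketch (an assumption, not part of the original file): the wrappers
// above mirror their java.util.concurrent counterparts, so calling code could collect
// events and wait for completion roughly like this:
//
//   val events = new ConcurrentLinkedQueue[String]
//   val done = new CountDownLatch(1)
//   events.add("suite completed")
//   done.countDown() // count drops from 1 to 0
//   done.await() // returns immediately once the count reaches 0
//   events.asScala.foreach(println)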
|
cquiroz/scalatest
|
scalatest-test/src/test/scala/org/scalatest/EveryShouldContainSpec.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, NormalizingEquality, Every}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class EveryShouldContainSpec extends Spec {
object `a List` {
val xs: Every[String] = Every("hi", "hi", "hi")
val caseLists: Every[String] = Every("tell", "them", "Hi")
object `when used with contain (value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should contain ("hi")
val e1 = intercept[TestFailedException] {
xs should contain ("ho")
}
e1.message.get should be (Resources.didNotContainExpectedElement(decorateToStringValue(xs), "\"ho\""))
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should contain ("hi")
intercept[TestFailedException] {
xs should contain ("ho")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should contain ("ho")
intercept[TestFailedException] {
xs should contain ("hi")
}
}
def `should use an explicitly provided Equality` {
intercept[TestFailedException] {
caseLists should contain ("HI")
}
(caseLists should contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
(caseLists should contain ("HI")) (after being lowerCased)
(caseLists should contain ("HI ")) (after being lowerCased and trimmed)
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
(xs should contain ("hi")) (decided by defaultEquality[String])
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
intercept[TestFailedException] {
caseLists should contain ("HI")
}
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
caseLists should contain ("HI")
normalizedInvokedCount should be (4) // presumably once per element of caseLists ("tell", "them", "Hi") plus once for the expected value "HI"
}
}
object `when used with not contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should not contain "ho"
val e3 = intercept[TestFailedException] {
xs should not contain "hi"
}
e3.message.get should be (Resources.containedExpectedElement(decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should not contain "ho"
intercept[TestFailedException] {
xs should not contain "hi"
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should not contain "hi"
intercept[TestFailedException] {
xs should not contain "ho"
}
}
def `should use an explicitly provided Equality` {
caseLists should not contain "HI"
caseLists should not contain "HI "
(caseLists should not contain "HI ") (decided by defaultEquality afterBeing lowerCased)
(caseLists should not contain "HI ") (after being lowerCased)
intercept[TestFailedException] {
(caseLists should not contain "HI") (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should not contain "HI ") (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should not contain "HI"
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should not contain "HI"
}
normalizedInvokedCount should be (4)
}
}
object `when used with not (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should not (contain ("ho"))
val e3 = intercept[TestFailedException] {
xs should not (contain ("hi"))
}
e3.message.get should be (Resources.containedExpectedElement(decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should not (contain ("ho"))
intercept[TestFailedException] {
xs should not (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should not (contain ("hi"))
intercept[TestFailedException] {
xs should not (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
caseLists should not (contain ("HI"))
caseLists should not (contain ("HI "))
(caseLists should not (contain ("HI "))) (decided by defaultEquality afterBeing lowerCased)
(caseLists should not (contain ("HI "))) (after being lowerCased)
intercept[TestFailedException] {
(caseLists should not (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should not (contain ("HI"))) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists should not (contain ("HI "))) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should not (contain ("HI"))
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should not (contain ("HI"))
}
normalizedInvokedCount should be (4)
}
}
object `when used with (not contain value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs should (not contain "ho")
val e3 = intercept[TestFailedException] {
xs should (not contain "hi")
}
e3.message.get should be (Resources.containedExpectedElement(decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs should (not contain "ho")
intercept[TestFailedException] {
xs should (not contain "hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs should (not contain "hi")
intercept[TestFailedException] {
xs should (not contain "ho")
}
}
def `should use an explicitly provided Equality` {
caseLists should (not contain "HI")
caseLists should (not contain "HI ")
(caseLists should (not contain "HI ")) (decided by defaultEquality afterBeing lowerCased)
(caseLists should (not contain "HI ")) (after being lowerCased)
intercept[TestFailedException] {
(caseLists should (not contain "HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists should (not contain "HI")) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists should (not contain "HI ")) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists should (not contain "HI")
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists should (not contain "HI")
}
normalizedInvokedCount should be (4)
}
}
object `when used with shouldNot contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs shouldNot contain ("ho")
val e3 = intercept[TestFailedException] {
xs shouldNot contain ("hi")
}
e3.message.get should be (Resources.containedExpectedElement(decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs shouldNot contain ("ho")
intercept[TestFailedException] {
xs shouldNot contain ("hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs shouldNot contain ("hi")
intercept[TestFailedException] {
xs shouldNot contain ("ho")
}
}
def `should use an explicitly provided Equality` {
caseLists shouldNot contain ("HI")
caseLists shouldNot contain ("HI ")
(caseLists shouldNot contain ("HI ")) (decided by defaultEquality afterBeing lowerCased)
(caseLists shouldNot contain ("HI ")) (after being lowerCased)
intercept[TestFailedException] {
(caseLists shouldNot contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot contain ("HI ")) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists shouldNot contain ("HI")
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists shouldNot contain ("HI")
}
normalizedInvokedCount should be (4)
}
}
object `when used with shouldNot (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
xs shouldNot (contain ("ho"))
val e3 = intercept[TestFailedException] {
xs shouldNot (contain ("hi"))
}
e3.message.get should be (Resources.containedExpectedElement(decorateToStringValue(xs), "\"hi\""))
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 4)
}
def `should use the implicit Equality in scope` {
xs shouldNot (contain ("ho"))
intercept[TestFailedException] {
xs shouldNot (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
xs shouldNot (contain ("hi"))
intercept[TestFailedException] {
xs shouldNot (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
caseLists shouldNot (contain ("HI"))
caseLists shouldNot (contain ("HI "))
(caseLists shouldNot (contain ("HI "))) (decided by defaultEquality afterBeing lowerCased)
(caseLists shouldNot (contain ("HI "))) (after being lowerCased)
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI"))) (after being lowerCased)
}
intercept[TestFailedException] {
(caseLists shouldNot (contain ("HI "))) (after being lowerCased and trimmed)
}
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
caseLists shouldNot (contain ("HI"))
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
intercept[TestFailedException] {
caseLists shouldNot (contain ("HI"))
}
normalizedInvokedCount should be (4)
}
}
}
object `a collection of Lists` {
val list123s: Every[Every[Int]] = Every(Every(1, 2, 3), Every(1, 2, 3), Every(1, 2, 3))
val lists: Every[Every[Int]] = Every(Every(1, 2, 3), Every(1, 2, 3), Every(4, 5, 6))
val hiLists: Every[Every[String]] = Every(Every("hi"), Every("hi"), Every("hi"))
object `when used with contain (value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) should contain (1)
atLeast (2, lists) should contain (1)
atMost (2, lists) should contain (4)
no (lists) should contain (7)
val e1 = intercept[TestFailedException] {
all (lists) should contain (1)
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) did not contain element 1 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
val e2 = intercept[TestFailedException] {
all (lists) should not contain (4)
}
e2.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 4 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
val e3 = intercept[TestFailedException] {
all (lists) should contain (1)
}
e3.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e3.failedCodeLineNumber.get should be (thisLineNumber - 3)
e3.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) did not contain element 1 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
intercept[TestFailedException] {
all (hiLists) should contain ("ho")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) should contain ("ho")
intercept[TestFailedException] {
all (hiLists) should contain ("hi")
}
}
def `should use an explicitly provided Equality` {
intercept[TestFailedException] {
all (hiLists) should contain ("HI")
}
intercept[TestFailedException] {
all (hiLists) should contain ("HI ")
}
(all (hiLists) should contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
(all (hiLists) should contain ("HI ")) (after being trimmed and lowerCased)
}
@Ignore def `should minimize normalization if an implicit NormalizingEquality is in scope` {
val hiHeHoLists: List[List[String]] = List(List("hi", "he", "ho"), List("hi", "he", "ho"), List("hi", "he", "ho"))
intercept[TestFailedException] {
all (hiHeHoLists) should contain ("HO")
}
var normalizedInvokedCount = 0
implicit val e = new NormalizingEquality[String] {
def normalized(s: String): String = {
normalizedInvokedCount += 1
s.toLowerCase
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any): Any =
b match {
case s: String => normalized(s)
case _ => b
}
}
all (hiHeHoLists) should contain ("HO")
normalizedInvokedCount should be (12)
}
}
object `when used with not contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) should not contain 4
atLeast (2, lists) should not contain 4
atMost (2, lists) should not contain 4
no (list123s) should not contain 1 // I will recommend against double negatives, but we should test it
val e1 = intercept[TestFailedException] {
all (lists) should not contain 6
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
all (hiLists) should not contain "ho"
intercept[TestFailedException] {
all (hiLists) should not contain "hi"
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) should not contain "hi"
intercept[TestFailedException] {
all (hiLists) should not contain "ho"
}
}
def `should use an explicitly provided Equality` {
all (hiLists) should not contain "HI"
all (hiLists) should not contain "HI "
intercept[TestFailedException] {
(all (hiLists) should not contain "HI") (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(all (hiLists) should not contain "HI ") (after being trimmed and lowerCased)
}
}
}
object `when used with not (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) should not (contain (4))
atLeast (2, lists) should not (contain (4))
atMost (2, lists) should not (contain (4))
no (list123s) should not (contain (1)) // I will recommend against double negatives, but we should test it
val e1 = intercept[TestFailedException] {
all (lists) should not (contain (6))
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
all (hiLists) should not (contain ("ho"))
intercept[TestFailedException] {
all (hiLists) should not (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) should not (contain ("hi"))
intercept[TestFailedException] {
all (hiLists) should not (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
all (hiLists) should not (contain ("HI"))
all (hiLists) should not (contain ("HI "))
intercept[TestFailedException] {
(all (hiLists) should not (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(all (hiLists) should not (contain ("HI "))) (after being trimmed and lowerCased)
}
}
}
object `when used with (not contain value) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) should (not contain 4)
atLeast (2, lists) should (not contain 4)
atMost (2, lists) should (not contain 4)
no (list123s) should (not contain 1) // I will recommend against double negatives, but we should test it
val e1 = intercept[TestFailedException] {
all (lists) should (not contain 6)
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
all (hiLists) should (not contain "ho")
intercept[TestFailedException] {
all (hiLists) should (not contain "hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) should (not contain "hi")
intercept[TestFailedException] {
all (hiLists) should (not contain "ho")
}
}
def `should use an explicitly provided Equality` {
all (hiLists) should (not contain "HI")
all (hiLists) should (not contain "HI ")
intercept[TestFailedException] {
(all (hiLists) should (not contain "HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(all (hiLists) should (not contain "HI ")) (after being trimmed and lowerCased)
}
}
}
object `when used with shouldNot contain value syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) shouldNot contain (4)
atLeast (2, lists) shouldNot contain (4)
atMost (2, lists) shouldNot contain (4)
no (list123s) shouldNot contain (1) // I will recommend against double negatives, but we should test it
val e1 = intercept[TestFailedException] {
all (lists) shouldNot contain (6)
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
all (hiLists) shouldNot contain ("ho")
intercept[TestFailedException] {
all (hiLists) shouldNot contain ("hi")
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) shouldNot contain ("hi")
intercept[TestFailedException] {
all (hiLists) shouldNot contain ("ho")
}
}
def `should use an explicitly provided Equality` {
all (hiLists) shouldNot contain ("HI")
all (hiLists) shouldNot contain ("HI ")
intercept[TestFailedException] {
(all (hiLists) shouldNot contain ("HI")) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(all (hiLists) shouldNot contain ("HI ")) (after being trimmed and lowerCased)
}
}
}
object `when used with shouldNot (contain (value)) syntax` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list123s) shouldNot (contain (4))
atLeast (2, lists) shouldNot (contain (4))
atMost (2, lists) shouldNot (contain (4))
no (list123s) shouldNot (contain (1)) // I will recommend against double negatives, but we should test it
val e1 = intercept[TestFailedException] {
all (lists) shouldNot (contain (6))
}
e1.failedCodeFileName.get should be ("EveryShouldContainSpec.scala")
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some("'all' inspection failed, because: \n" +
" at index 2, Many(4, 5, 6) contained element 6 (EveryShouldContainSpec.scala:" + (thisLineNumber - 5) + ") \n" +
"in Many(Many(1, 2, 3), Many(1, 2, 3), Many(4, 5, 6))"))
}
def `should use the implicit Equality in scope` {
all (hiLists) shouldNot (contain ("ho"))
intercept[TestFailedException] {
all (hiLists) shouldNot (contain ("hi"))
}
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
all (hiLists) shouldNot (contain ("hi"))
intercept[TestFailedException] {
all (hiLists) shouldNot (contain ("ho"))
}
}
def `should use an explicitly provided Equality` {
all (hiLists) shouldNot (contain ("HI"))
all (hiLists) shouldNot (contain ("HI "))
intercept[TestFailedException] {
(all (hiLists) shouldNot (contain ("HI"))) (decided by defaultEquality afterBeing lowerCased)
}
intercept[TestFailedException] {
(all (hiLists) shouldNot (contain ("HI "))) (after being trimmed and lowerCased)
}
}
}
}
}
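// A hedged sketch of what the explicit-Equality forms above evaluate to, inside a spec
// that (like this one) has Matchers and StringNormalizations in scope:
//
//   val eq = decided by defaultEquality[String] afterBeing (lowerCased and trimmed)
//   eq.areEqual("Hi ", "hi") // true: both sides are lower-cased and trimmed before comparison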
|
cquiroz/scalatest
|
scalatest-test/src/test/scala/org/scalatest/AllShouldContainElementTypeCheckSpec.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.exceptions.TestFailedException
import FailureMessages._
import Matchers._
import org.scalactic.CheckedEquality
class AllShouldContainElementTypeCheckSpec extends Spec with CheckedEquality {
// Checking that the contained-value type is compatible with the collection's element type
object `The 'contain (<element>)' syntax` {
object `should give a type error if the types are not compatible` {
def `on Array` {
"""all (Array(Array(1, 2))) should contain ("2")""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""all (Array(Array(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.immutable.Set` {
"""atLeast (1, Set(Set(1, 2))) should contain ("2")""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""atLeast (1, Set(Set(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.mutable.Set` {
import scala.collection.mutable
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should contain ("2")""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""atMost (1, mutable.Set(mutable.Set(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.Set` {
val set: scala.collection.Set[Int] = Set(1, 2)
"""between (1, 3, Vector(set)) should contain ("2")""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (contain ("2"))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should not { contain ("3") }""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should not contain ("3")""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""between (1, 3, Vector(set)) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.immutable.HashSet` {
import scala.collection.immutable.HashSet
"""exactly (1, List(HashSet(1, 2))) should contain ("2")""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""exactly (1, List(HashSet(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.mutable.HashSet` {
import scala.collection.mutable
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should contain ("2")""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""every (mutable.HashSet(mutable.HashSet(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on List` {
"""all (List(List(1, 2))) should contain ("2")""" shouldNot typeCheck
"""all (List(List(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""all (List(List(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""all (List(List(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""all (List(List(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""all (List(List(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (List(List(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""all (List(List(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on Vector` {
"""every (Vector(Vector(1, 2))) should contain ("2")""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (contain ("2"))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should not { contain ("3") }""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should not contain ("3")""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""every (Vector(Vector(1, 2))) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on java.util.List` {
val javaList: java.util.List[Int] = new java.util.ArrayList
javaList.add(1)
javaList.add(2)
val javaListOfJavaList: java.util.List[java.util.List[Int]] = new java.util.ArrayList
javaListOfJavaList.add(javaList)
"""all (javaListOfJavaList) should contain ("2")""" shouldNot typeCheck
"""all (javaListOfJavaList) should (contain ("2"))""" shouldNot typeCheck
"""all (javaListOfJavaList) should not { contain ("3") }""" shouldNot typeCheck
"""all (javaListOfJavaList) should not contain ("3")""" shouldNot typeCheck
"""all (javaListOfJavaList) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""all (javaListOfJavaList) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""all (javaListOfJavaList) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""all (javaListOfJavaList) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on scala.collection.immutable.Map ` {
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should contain (2 -> 2)""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain (2 -> 2))""" shouldNot typeCheck
"""atLeast (1, Array(Map(1 -> "one", 2 -> "two"))) should contain ("two" -> "two")""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should contain ("two" -> "two")""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain ("two" -> "two"))""" shouldNot typeCheck
"""atLeast (1, Array(Map(1 -> "one", 2 -> "two"))) should contain (2 -> 2)""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should not { contain (3 -> 3) }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should not contain (3 -> 3)""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain (3 -> 3))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should not { contain ("three" -> "three") }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should not contain ("three" -> "three")""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain ("three" -> "three"))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain (1 -> 1)) }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain (1 -> 1)))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain (1 -> 1))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain ("one" -> "one")) }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain ("one" -> "one")))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain ("one" -> "one"))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain (1 -> 1)) }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain (1 -> 1)))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain (1 -> 1))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain ("one" -> "one")) }""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain ("one" -> "one")))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain ("one" -> "one"))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain (3 -> 3) }}""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain (3 -> 3)))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain (3 -> 3))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain ("three" -> "three")))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain ("three" -> "three"))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain (3 -> 3) }}""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain (3 -> 3)))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain (3 -> 3))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain ("three" -> "three")))""" shouldNot typeCheck
"""atLeast (1, Array(Map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain ("three" -> "three"))""" shouldNot typeCheck
}
def `on scala.collection.mutable.Map ` {
import scala.collection.mutable
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should contain (2 -> 2)""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain (2 -> 2))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map(1 -> "one", 2 -> "two"))) should contain ("two" -> "two")""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should contain ("two" -> "two")""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain ("two" -> "two"))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map(1 -> "one", 2 -> "two"))) should contain (2 -> 2)""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should not { contain (3 -> 3) }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should not contain (3 -> 3)""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain (3 -> 3))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should not { contain ("three" -> "three") }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should not contain ("three" -> "three")""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain ("three" -> "three"))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain (1 -> 1)) }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain (1 -> 1)))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain (1 -> 1))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain ("one" -> "one")) }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain ("one" -> "one")))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain ("one" -> "one"))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain (1 -> 1)) }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain (1 -> 1)))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain (1 -> 1))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain ("one" -> "one")) }""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain ("one" -> "one")))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain ("one" -> "one"))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain (3 -> 3) }}""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain (3 -> 3)))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain (3 -> 3))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain ("three" -> "three")))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain ("three" -> "three"))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain (3 -> 3) }}""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain (3 -> 3)))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain (3 -> 3))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain ("three" -> "three")))""" shouldNot typeCheck
"""atMost(1, List(mutable.Map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain ("three" -> "three"))""" shouldNot typeCheck
}
def `on scala.collection.Map ` {
val map: scala.collection.Map[String, Int] = Map("one" -> 1, "two" -> 2)
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should contain (2 -> 2)""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain (2 -> 2))""" shouldNot typeCheck
"""between (1, 2, List(map(1 -> "one", 2 -> "two"))) should contain ("two" -> "two")""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should contain ("two" -> "two")""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain ("two" -> "two"))""" shouldNot typeCheck
"""between (1, 2, List(map(1 -> "one", 2 -> "two"))) should contain (2 -> 2)""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should not { contain (3 -> 3) }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should not contain (3 -> 3)""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain (3 -> 3))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should not { contain ("three" -> "three") }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should not contain ("three" -> "three")""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain ("three" -> "three"))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain (1 -> 1)) }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain (1 -> 1)))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain (1 -> 1))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain ("one" -> "one")) }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain ("one" -> "one")))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain ("one" -> "one"))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain (1 -> 1)) }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain (1 -> 1)))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain (1 -> 1))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain ("one" -> "one")) }""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain ("one" -> "one")))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain ("one" -> "one"))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain (3 -> 3) }}""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain (3 -> 3)))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain (3 -> 3))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain ("three" -> "three")))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain ("three" -> "three"))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain (3 -> 3) }}""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain (3 -> 3)))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain (3 -> 3))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain ("three" -> "three")))""" shouldNot typeCheck
"""between (1, 2, List(map("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain ("three" -> "three"))""" shouldNot typeCheck
}
def `on scala.collection.immutable.HashMap ` {
import scala.collection.immutable.HashMap
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should contain (2 -> 2)""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain (2 -> 2))""" shouldNot typeCheck
"""no (Set(HashMap(1 -> "one", 2 -> "two"))) should contain ("two" -> "two")""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should contain ("two" -> "two")""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> "two"))""" shouldNot typeCheck
"""no (Set(HashMap(1 -> "one", 2 -> "two"))) should contain (2 -> 2)""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should not { contain (3 -> 3) }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should not contain (3 -> 3)""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain (3 -> 3))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should not { contain ("three" -> "three") }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should not contain ("three" -> "three")""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain ("three" -> "three"))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain (1 -> 1)) }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain (1 -> 1)))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain (1 -> 1))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain ("one" -> "one")) }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain ("one" -> "one")))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain ("one" -> "one"))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain (1 -> 1)) }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain (1 -> 1)))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain (1 -> 1))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain ("one" -> "one")) }""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain ("one" -> "one")))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain ("one" -> "one"))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain (3 -> 3) }}""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain (3 -> 3)))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain (3 -> 3))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain ("three" -> "three")))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain ("three" -> "three"))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain (3 -> 3) }}""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain (3 -> 3)))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain (3 -> 3))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain ("three" -> "three")))""" shouldNot typeCheck
"""no (Set(HashMap("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain ("three" -> "three"))""" shouldNot typeCheck
}
def `on scala.collection.mutable.HashMap ` {
import scala.collection.mutable
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should contain (2 -> 2)""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain (2 -> 2))""" shouldNot typeCheck
"""all (List(mutable.HashMap(1 -> "one", 2 -> "two"))) should contain ("two" -> "two")""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should contain ("two" -> "two")""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> "two"))""" shouldNot typeCheck
"""all (List(mutable.HashMap(1 -> "one", 2 -> "two"))) should contain (2 -> 2)""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should not { contain (3 -> 3) }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should not contain (3 -> 3)""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain (3 -> 3))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should not { contain ("three" -> "three") }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should not contain ("three" -> "three")""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain ("three" -> "three"))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain (1 -> 1)) }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain (1 -> 1)))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain (1 -> 1))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { contain ("two" -> 2) and (contain ("one" -> "one")) }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((contain ("two" -> 2)) and (contain ("one" -> "one")))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain ("two" -> 2) and contain ("one" -> "one"))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain (1 -> 1)) }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain (1 -> 1)))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain (1 -> 1))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { contain ("cat" -> 77) or (contain ("one" -> "one")) }""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((contain ("cat" -> 77)) or (contain ("one" -> "one")))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (contain ("cat" -> 77) or contain ("one" -> "one"))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain (3 -> 3) }}""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain (3 -> 3)))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain (3 -> 3))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { not { contain ("five" -> 5) } and not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((not contain ("five" -> 5)) and (not contain ("three" -> "three")))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain ("five" -> 5) and not contain ("three" -> "three"))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain (3 -> 3) }}""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain (3 -> 3)))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain (3 -> 3))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should { not { contain ("two" -> 2) } or not { contain ("three" -> "three") }}""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should ((not contain ("two" -> 2)) or (not contain ("three" -> "three")))""" shouldNot typeCheck
"""all (List(mutable.HashMap("one" -> 1, "two" -> 2))) should (not contain ("two" -> 2) or not contain ("three" -> "three"))""" shouldNot typeCheck
}
def `on java.util.Set` {
val javaSet: java.util.Set[Int] = new java.util.HashSet
javaSet.add(1)
javaSet.add(2)
val javaSetOfJavaSet: java.util.Set[java.util.Set[Int]] = new java.util.HashSet
javaSetOfJavaSet.add(javaSet)
"""every (javaSetOfJavaSet) should contain ("2")""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (contain ("2"))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should not { contain ("3") }""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should not contain ("3")""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { contain ("2") and (contain (1)) }""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((contain ("2")) and (contain (1)))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (contain ("2") and contain (1))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { contain (2) and (contain ("1")) }""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((contain (2)) and (contain ("1")))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (contain (2) and contain ("1"))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { contain ("77") or (contain (2)) }""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((contain ("77")) or (contain (2)))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (contain ("77") or contain (2))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { contain (77) or (contain ("2")) }""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((contain (77)) or (contain ("2")))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (contain (77) or contain ("2"))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { not { contain ("5") } and not { contain (3) }}""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((not contain ("5")) and (not contain (3)))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { not { contain (5) } and not { contain ("3") }}""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((not contain (5)) and (not contain ("3")))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should { not { contain (1) } or not { contain ("3") }}""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((not contain ("1")) or (not contain (3)))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (not contain ("3") or not contain (2))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (not contain ("5") and not contain (3))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should ((not contain (1)) or (not contain ("3")))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (not contain (3) or not contain ("2"))""" shouldNot typeCheck
"""every (javaSetOfJavaSet) should (not contain (5) and not contain ("3"))""" shouldNot typeCheck
}
def `on java.util.Map` {
val javaMap: java.util.Map[String, Int] = new java.util.HashMap
javaMap.put("one",1)
javaMap.put("two", 2)
"""all (Vector(javaMap)) should contain (Entry(2, 2))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry(2, 2)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should contain (Entry("two", "two"))""" shouldNot typeCheck
"""all (Vector(javaMap)) should contain (Entry("two", "two"))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry("two", "two")))""" shouldNot typeCheck
"""all (Vector(javaMap)) should contain (Entry(2, 2))""" shouldNot typeCheck
"""all (Vector(javaMap)) should not { contain (Entry(3, 3)) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should not contain (Entry(3, 3))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry(3, 3)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should not { contain (Entry("three", "three")) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should not contain (Entry("three", "three"))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry("three", "three")))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { contain (Entry(Entry("two", 2))) and (contain (Entry(1, 1))) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((contain (Entry(Entry("two", 2)))) and (contain (Entry(1, 1))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry(Entry("two", 2))) and contain (Entry(1, 1)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { contain (Entry(Entry("two", 2))) and (contain (Entry("one", "one"))) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((contain (Entry(Entry("two", 2)))) and (contain (Entry("one", "one"))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry(Entry("two", 2))) and contain (Entry("one", "one")))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { contain (Entry("cat", 77)) or (contain (Entry(1, 1))) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((contain (Entry("cat", 77))) or (contain (Entry(1, 1))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry("cat", 77)) or contain (Entry(1, 1)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { contain (Entry("cat", 77)) or (contain (Entry("one", "one"))) }""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((contain (Entry("cat", 77))) or (contain (Entry("one", "one"))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (contain (Entry("cat", 77)) or contain (Entry("one", "one")))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { not { contain (Entry("five", 5)) } and not { contain (Entry(3, 3)) }}""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((not contain (Entry("five", 5))) and (not contain (Entry(3, 3))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry("five", 5)) and not contain (Entry(3, 3)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { not { contain (Entry("five", 5)) } and not { contain (Entry("three", "three")) }}""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((not contain (Entry("five", 5))) and (not contain (Entry("three", "three"))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry("five", 5)) and not contain (Entry("three", "three")))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { not { contain (Entry(Entry("two", 2))) } or not { contain (Entry(3, 3)) }}""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((not contain (Entry(Entry("two", 2)))) or (not contain (Entry(3, 3))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry(Entry("two", 2))) or not contain (Entry(3, 3)))""" shouldNot typeCheck
"""all (Vector(javaMap)) should { not { contain (Entry(Entry("two", 2))) } or not { contain (Entry("three", "three")) }}""" shouldNot typeCheck
"""all (Vector(javaMap)) should ((not contain (Entry(Entry("two", 2)))) or (not contain (Entry("three", "three"))))""" shouldNot typeCheck
"""all (Vector(javaMap)) should (not contain (Entry(Entry("two", 2))) or not contain (Entry("three", "three")))""" shouldNot typeCheck
}
}
}
}
|
cquiroz/scalatest
|
scalatest/src/main/scala/org/scalatest/DynaTags.scala
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/**
* Dynamic tags for a run.
*
* <p>
* Instances of this class are passed to the <a href="Filter.html"><code>Filter</code></a> constructor to
* support running selected suites and tests via dynamic tagging. For example, dynamic tags can be used
* to rerun tests that failed previously, or tests selected via a wildcard from <a href="tools/Runner$.html"><code>Runner</code></a> or
* the Scala interpreter.
* </p>
*
* @param suiteTags a map from String suite ID to a set of tags for that suite.
 * @param testTags a map from String suite ID to a map whose keys are test names and whose values are the sets of tags for those tests.
* @throws NullPointerException if either <code>suiteTags</code> or <code>testTags</code> is <code>null</code>
*
*/
final case class DynaTags(suiteTags: Map[String, Set[String]], testTags: Map[String, Map[String, Set[String]]]) extends Serializable {
if (suiteTags == null)
throw new NullPointerException("suiteTags was null")
if (testTags == null)
throw new NullPointerException("testTags was null")
}
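// A minimal usage sketch of the class above, assuming hypothetical suite IDs, test
// names and tag names (none of these values come from this repository): the first
// map tags a whole suite, the second tags a single test within that suite.
private[scalatest] object DynaTagsUsageSketch {
  val rerunSelected: DynaTags =
    DynaTags(
      suiteTags = Map("com.example.MySuite" -> Set("org.scalatest.Retryable")),
      testTags = Map("com.example.MySuite" -> Map("a previously failed test" -> Set("org.scalatest.Retryable")))
    )
}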
|
corbantek/play-photostash
|
build.sbt
|
name := "play-photostash"
version := "1.0"
lazy val `play-photostash` = (project in file(".")).enablePlugins(PlayJava)
scalaVersion := "2.11.6"
//libraryDependencies ++= Seq(javaJdbc, javaEbean, cache, javaWs)
libraryDependencies ++= Seq(
"com.arangodb" % "arangodb-java-driver" % "2.5.4",
"org.imgscalr" % "imgscalr-lib" % "4.2",
"com.drewnoakes" % "metadata-extractor" % "2.8.1"
)
// Compile the project before generating Eclipse files, so that generated .scala or .class files for views and routes are present
EclipseKeys.preTasks := Seq(compile in Compile)
EclipseKeys.projectFlavor := EclipseProjectFlavor.Java // Java project. Don't expect Scala IDE
EclipseKeys.createSrc := EclipseCreateSrc.ValueSet(EclipseCreateSrc.ManagedClasses, EclipseCreateSrc.ManagedResources) // Use .class files instead of generated .scala files for views and routes
|
PerkinElmer/nifi-google-drive-bundle
|
nifi-google-drive-processor-processors/src/main/scala/com/example/nifi/processors/DownloadFilesProcessor.scala
|
package com.example.nifi.processors
import org.apache.nifi.annotation.behavior.{ ReadsAttribute, ReadsAttributes, WritesAttribute, WritesAttributes }
import org.apache.nifi.annotation.documentation.{ CapabilityDescription, SeeAlso, Tags }
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.processor._
@Tags(Array("please work"))
@CapabilityDescription("A download processor")
@SeeAlso(Array())
@ReadsAttributes(Array(new ReadsAttribute(attribute = "", description = "")))
@WritesAttributes(Array(new WritesAttribute(attribute = "", description = "")))
class DownloadFilesProcessor
extends AbstractProcessor
with DownloadFilesProcessorProperties
with DownloadFilesProcessorRelationships {
import scala.collection.JavaConverters._
override def getSupportedPropertyDescriptors(): java.util.List[PropertyDescriptor] = {
properties.asJava
}
override def getRelationships(): java.util.Set[Relationship] = {
relationships.asJava
}
override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {
val flowFile = Option(session.get())
for (file <- flowFile) {
val fileId = file.getAttribute("ID")
val fileName = file.getAttribute("name")
val filePath = file.getAttribute("path")
val mime = file.getAttribute("mime")
DriveAuth.downloadDriveFile(fileId, fileName, filePath, mime)
session.transfer(file, RelSuccess)
}
}
protected[this] override def init(context: ProcessorInitializationContext): Unit = {
}
}
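// A minimal sketch of the flow-file attribute contract that onTrigger above relies on:
// an upstream ListFilesProcessor in this bundle writes the "ID", "name", "path" and
// "mime" attributes read here. The concrete values below are illustrative assumptions
// only, not data from a real Drive account.
private object DownloadFilesAttributeSketch {
  val exampleAttributes: Map[String, String] = Map(
    "ID" -> "someDriveFileId",
    "name" -> "example.sas7bdat",
    "path" -> "My Drive\\example.sas7bdat",
    "mime" -> "application/octet-stream"
  )
}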
|
PerkinElmer/nifi-google-drive-bundle
|
nifi-google-drive-processor-processors/src/main/scala/com/example/nifi/processors/ExampleProcessor.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.nifi.processors
import java.io._
import org.apache.nifi.annotation.behavior.{ ReadsAttribute, ReadsAttributes, WritesAttribute, WritesAttributes }
import org.apache.nifi.annotation.documentation.{ CapabilityDescription, SeeAlso, Tags }
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.processor._
import scala.util.Random
@Tags(Array("example"))
@CapabilityDescription("An example processor")
@SeeAlso(Array())
@ReadsAttributes(Array(
new ReadsAttribute(attribute = "", description = "")))
@WritesAttributes(Array(
new WritesAttribute(attribute = "", description = "")))
class ExampleProcessor extends AbstractProcessor with ExampleProcessorProperties
with ExampleProcessorRelationships {
import java.util.concurrent.atomic.AtomicReference
import scala.collection.JavaConverters._
private val data = new AtomicReference[Array[Byte]]
override def getSupportedPropertyDescriptors(): java.util.List[PropertyDescriptor] = {
properties.asJava
}
override def getRelationships(): java.util.Set[Relationship] = {
relationships.asJava
}
private def generateData(context: ProcessContext) = {
val byteCount = 100
val random = new Random
val array = new Array[Byte](byteCount)
random.nextBytes(array)
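    // Note: Array[Byte].toString yields a JVM reference string such as "[B@1a2b3c",
    // so the JSON built here embeds that reference string rather than the random bytes.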
("{\"data\":{\"property\":\"" + array.toString() + "\"}}").getBytes()
}
override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {
import java.io.IOException
import org.apache.nifi.processor.io.OutputStreamCallback
data.set(generateData(context))
var flowFile = session.create
if (data.get().length > 0) flowFile = session.write(flowFile, new OutputStreamCallback() {
@throws[IOException]
def process(out: OutputStream): Unit = {
out.write(data.get())
}
})
    // putAttribute returns an updated FlowFile; keep the result so the mime.type
    // attribute is still present on the flow file that is transferred below.
    flowFile = session.putAttribute(flowFile, "mime.type", "application/json")
session.getProvenanceReporter.create(flowFile)
session.transfer(flowFile, RelSuccess)
}
protected[this] override def init(context: ProcessorInitializationContext): Unit = {
}
}
|
PerkinElmer/nifi-google-drive-bundle
|
nifi-google-drive-processor-processors/src/main/scala/com/example/nifi/processors/DriveAuth.scala
|
package com.example.nifi.processors
import com.google.api.client.auth.oauth2.Credential
import com.google.api.client.extensions.java6.auth.oauth2.AuthorizationCodeInstalledApp
import com.google.api.client.extensions.jetty.auth.oauth2.LocalServerReceiver
import com.google.api.client.googleapis.auth.oauth2.GoogleAuthorizationCodeFlow
import com.google.api.client.http.javanet.NetHttpTransport
import com.google.api.client.googleapis.auth.oauth2.GoogleClientSecrets
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.json.jackson2.JacksonFactory
import com.google.api.client.util.store.FileDataStoreFactory
import com.google.api.services.drive.Drive
import com.google.api.services.drive.model.File
import java.io._
import java.util
import scala.collection.JavaConverters._
object DriveAuth {
/** Application name. */
private val APPLICATION_NAME = "Drive API Java Quickstart"
/** Directory to store user credentials for this application. */
private val DATA_STORE_DIR = new java.io.File(System.getProperty("user.home"), ".credentials/drive-java-quickstart")
/** Global instance of the {@link FileDataStoreFactory}. */
private var DATA_STORE_FACTORY: FileDataStoreFactory = null
/** Global instance of the JSON factory. */
private val JSON_FACTORY = JacksonFactory.getDefaultInstance
/** Global instance of the HTTP transport. */
private var HTTP_TRANSPORT: NetHttpTransport = null
/**
* Global instance of the scopes required by this quickstart.
*
* If modifying these scopes, delete your previously saved credentials
* at ~/.credentials/drive-java-quickstart
*/
private val SCOPES = util.Arrays.asList("https://www.googleapis.com/auth/drive")
private var CLIENT_ID = "7695128173-dpt0u3galufa6jk22q51lsuv6rta65vp.apps.googleusercontent.com"
private var CLIENT_SECRET = "maXDAoyuytcz5tdI-3dF-kCf"
private var ACCESS_TYPE = "online"
  private val Q = "fileExtension = 'sas7bdat'"
try {
HTTP_TRANSPORT = GoogleNetHttpTransport.newTrustedTransport
DATA_STORE_FACTORY = new FileDataStoreFactory(DATA_STORE_DIR)
} catch {
case t: Throwable =>
t.printStackTrace()
System.exit(1)
}
/**
* Creates an authorized Credential object.
*
* @return an authorized Credential object.
* @throws IOException
*/
/**
* hardcoding the client id and secret instead of reading in the /client_secret.json seems to make authorization
* reprompt you for you login credentials every time the program is run
* @throws[IOException]
* def authorize: Credential = {
* // Build flow and trigger user authorization request.
* val flow = new GoogleAuthorizationCodeFlow.Builder(HTTP_TRANSPORT, JSON_FACTORY, CLIENT_ID, CLIENT_SECRET, SCOPES).
* setDataStoreFactory(DATA_STORE_FACTORY).setAccessType(ACCESS_TYPE).build
* val credential = new AuthorizationCodeInstalledApp(flow, new LocalServerReceiver()).authorize("user")
* System.out.println("Credentials saved to " + DATA_STORE_DIR.getAbsolutePath)
* return credential
* }
*/
@throws[IOException]
def authorize: Credential = {
val clientSecrets: GoogleClientSecrets = GoogleClientSecrets.load(JSON_FACTORY, new InputStreamReader(getClass.getResourceAsStream("/client_secret.json")))
// Build flow and trigger user authorization request.
val flow = new GoogleAuthorizationCodeFlow.Builder(HTTP_TRANSPORT, JSON_FACTORY, clientSecrets, SCOPES).
setDataStoreFactory(DATA_STORE_FACTORY).setAccessType(ACCESS_TYPE).build
val credential = new AuthorizationCodeInstalledApp(flow, new LocalServerReceiver()).authorize("user")
System.out.println("Credentials saved to " + DATA_STORE_DIR.getAbsolutePath)
return credential
}
/**
* Build and return an authorized Drive client service.
*
* @return an authorized Drive client service
* @throws IOException
*/
@throws[IOException]
def getDriveService: Drive = {
new Drive.Builder(HTTP_TRANSPORT, JSON_FACTORY, authorize).setApplicationName(APPLICATION_NAME).build
}
@throws[FileNotFoundException]
def downloadDriveFile(fileId: String, fileName: String, filePath: String, mimeType: String) = {
val driveService = getDriveService
val diskPath = "C:\\Users\\redderre\\"
import java.io.File
//if this is a file, check if the directory where it belongs is created, if not create it
//then download the file
if (!mimeType.equals("application/vnd.google-apps.folder")) {
val index = filePath.lastIndexOf("\\")
var newPath = ""
if (index != -1) {
newPath = diskPath + filePath.substring(0, index)
} else {
newPath = diskPath + filePath
}
val file = new File(newPath)
      // if the directory for this file does not exist yet, create it
if (!file.exists()) {
file.mkdirs()
file.setExecutable(true)
}
val absFilePath = diskPath + filePath
val fileOutputStream = new FileOutputStream(absFilePath)
try {
driveService.files().get(fileId).executeMediaAndDownloadTo(fileOutputStream)
} catch {
case e: Exception => println("NonBinary File found")
}
} else {
var newPath = diskPath + filePath
val file = new File(newPath)
if (!file.exists()) {
file.mkdirs()
file.setExecutable(true)
}
}
}
//takes file/folder ID as input
//if it's just a file, return that path
//if it's a folder, return a list of its own path and its children's paths
//It currently refers to the root as 'My Drive', so for example if given a file 'file' in the root, the path will be
//My Drive/file
def listFiles(service: Drive, fileId: String): List[(File, String)] = {
var list: List[(File, String)] = List()
val file = service.files().get(fileId).setFields("parents, name, mimeType, id").execute()
if (file.getMimeType == "application/vnd.google-apps.folder") {
println("the folder you are looking into: " + file)
val files = service.files.list.setQ("'" + file.getId + "' in parents and trashed=false").setFields("files(name, id, mimeType, parents)").execute.getFiles
for ((file, index) <- files.asScala.zipWithIndex) {
list = (file, getParents(service, file, file.getName)) :: list
}
}
//list = (file, getParents(service, file, file.getName)) :: list
list
}
def getParents(service: Drive, file: File, filePath: String): String = {
var finalPath = filePath
    // if the file has no parents it is the root; otherwise keep prepending parent names until the root is reached
if (file.getParents != null) {
val parentId: String = file.getParents.get(0)
val parentFile = service.files.get(parentId).setFields("id, name, parents").execute()
finalPath = parentFile.getName + "\\" + filePath
getParents(service, parentFile, finalPath)
} else {
finalPath
}
}
@throws[IOException]
def main(args: Array[String]): Unit = { // Build a new authorized API client service.
val service = getDriveService
service.changes.getStartPageToken.execute
val x = listFiles(service, "root") //14JJ3vRHebTHAws7xfBNUI55M58NaMSU6 |||| 1wvI23jzfNCiLduOPT-bMDzIC9CZCAh6i : the former is a folder, the latter is a file
println("1: " + x(0)._1)
println("2: " + x(0)._2)
println(x)
// for (file <- x) {
// downloadDriveFile(file._1.getId, file._1.getName, file._2, file._1.getMimeType)
// }
//downloadDriveFile(x, service)
}
}
|
PerkinElmer/nifi-google-drive-bundle
|
nifi-google-drive-processor-processors/src/main/scala/com/example/nifi/processors/ListFilesProcessor.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.nifi.processors
import java.io._
import org.apache.nifi.annotation.behavior.{ ReadsAttribute, ReadsAttributes, WritesAttribute, WritesAttributes }
import org.apache.nifi.annotation.documentation.{ CapabilityDescription, SeeAlso, Tags }
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.processor._
@Tags(Array("please work"))
@CapabilityDescription("A list processor")
@SeeAlso(Array())
@ReadsAttributes(Array(
new ReadsAttribute(attribute = "", description = "")))
@WritesAttributes(Array(
new WritesAttribute(attribute = "", description = "")))
class ListFilesProcessor extends AbstractProcessor with ListFilesProcessorProperties
with ListFilesProcessorRelationships {
import java.util.concurrent.atomic.AtomicReference
import scala.collection.JavaConverters._
private val data = new AtomicReference[Array[Byte]]
override def getSupportedPropertyDescriptors(): java.util.List[PropertyDescriptor] = {
properties.asJava
}
override def getRelationships(): java.util.Set[Relationship] = {
relationships.asJava
}
// override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {
// import java.io.IOException
//
// import org.apache.nifi.processor.io.OutputStreamCallback
//
// data.set(generateData(context))
// var flowFile = session.create //id, name, path
// if (data.get().length > 0) flowFile = session.write(flowFile, new OutputStreamCallback() {
// @throws[IOException]
// def process(out: OutputStream): Unit = {
// out.write(data.get())
// }
// })
//
// session.putAttribute(flowFile, "id", "value")
// session.putAttribute(flowFile, "test", "work")
// session.getProvenanceReporter.create(flowFile)
// session.transfer(flowFile, RelSuccess)
// }
override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {
import java.io.IOException
import org.apache.nifi.processor.io.OutputStreamCallback
val files = getFileList(context)
for (file <- files) {
var flowFile = session.create
      // ProcessSession.putAttribute returns an updated FlowFile; keep the result,
      // otherwise the attributes set here are lost before the transfer below.
      flowFile = session.putAttribute(flowFile, "ID", file._1.getId)
      flowFile = session.putAttribute(flowFile, "name", file._1.getName)
      flowFile = session.putAttribute(flowFile, "mime", file._1.getMimeType)
      flowFile = session.putAttribute(flowFile, "path", file._2)
session.getProvenanceReporter.create(flowFile)
session.transfer(flowFile, RelSuccess)
}
}
def getFileList(context: ProcessContext) = {
val id = context.getProperty("ID").evaluateAttributeExpressions().getValue
val service = DriveAuth.getDriveService
DriveAuth.listFiles(service, id)
}
protected[this] override def init(context: ProcessorInitializationContext): Unit = {
}
}
|
kabutopia/shadow-stack
|
src/test/scala/ShadowStackUnitTest.scala
|
// See README.md for license details.
package shadowstack
import java.io.File
import chisel3.iotesters
import chisel3.iotesters.{ChiselFlatSpec, Driver, PeekPokeTester}
class ShadowStackUnitTester(c: ShadowStack) extends PeekPokeTester(c) {
  val retAddList = List.empty[Int]
//val dest = rnd.nextInt(0xffffffff)
val dest1 = 0xabcd1234
val dest2 = 0x1234bcda
for (i <- 1 to 20) {
//val toDo = rnd.nextInt(20)
val toDo = i
toDo match {
// test jal
case 1 => {
poke(c.io.curentAddr, dest1)
poke(c.io.jal, 1)
poke(c.io.ret, 0)
step(1)
expect(c.io.outputValid, true)
}
// test jal
case 2 => {
poke(c.io.curentAddr, dest2)
poke(c.io.jal, 1)
poke(c.io.ret, 0)
step(1)
expect(c.io.outputValid, true)
}
// test ret ok
case 4 => {
poke(c.io.targetAddr, dest2)
poke(c.io.jal, 0)
poke(c.io.ret, 1)
step(1)
expect(c.io.outputValid, true)
}
// test ret ok
case 7 => {
poke(c.io.targetAddr, dest1)
poke(c.io.jal, 0)
poke(c.io.ret, 1)
step(1)
expect(c.io.outputValid, true)
}
// test jal
case 10 => {
poke(c.io.curentAddr, dest2)
poke(c.io.jal, 1)
poke(c.io.ret, 0)
step(1)
expect(c.io.outputValid, true)
}
// test ret ko
case 12 => {
poke(c.io.targetAddr, dest2+4)
poke(c.io.jal, 0)
poke(c.io.ret, 1)
step(1)
expect(c.io.outputValid, false)
}
case _ => {
poke(c.io.jal, 0)
poke(c.io.ret, 0)
step(1)
}
}
}
}
/**
* This is a trivial example of how to run this Specification
* From within sbt use:
* {{{
  * testOnly shadowstack.ShadowStackTester
* }}}
* From a terminal shell use:
* {{{
  * sbt 'testOnly shadowstack.ShadowStackTester'
* }}}
*/
class ShadowStackTester extends ChiselFlatSpec {
// Disable this until we fix isCommandAvailable to swallow stderr along with stdout
private val backendNames = if(false && firrtl.FileUtils.isCommandAvailable(Seq("verilator", "--version"))) {
Array("firrtl", "verilator")
}
else {
Array("firrtl")
}
  for ( backendName <- backendNames ) {
    "ShadowStack" should s"validate return addresses pushed by jal and checked on ret (with $backendName)" in {
Driver(() => new ShadowStack, backendName) {
c => new ShadowStackUnitTester(c)
} should be (true)
}
}
"Basic test using Driver.execute" should "be used as an alternative way to run specification" in {
iotesters.Driver.execute(Array(), () => new ShadowStack) {
c => new ShadowStackUnitTester(c)
} should be (true)
}
"using --backend-name verilator" should "be an alternative way to run using verilator" in {
if(backendNames.contains("verilator")) {
iotesters.Driver.execute(Array("--backend-name", "verilator"), () => new ShadowStack) {
c => new ShadowStackUnitTester(c)
} should be(true)
}
}
"running with --is-verbose" should "show more about what's going on in your tester" in {
iotesters.Driver.execute(Array("--is-verbose"), () => new ShadowStack) {
c => new ShadowStackUnitTester(c)
} should be(true)
}
/**
* By default verilator backend produces vcd file, and firrtl and treadle backends do not.
* Following examples show you how to turn on vcd for firrtl and treadle and how to turn it off for verilator
*/
"running with --generate-vcd-output on" should "create a vcd file from your test" in {
iotesters.Driver.execute(
Array("--generate-vcd-output", "on", "--target-dir", "test_run_dir/make_a_vcd", "--top-name", "make_a_vcd"),
() => new ShadowStack
) {
c => new ShadowStackUnitTester(c)
} should be(true)
new File("test_run_dir/make_a_vcd/make_a_vcd.vcd").exists should be (true)
}
"running with --generate-vcd-output off" should "not create a vcd file from your test" in {
iotesters.Driver.execute(
Array("--generate-vcd-output", "off", "--target-dir", "test_run_dir/make_no_vcd", "--top-name", "make_no_vcd",
"--backend-name", "verilator"),
() => new ShadowStack
) {
c => new ShadowStackUnitTester(c)
} should be(true)
new File("test_run_dir/make_no_vcd/make_a_vcd.vcd").exists should be (false)
}
}
|
kabutopia/shadow-stack
|
src/main/scala/ShadowStack.scala
|
// See README.md for license details.
package shadowstack
import chisel3._
/**
 * A shadow stack for return-address checking: on `jal` the current address
 * (`io.curentAddr`) is pushed onto an internal memory of `depth` entries; on
 * `ret` the most recent entry is popped and compared with `io.targetAddr`.
 * The registered `outputValid` flag goes false one cycle after a mismatch or
 * after a `ret` on an empty stack, and stays true otherwise.
 */
class ShadowStack(depth: Int = 32) extends Module {
val io = IO(new Bundle {
val curentAddr = Input(UInt(32.W))
val jal = Input(Bool())
val ret = Input(Bool())
val targetAddr = Input(UInt(32.W))
val outputValid = Output(Bool())
})
val savedAddr = Mem(depth, UInt(32.W))
val idx = RegInit(0.U(32.W))
  val out = RegInit(true.B)
io.outputValid := out
when(io.jal) {
// stack one
when(idx === (depth-1).U) {
// TODO internal vec overflowed,
} .otherwise {
// save the current address
savedAddr(idx) := io.curentAddr
idx := idx+1.U
}
}
when(io.ret) {
// check if we can destack
when(idx === 0.U) {
out := false.B
} .otherwise {
// Check if we return to the good location
when(io.targetAddr === savedAddr(idx-1.U)) {
out := true.B
} .otherwise {
out := false.B
}
idx := idx-1.U
}
} .otherwise {
out := true.B
}
}
object ShadowStackMain extends App {
chisel3.Driver.execute(args, () => new ShadowStack)
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala
|
package org.apache.spark.broadcast
import scala.reflect.ClassTag
import org.apache.spark.SecurityManager
import org.apache.spark.SparkConf
/**
* An interface for all the broadcast implementations in Spark (to allow
* multiple broadcast implementations). SparkContext uses a BroadcastFactory
* implementation to instantiate a particular broadcast for the entire Spark job.
*/
private[spark] trait BroadcastFactory {
def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager): Unit
/**
* Creates a new broadcast variable.
*
* @param value value to broadcast
* @param isLocal whether we are in local mode (single JVM process)
* @param id unique id representing this broadcast variable
*/
def newBroadcast[T: ClassTag](value: T, isLocal: Boolean, id: Long): Broadcast[T]
def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean): Unit
def stop(): Unit
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/history/ApplicationHistoryProvider.scala
|
package org.apache.spark.deploy.history
import java.util.concurrent.locks.ReentrantReadWriteLock
import java.util.zip.ZipOutputStream
import scala.xml.Node
import org.apache.spark.SparkException
import org.apache.spark.status.api.v1.ApplicationInfo
import org.apache.spark.ui.SparkUI
/**
* A loaded UI for a Spark application.
*
* Loaded UIs are valid once created, and can be invalidated once the history provider detects
* changes in the underlying app data (e.g. an updated event log). Invalidating a UI does not
* unload it; it just signals the [[ApplicationCache]] that the UI should not be used to serve
* new requests.
*
* Reloading of the UI with new data requires collaboration between the cache and the provider;
* the provider invalidates the UI when it detects updated information, and the cache invalidates
* the cache entry when it detects the UI has been invalidated. That will trigger a callback
* on the provider to finally clean up any UI state. The cache should hold read locks when
* using the UI, and the provider should grab the UI's write lock before making destructive
* operations.
*
* Note that all this means that an invalidated UI will still stay in-memory, and any resources it
* references will remain open, until the cache either sees that it's invalidated, or evicts it to
* make room for another UI.
*
* @param ui Spark UI
*/
private[history] case class LoadedAppUI(ui: SparkUI) {
val lock = new ReentrantReadWriteLock()
@volatile private var _valid = true
def valid: Boolean = _valid
def invalidate(): Unit = {
lock.writeLock().lock()
try {
_valid = false
} finally {
lock.writeLock().unlock()
}
}
}
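// A minimal sketch of the read-lock discipline described above, assuming a hypothetical
// caller that serves a request from a cached LoadedAppUI: take the read lock, re-check
// `valid`, and only then touch the SparkUI. Since invalidate() takes the write lock, an
// in-flight reader is never interrupted mid-request.
private[history] object LoadedAppUISketch {
  def withValidUI[T](loaded: LoadedAppUI)(body: SparkUI => T): Option[T] = {
    loaded.lock.readLock().lock()
    try {
      if (loaded.valid) Some(body(loaded.ui)) else None
    } finally {
      loaded.lock.readLock().unlock()
    }
  }
}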
private[history] abstract class ApplicationHistoryProvider {
/**
* Returns the count of application event logs that the provider is currently still processing.
   * The History Server UI can use this to indicate to a user that the application listing on the UI
* can be expected to list additional known applications once the processing of these
* application event logs completes.
*
* A History Provider that does not have a notion of count of event logs that may be pending
* for processing need not override this method.
*
* @return Count of application event logs that are currently under process
*/
def getEventLogsUnderProcess(): Int = {
0
}
/**
* Returns the time the history provider last updated the application history information
*
* @return 0 if this is undefined or unsupported, otherwise the last updated time in millis
*/
def getLastUpdatedTime(): Long = {
0
}
/**
* Returns a list of applications available for the history server to show.
*
   * @return List of all known applications.
*/
def getListing(): Iterator[ApplicationInfo]
/**
* Returns the Spark UI for a specific application.
*
* @param appId The application ID.
* @param attemptId The application attempt ID (or None if there is no attempt ID).
* @return a [[LoadedAppUI]] instance containing the application's UI and any state information
* for update probes, or `None` if the application/attempt is not found.
*/
def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI]
/**
* Called when the server is shutting down.
*/
def stop(): Unit = { }
/**
* Returns configuration data to be shown in the History Server home page.
*
   * @return A map with the configuration data. Data is shown in the order returned by the map.
*/
def getConfig(): Map[String, String] = Map()
/**
* Writes out the event logs to the output stream provided. The logs will be compressed into a
* single zip file and written out.
* @throws SparkException if the logs for the app id cannot be found.
*/
@throws(classOf[SparkException])
def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit
/**
* @return the [[ApplicationInfo]] for the appId if it exists.
*/
def getApplicationInfo(appId: String): Option[ApplicationInfo]
/**
* @return html text to display when the application list is empty
*/
def getEmptyListingHtml(): Seq[Node] = Seq.empty
/**
* Called when an application UI is unloaded from the history server.
*/
def onUIDetached(appId: String, attemptId: Option[String], ui: SparkUI): Unit = { }
}
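// A minimal sketch, not a real provider: an empty implementation showing which of the
// members above are abstract and must be supplied by a concrete history provider; all
// other methods fall back to the defaults defined in the base class.
private[history] class EmptyHistoryProviderSketch extends ApplicationHistoryProvider {
  override def getListing(): Iterator[ApplicationInfo] = Iterator.empty
  override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = None
  override def getApplicationInfo(appId: String): Option[ApplicationInfo] = None
  @throws(classOf[SparkException])
  override def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit =
    throw new SparkException(s"No event logs available for $appId")
}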
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/LeaderElectionAgent.scala
|
package org.apache.spark.deploy.master
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
*
* A LeaderElectionAgent tracks current master and is a common interface for all election Agents.
*/
@DeveloperApi
trait LeaderElectionAgent {
val masterInstance: LeaderElectable
def stop() {} // to avoid noops in implementations.
}
@DeveloperApi
trait LeaderElectable {
def electedLeader(): Unit
def revokedLeadership(): Unit
}
/** Single-node implementation of LeaderElectionAgent -- we're initially and always the leader. */
private[spark] class MonarchyLeaderAgent(val masterInstance: LeaderElectable)
extends LeaderElectionAgent {
masterInstance.electedLeader()
}
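// A minimal sketch, assuming a hypothetical LeaderElectable that only records state:
// wrapping it in a MonarchyLeaderAgent flips `isLeader` to true immediately, because
// the single-node agent calls electedLeader() from its constructor above.
private[master] class RecordingLeaderElectableSketch extends LeaderElectable {
  @volatile var isLeader: Boolean = false
  override def electedLeader(): Unit = { isLeader = true }
  override def revokedLeadership(): Unit = { isLeader = false }
}
// e.g. new MonarchyLeaderAgent(new RecordingLeaderElectableSketch) leaves isLeader == true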
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/StatusAPIImpl.scala
|
package org.apache.spark
private class SparkJobInfoImpl (
val jobId: Int,
val stageIds: Array[Int],
val status: JobExecutionStatus)
extends SparkJobInfo
private class SparkStageInfoImpl(
val stageId: Int,
val currentAttemptId: Int,
val submissionTime: Long,
val name: String,
val numTasks: Int,
val numActiveTasks: Int,
val numCompletedTasks: Int,
val numFailedTasks: Int)
extends SparkStageInfo
private class SparkExecutorInfoImpl(
val host: String,
val port: Int,
val cacheSize: Long,
val numRunningTasks: Int,
val usedOnHeapStorageMemory: Long,
val usedOffHeapStorageMemory: Long,
val totalOnHeapStorageMemory: Long,
val totalOffHeapStorageMemory: Long)
extends SparkExecutorInfo
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/ExecutorInfo.scala
|
package org.apache.spark.scheduler.cluster
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* Stores information about an executor to pass from the scheduler to SparkListeners.
*/
@DeveloperApi
class ExecutorInfo(
val executorHost: String,
val totalCores: Int,
val logUrlMap: Map[String, String]) {
def canEqual(other: Any): Boolean = other.isInstanceOf[ExecutorInfo]
override def equals(other: Any): Boolean = other match {
case that: ExecutorInfo =>
(that canEqual this) &&
executorHost == that.executorHost &&
totalCores == that.totalCores &&
logUrlMap == that.logUrlMap
case _ => false
}
override def hashCode(): Int = {
val state = Seq(executorHost, totalCores, logUrlMap)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
}
|
CrazyTechnology/spark
|
examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
|
package org.apache.spark.examples.sql
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
object SparkSQLExample {
case class Person(name: String, age: Long)
def main(args: Array[String]) {
val spark = SparkSession
.builder().master("local[*]")
.appName("Spark SQL basic example")
.config("spark.some.config.option", "some-value")
.getOrCreate()
runBasicDataFrameExample(spark)
runDatasetCreationExample(spark)
runInferSchemaExample(spark)
runProgrammaticSchemaExample(spark)
spark.stop()
}
private def runBasicDataFrameExample(spark: SparkSession): Unit = {
    val df = spark.read.json("examples/src/main/resources/people.json")
df.printSchema()
// Displays the content of the DataFrame to stdout
df.show()
// +----+-------+
// | age| name|
// +----+-------+
// |null|Michael|
// | 30| Andy|
// | 19| Justin|
// +----+-------+
// $example off:create_df$
// $example on:untyped_ops$
// This import is needed to use the $-notation
import spark.implicits._
// Print the schema in a tree format
df.printSchema()
// root
// |-- age: long (nullable = true)
// |-- name: string (nullable = true)
// Select only the "name" column
df.select("name").show()
// +-------+
// | name|
// +-------+
// |Michael|
// | Andy|
// | Justin|
// +-------+
// Select everybody, but increment the age by 1
df.select($"name", $"age" + 1).show()
// +-------+---------+
// | name|(age + 1)|
// +-------+---------+
// |Michael| null|
// | Andy| 31|
// | Justin| 20|
// +-------+---------+
// Select people older than 21
df.filter($"age" > 21).show()
// +---+----+
// |age|name|
// +---+----+
// | 30|Andy|
// +---+----+
// Count people by age
df.groupBy("age").count().show()
// +----+-----+
// | age|count|
// +----+-----+
// | 19| 1|
// |null| 1|
// | 30| 1|
// +----+-----+
// $example off:untyped_ops$
// $example on:run_sql$
// Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
val sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
// +----+-------+
// | age| name|
// +----+-------+
// |null|Michael|
// | 30| Andy|
// | 19| Justin|
// +----+-------+
// $example off:run_sql$
// $example on:global_temp_view$
// Register the DataFrame as a global temporary view
df.createGlobalTempView("people")
// Global temporary view is tied to a system preserved database `global_temp`
spark.sql("SELECT * FROM global_temp.people").show()
// +----+-------+
// | age| name|
// +----+-------+
// |null|Michael|
// | 30| Andy|
// | 19| Justin|
// +----+-------+
// Global temporary view is cross-session
spark.newSession().sql("SELECT * FROM global_temp.people").show()
// +----+-------+
// | age| name|
// +----+-------+
// |null|Michael|
// | 30| Andy|
// | 19| Justin|
// +----+-------+
// $example off:global_temp_view$
}
private def runDatasetCreationExample(spark: SparkSession): Unit = {
import spark.implicits._
// $example on:create_ds$
// Encoders are created for case classes
val caseClassDS = Seq(Person("Andy", 32)).toDS()
caseClassDS.show()
// +----+---+
// |name|age|
// +----+---+
// |Andy| 32|
// +----+---+
// Encoders for most common types are automatically provided by importing spark.implicits._
val primitiveDS = Seq(1, 2, 3).toDS()
primitiveDS.map(_ + 1).collect() // Returns: Array(2, 3, 4)
// DataFrames can be converted to a Dataset by providing a class. Mapping will be done by name
val path = "examples/src/main/resources/people.json"
val peopleDS = spark.read.json(path).as[Person]
peopleDS.show()
// +----+-------+
// | age| name|
// +----+-------+
// |null|Michael|
// | 30| Andy|
// | 19| Justin|
// +----+-------+
// $example off:create_ds$
}
private def runInferSchemaExample(spark: SparkSession): Unit = {
// $example on:schema_inferring$
// For implicit conversions from RDDs to DataFrames
import spark.implicits._
// Create an RDD of Person objects from a text file, convert it to a Dataframe
val peopleDF = spark.sparkContext
.textFile("examples/src/main/resources/people.txt")
.map(_.split(","))
.map(attributes => Person(attributes(0), attributes(1).trim.toInt))
.toDF()
// Register the DataFrame as a temporary view
peopleDF.createOrReplaceTempView("people")
// SQL statements can be run by using the sql methods provided by Spark
val teenagersDF = spark.sql("SELECT name, age FROM people WHERE age BETWEEN 13 AND 19")
// The columns of a row in the result can be accessed by field index
teenagersDF.map(teenager => "Name: " + teenager(0)).show()
// +------------+
// | value|
// +------------+
// |Name: Justin|
// +------------+
// or by field name
teenagersDF.map(teenager => "Name: " + teenager.getAs[String]("name")).show()
// +------------+
// | value|
// +------------+
// |Name: Justin|
// +------------+
// No pre-defined encoders for Dataset[Map[K,V]], define explicitly
implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]]
    // Primitive types and case classes can also be defined as
// implicit val stringIntMapEncoder: Encoder[Map[String, Any]] = ExpressionEncoder()
// row.getValuesMap[T] retrieves multiple columns at once into a Map[String, T]
teenagersDF.map(teenager => teenager.getValuesMap[Any](List("name", "age"))).collect()
// Array(Map("name" -> "Justin", "age" -> 19))
// $example off:schema_inferring$
}
private def runProgrammaticSchemaExample(spark: SparkSession): Unit = {
import spark.implicits._
// $example on:programmatic_schema$
// Create an RDD
val peopleRDD = spark.sparkContext.textFile("examples/src/main/resources/people.txt")
// The schema is encoded in a string
val schemaString = "name age"
// Generate the schema based on the string of schema
val fields = schemaString.split(" ")
.map(fieldName => StructField(fieldName, StringType, nullable = true))
val schema = StructType(fields)
// Convert records of the RDD (people) to Rows
val rowRDD = peopleRDD
.map(_.split(","))
.map(attributes => Row(attributes(0), attributes(1).trim))
// Apply the schema to the RDD
val peopleDF = spark.createDataFrame(rowRDD, schema)
// Creates a temporary view using the DataFrame
peopleDF.createOrReplaceTempView("people")
// SQL can be run over a temporary view created using DataFrames
val results = spark.sql("SELECT name FROM people")
// The results of SQL queries are DataFrames and support all the normal RDD operations
// The columns of a row in the result can be accessed by field index or by field name
results.map(attributes => "Name: " + attributes(0)).show()
// +-------------+
// | value|
// +-------------+
// |Name: Michael|
// | Name: Andy|
// | Name: Justin|
// +-------------+
// $example off:programmatic_schema$
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/memory/MemoryManager.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory
import javax.annotation.concurrent.GuardedBy
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.storage.BlockId
import org.apache.spark.storage.memory.MemoryStore
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.memory.MemoryAllocator
/**
* An abstract memory manager that enforces how memory is shared between execution and storage.
* In this context, execution memory refers to that used for computation in shuffles, joins,
* sorts and aggregations, while storage memory refers to that used for caching and propagating
* internal data across the cluster. There exists one MemoryManager per JVM.
*/
private[spark] abstract class MemoryManager(
conf: SparkConf,
numCores: Int,
onHeapStorageMemory: Long,
onHeapExecutionMemory: Long) extends Logging {
// -- Methods related to memory allocation policies and bookkeeping ------------------------------
@GuardedBy("this")
protected val onHeapStorageMemoryPool = new StorageMemoryPool(this, MemoryMode.ON_HEAP)
@GuardedBy("this")
protected val offHeapStorageMemoryPool = new StorageMemoryPool(this, MemoryMode.OFF_HEAP)
@GuardedBy("this")
protected val onHeapExecutionMemoryPool = new ExecutionMemoryPool(this, MemoryMode.ON_HEAP)
@GuardedBy("this")
protected val offHeapExecutionMemoryPool = new ExecutionMemoryPool(this, MemoryMode.OFF_HEAP)
onHeapStorageMemoryPool.incrementPoolSize(onHeapStorageMemory)
onHeapExecutionMemoryPool.incrementPoolSize(onHeapExecutionMemory)
protected[this] val maxOffHeapMemory = conf.get(MEMORY_OFFHEAP_SIZE)
protected[this] val offHeapStorageMemory =
(maxOffHeapMemory * conf.getDouble("spark.memory.storageFraction", 0.5)).toLong
offHeapExecutionMemoryPool.incrementPoolSize(maxOffHeapMemory - offHeapStorageMemory)
offHeapStorageMemoryPool.incrementPoolSize(offHeapStorageMemory)
/**
* Total available on heap memory for storage, in bytes. This amount can vary over time,
* depending on the MemoryManager implementation.
* In this model, this is equivalent to the amount of memory not occupied by execution.
*/
def maxOnHeapStorageMemory: Long
/**
* Total available off heap memory for storage, in bytes. This amount can vary over time,
* depending on the MemoryManager implementation.
*/
def maxOffHeapStorageMemory: Long
/**
* Set the [[MemoryStore]] used by this manager to evict cached blocks.
* This must be set after construction due to initialization ordering constraints.
*/
final def setMemoryStore(store: MemoryStore): Unit = synchronized {
onHeapStorageMemoryPool.setMemoryStore(store)
offHeapStorageMemoryPool.setMemoryStore(store)
}
/**
* Acquire N bytes of memory to cache the given block, evicting existing ones if necessary.
*
* @return whether all N bytes were successfully granted.
*/
def acquireStorageMemory(blockId: BlockId, numBytes: Long, memoryMode: MemoryMode): Boolean
/**
* Acquire N bytes of memory to unroll the given block, evicting existing ones if necessary.
*
* This extra method allows subclasses to differentiate behavior between acquiring storage
* memory and acquiring unroll memory. For instance, the memory management model in Spark
* 1.5 and before places a limit on the amount of space that can be freed from unrolling.
*
* @return whether all N bytes were successfully granted.
*/
def acquireUnrollMemory(blockId: BlockId, numBytes: Long, memoryMode: MemoryMode): Boolean
/**
* Try to acquire up to `numBytes` of execution memory for the current task and return the
* number of bytes obtained, or 0 if none can be allocated.
*
* This call may block until there is enough free memory in some situations, to make sure each
* task has a chance to ramp up to at least 1 / 2N of the total memory pool (where N is the # of
 * active tasks) before it is forced to spill. This can happen if the number of tasks increases
 * but an older task had already claimed a lot of memory.
*/
private[memory]
def acquireExecutionMemory(
numBytes: Long,
taskAttemptId: Long,
memoryMode: MemoryMode): Long
/**
* Release numBytes of execution memory belonging to the given task.
*/
private[memory]
def releaseExecutionMemory(
numBytes: Long,
taskAttemptId: Long,
memoryMode: MemoryMode): Unit = synchronized {
memoryMode match {
case MemoryMode.ON_HEAP => onHeapExecutionMemoryPool.releaseMemory(numBytes, taskAttemptId)
case MemoryMode.OFF_HEAP => offHeapExecutionMemoryPool.releaseMemory(numBytes, taskAttemptId)
}
}
/**
* Release all memory for the given task and mark it as inactive (e.g. when a task ends).
*
* @return the number of bytes freed.
*/
private[memory] def releaseAllExecutionMemoryForTask(taskAttemptId: Long): Long = synchronized {
onHeapExecutionMemoryPool.releaseAllMemoryForTask(taskAttemptId) +
offHeapExecutionMemoryPool.releaseAllMemoryForTask(taskAttemptId)
}
/**
* Release N bytes of storage memory.
*/
def releaseStorageMemory(numBytes: Long, memoryMode: MemoryMode): Unit = synchronized {
memoryMode match {
case MemoryMode.ON_HEAP => onHeapStorageMemoryPool.releaseMemory(numBytes)
case MemoryMode.OFF_HEAP => offHeapStorageMemoryPool.releaseMemory(numBytes)
}
}
/**
* Release all storage memory acquired.
*/
final def releaseAllStorageMemory(): Unit = synchronized {
onHeapStorageMemoryPool.releaseAllMemory()
offHeapStorageMemoryPool.releaseAllMemory()
}
/**
* Release N bytes of unroll memory.
*/
final def releaseUnrollMemory(numBytes: Long, memoryMode: MemoryMode): Unit = synchronized {
releaseStorageMemory(numBytes, memoryMode)
}
/**
* Execution memory currently in use, in bytes.
*/
final def executionMemoryUsed: Long = synchronized {
onHeapExecutionMemoryPool.memoryUsed + offHeapExecutionMemoryPool.memoryUsed
}
/**
* Storage memory currently in use, in bytes.
*/
final def storageMemoryUsed: Long = synchronized {
onHeapStorageMemoryPool.memoryUsed + offHeapStorageMemoryPool.memoryUsed
}
/**
* On heap execution memory currently in use, in bytes.
*/
final def onHeapExecutionMemoryUsed: Long = synchronized {
onHeapExecutionMemoryPool.memoryUsed
}
/**
* Off heap execution memory currently in use, in bytes.
*/
final def offHeapExecutionMemoryUsed: Long = synchronized {
offHeapExecutionMemoryPool.memoryUsed
}
/**
* On heap storage memory currently in use, in bytes.
*/
final def onHeapStorageMemoryUsed: Long = synchronized {
onHeapStorageMemoryPool.memoryUsed
}
/**
* Off heap storage memory currently in use, in bytes.
*/
final def offHeapStorageMemoryUsed: Long = synchronized {
offHeapStorageMemoryPool.memoryUsed
}
/**
* Returns the execution memory consumption, in bytes, for the given task.
*/
private[memory] def getExecutionMemoryUsageForTask(taskAttemptId: Long): Long = synchronized {
onHeapExecutionMemoryPool.getMemoryUsageForTask(taskAttemptId) +
offHeapExecutionMemoryPool.getMemoryUsageForTask(taskAttemptId)
}
// -- Fields related to Tungsten managed memory -------------------------------------------------
/**
* Tracks whether Tungsten memory will be allocated on the JVM heap or off-heap using
* sun.misc.Unsafe.
*/
final val tungstenMemoryMode: MemoryMode = {
if (conf.get(MEMORY_OFFHEAP_ENABLED)) {
require(conf.get(MEMORY_OFFHEAP_SIZE) > 0,
"spark.memory.offHeap.size must be > 0 when spark.memory.offHeap.enabled == true")
require(Platform.unaligned(),
"No support for unaligned Unsafe. Set spark.memory.offHeap.enabled to false.")
MemoryMode.OFF_HEAP
} else {
MemoryMode.ON_HEAP
}
}
/**
* The default page size, in bytes.
*
 * If the user didn't explicitly set "spark.buffer.pageSize", we figure out the default value
 * by looking at the number of cores available to the process and the total amount of memory,
 * and then dividing by a safety factor.
*/
val pageSizeBytes: Long = {
val minPageSize = 1L * 1024 * 1024 // 1MB
val maxPageSize = 64L * minPageSize // 64MB
val cores = if (numCores > 0) numCores else Runtime.getRuntime.availableProcessors()
    // Because of rounding to the next power of 2, the effective safety factor may be 8 in the worst case
val safetyFactor = 16
val maxTungstenMemory: Long = tungstenMemoryMode match {
case MemoryMode.ON_HEAP => onHeapExecutionMemoryPool.poolSize
case MemoryMode.OFF_HEAP => offHeapExecutionMemoryPool.poolSize
}
val size = ByteArrayMethods.nextPowerOf2(maxTungstenMemory / cores / safetyFactor)
val default = math.min(maxPageSize, math.max(minPageSize, size))
conf.getSizeAsBytes("spark.buffer.pageSize", default)
}
/**
* Allocates memory for use by Unsafe/Tungsten code.
*/
private[memory] final val tungstenMemoryAllocator: MemoryAllocator = {
tungstenMemoryMode match {
case MemoryMode.ON_HEAP => MemoryAllocator.HEAP
case MemoryMode.OFF_HEAP => MemoryAllocator.UNSAFE
}
}
}
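As a rough illustration of the pageSizeBytes default above (the numbers are made up): with 4 cores, a 2 GiB on-heap execution pool and the safety factor of 16, the per-core share is 2 GiB / 4 / 16 = 32 MiB, already a power of two, so after clamping into [1 MiB, 64 MiB] the default page size is 32 MiB. A standalone sketch of the same arithmetic, with nextPowerOf2 re-implemented locally:
object PageSizeSketch {
  // Round up to the next power of two (mirrors ByteArrayMethods.nextPowerOf2).
  def nextPowerOf2(n: Long): Long = {
    val highBit = java.lang.Long.highestOneBit(n)
    if (highBit == n) n else highBit << 1
  }
  def main(args: Array[String]): Unit = {
    val minPageSize = 1L * 1024 * 1024                 // 1 MiB
    val maxPageSize = 64L * minPageSize                // 64 MiB
    val cores = 4                                      // assumed core count
    val safetyFactor = 16
    val maxTungstenMemory = 2L * 1024 * 1024 * 1024    // assumed 2 GiB execution pool
    val size = nextPowerOf2(maxTungstenMemory / cores / safetyFactor)
    val default = math.min(maxPageSize, math.max(minPageSize, size))
    println(default)                                   // 33554432 bytes = 32 MiB
  }
}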
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/SerializableWritable.scala
|
package org.apache.spark
import java.io._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.ObjectWritable
import org.apache.hadoop.io.Writable
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils
@DeveloperApi
class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
def value: T = t
override def toString: String = t.toString
private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
out.defaultWriteObject()
new ObjectWritable(t).write(out)
}
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
in.defaultReadObject()
val ow = new ObjectWritable()
ow.setConf(new Configuration(false))
ow.readFields(in)
t = ow.get().asInstanceOf[T]
}
}
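A small usage sketch (not part of Spark): wrap a Hadoop Writable and round-trip it through plain Java serialization, which is what the custom writeObject/readObject above make possible. The object name is hypothetical.
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import org.apache.hadoop.io.IntWritable
import org.apache.spark.SerializableWritable
object SerializableWritableSketch {
  def main(args: Array[String]): Unit = {
    val wrapped = new SerializableWritable(new IntWritable(42))
    val bytes = new ByteArrayOutputStream()
    val out = new ObjectOutputStream(bytes)
    out.writeObject(wrapped)    // the Writable itself is written via ObjectWritable
    out.close()
    val in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray))
    val restored = in.readObject().asInstanceOf[SerializableWritable[IntWritable]]
    println(restored.value.get())   // 42
  }
}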
|
CrazyTechnology/spark
|
sql/core/src/main/scala/org/apache/spark/sql/package.scala
|
package org.apache.spark
import org.apache.spark.annotation.{DeveloperApi, Unstable}
import org.apache.spark.sql.execution.SparkStrategy
/**
* Allows the execution of relational queries, including those expressed in SQL using Spark.
* @groupname dataType Data types
* @groupdesc Spark SQL data types.
* @groupprio dataType -3
* @groupname field Field
* @groupprio field -2
* @groupname row Row
* @groupprio row -1
*/
package object sql {
/**
* Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting
* with the query planner and is not designed to be stable across spark releases. Developers
* writing libraries should instead consider using the stable APIs provided in
* [[org.apache.spark.sql.sources]]
*/
@DeveloperApi
@Unstable
type Strategy = SparkStrategy
type DataFrame = Dataset[Row]
/**
* Metadata key which is used to write Spark version in the followings:
* - Parquet file metadata
* - ORC file metadata
*
* Note that Hive table property `spark.sql.create.version` also has Spark version.
*/
private[sql] val SPARK_VERSION_METADATA_KEY = "org.apache.spark.version"
}
|
CrazyTechnology/spark
|
resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnScheduler.scala
|
package org.apache.spark.scheduler.cluster
import org.apache.hadoop.yarn.util.RackResolver
import org.apache.log4j.{Level, Logger}
import org.apache.spark._
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.util.Utils
private[spark] class YarnScheduler(sc: SparkContext) extends TaskSchedulerImpl(sc) {
// RackResolver logs an INFO message whenever it resolves a rack, which is way too often.
if (Logger.getLogger(classOf[RackResolver]).getLevel == null) {
Logger.getLogger(classOf[RackResolver]).setLevel(Level.WARN)
}
// By default, rack is unknown
override def getRackForHost(hostPort: String): Option[String] = {
val host = Utils.parseHostPort(hostPort)._1
Option(RackResolver.resolve(sc.hadoopConfiguration, host).getNetworkLocation)
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/ExecutorDesc.scala
|
package org.apache.spark.deploy.master
import org.apache.spark.deploy.{ExecutorDescription, ExecutorState}
private[master] class ExecutorDesc(
val id: Int,
val application: ApplicationInfo,
val worker: WorkerInfo,
val cores: Int,
val memory: Int) {
var state = ExecutorState.LAUNCHING
/** Copy all state (non-val) variables from the given on-the-wire ExecutorDescription. */
def copyState(execDesc: ExecutorDescription) {
state = execDesc.state
}
def fullId: String = application.id + "/" + id
override def equals(other: Any): Boolean = {
other match {
case info: ExecutorDesc =>
fullId == info.fullId &&
worker.id == info.worker.id &&
cores == info.cores &&
memory == info.memory
case _ => false
}
}
override def toString: String = fullId
override def hashCode: Int = toString.hashCode()
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
|
package org.apache.spark.deploy.master
import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.spark.metrics.source.Source
private[spark] class MasterSource(val master: Master) extends Source {
override val metricRegistry = new MetricRegistry()
override val sourceName = "master"
// Gauge for worker numbers in cluster
metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
override def getValue: Int = master.workers.size
})
// Gauge for alive worker numbers in cluster
metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int]{
override def getValue: Int = master.workers.count(_.state == WorkerState.ALIVE)
})
// Gauge for application numbers in cluster
metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
override def getValue: Int = master.apps.size
})
// Gauge for waiting application numbers in cluster
metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
override def getValue: Int = master.apps.count(_.state == ApplicationState.WAITING)
})
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/WorkerInfo.scala
|
package org.apache.spark.deploy.master
import scala.collection.mutable
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.util.Utils
private[spark] class WorkerInfo(
val id: String,
val host: String,
val port: Int,
val cores: Int,
val memory: Int,
val endpoint: RpcEndpointRef,
val webUiAddress: String)
extends Serializable {
Utils.checkHost(host)
assert (port > 0)
@transient var executors: mutable.HashMap[String, ExecutorDesc] = _ // executorId => info
@transient var drivers: mutable.HashMap[String, DriverInfo] = _ // driverId => info
@transient var state: WorkerState.Value = _
@transient var coresUsed: Int = _
@transient var memoryUsed: Int = _
@transient var lastHeartbeat: Long = _
init()
def coresFree: Int = cores - coresUsed
def memoryFree: Int = memory - memoryUsed
private def readObject(in: java.io.ObjectInputStream): Unit = Utils.tryOrIOException {
in.defaultReadObject()
init()
}
private def init() {
executors = new mutable.HashMap
drivers = new mutable.HashMap
state = WorkerState.ALIVE
coresUsed = 0
memoryUsed = 0
lastHeartbeat = System.currentTimeMillis()
}
def hostPort: String = {
assert (port > 0)
host + ":" + port
}
def addExecutor(exec: ExecutorDesc) {
executors(exec.fullId) = exec
coresUsed += exec.cores
memoryUsed += exec.memory
}
def removeExecutor(exec: ExecutorDesc) {
if (executors.contains(exec.fullId)) {
executors -= exec.fullId
coresUsed -= exec.cores
memoryUsed -= exec.memory
}
}
def hasExecutor(app: ApplicationInfo): Boolean = {
executors.values.exists(_.application == app)
}
def addDriver(driver: DriverInfo) {
drivers(driver.id) = driver
memoryUsed += driver.desc.mem
coresUsed += driver.desc.cores
}
def removeDriver(driver: DriverInfo) {
drivers -= driver.id
memoryUsed -= driver.desc.mem
coresUsed -= driver.desc.cores
}
def setState(state: WorkerState.Value): Unit = {
this.state = state
}
def isAlive(): Boolean = this.state == WorkerState.ALIVE
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/scheduler/TaskDescription.scala
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{DataInputStream, DataOutputStream}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, Map}
import org.apache.spark.util.{ByteBufferInputStream, ByteBufferOutputStream, Utils}
/**
* Description of a task that gets passed onto executors to be executed, usually created by
* `TaskSetManager.resourceOffer`.
* TaskDescriptions and the associated Task need to be serialized carefully for two reasons:
* (1) When a TaskDescription is received by an Executor, the Executor needs to first get the
* list of JARs and files and add these to the classpath, and set the properties, before
* deserializing the Task object (serializedTask). This is why the Properties are included
* in the TaskDescription, even though they're also in the serialized task.
* (2) Because a TaskDescription is serialized and sent to an executor for each task, efficient
* serialization (both in terms of serialization time and serialized buffer size) is
* important. For this reason, we serialize TaskDescriptions ourselves with the
* TaskDescription.encode and TaskDescription.decode methods. This results in a smaller
* serialized size because it avoids serializing unnecessary fields in the Map objects
* (which can introduce significant overhead when the maps are small).
*/
private[spark] class TaskDescription(
val taskId: Long,
val attemptNumber: Int,
val executorId: String,
val name: String,
val index: Int, // Index within this task's TaskSet
val partitionId: Int,
val addedFiles: Map[String, Long],
val addedJars: Map[String, Long],
val properties: Properties,
val serializedTask: ByteBuffer) {
override def toString: String = "TaskDescription(TID=%d, index=%d)".format(taskId, index)
}
private[spark] object TaskDescription {
private def serializeStringLongMap(map: Map[String, Long], dataOut: DataOutputStream): Unit = {
dataOut.writeInt(map.size)
for ((key, value) <- map) {
dataOut.writeUTF(key)
dataOut.writeLong(value)
}
}
def encode(taskDescription: TaskDescription): ByteBuffer = {
val bytesOut = new ByteBufferOutputStream(4096)
val dataOut = new DataOutputStream(bytesOut)
dataOut.writeLong(taskDescription.taskId)
dataOut.writeInt(taskDescription.attemptNumber)
dataOut.writeUTF(taskDescription.executorId)
dataOut.writeUTF(taskDescription.name)
dataOut.writeInt(taskDescription.index)
dataOut.writeInt(taskDescription.partitionId)
// Write files.
serializeStringLongMap(taskDescription.addedFiles, dataOut)
// Write jars.
serializeStringLongMap(taskDescription.addedJars, dataOut)
// Write properties.
dataOut.writeInt(taskDescription.properties.size())
taskDescription.properties.asScala.foreach { case (key, value) =>
dataOut.writeUTF(key)
// SPARK-19796 -- writeUTF doesn't work for long strings, which can happen for property values
val bytes = value.getBytes(StandardCharsets.UTF_8)
dataOut.writeInt(bytes.length)
dataOut.write(bytes)
}
// Write the task. The task is already serialized, so write it directly to the byte buffer.
Utils.writeByteBuffer(taskDescription.serializedTask, bytesOut)
dataOut.close()
bytesOut.close()
bytesOut.toByteBuffer
}
private def deserializeStringLongMap(dataIn: DataInputStream): HashMap[String, Long] = {
val map = new HashMap[String, Long]()
val mapSize = dataIn.readInt()
for (i <- 0 until mapSize) {
map(dataIn.readUTF()) = dataIn.readLong()
}
map
}
def decode(byteBuffer: ByteBuffer): TaskDescription = {
val dataIn = new DataInputStream(new ByteBufferInputStream(byteBuffer))
val taskId = dataIn.readLong()
val attemptNumber = dataIn.readInt()
val executorId = dataIn.readUTF()
val name = dataIn.readUTF()
val index = dataIn.readInt()
val partitionId = dataIn.readInt()
// Read files.
val taskFiles = deserializeStringLongMap(dataIn)
// Read jars.
val taskJars = deserializeStringLongMap(dataIn)
// Read properties.
val properties = new Properties()
val numProperties = dataIn.readInt()
for (i <- 0 until numProperties) {
val key = dataIn.readUTF()
val valueLength = dataIn.readInt()
val valueBytes = new Array[Byte](valueLength)
dataIn.readFully(valueBytes)
properties.setProperty(key, new String(valueBytes, StandardCharsets.UTF_8))
}
// Create a sub-buffer for the serialized task into its own buffer (to be deserialized later).
val serializedTask = byteBuffer.slice()
new TaskDescription(taskId, attemptNumber, executorId, name, index, partitionId, taskFiles,
taskJars, properties, serializedTask)
}
}
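A rough round-trip sketch (not part of Spark) showing encode/decode preserving the scalar fields and properties. TaskDescription is private[spark], so the sketch assumes it sits under the org.apache.spark.scheduler package; the object name is hypothetical.
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.Properties
import scala.collection.mutable.HashMap
private[spark] object TaskDescriptionRoundTrip {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("spark.job.description", "demo")
    val original = new TaskDescription(
      taskId = 1L,
      attemptNumber = 0,
      executorId = "exec-1",
      name = "task 1.0 in stage 0.0",
      index = 0,
      partitionId = 0,
      addedFiles = new HashMap[String, Long](),
      addedJars = new HashMap[String, Long](),
      properties = props,
      serializedTask = ByteBuffer.wrap(Array[Byte](1, 2, 3)))
    val decoded = TaskDescription.decode(TaskDescription.encode(original))
    // The custom binary format restores the scalar fields, maps and properties.
    assert(decoded.taskId == original.taskId && decoded.executorId == original.executorId)
    assert(decoded.properties.getProperty("spark.job.description") == "demo")
  }
}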
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcastFactory.scala
|
package org.apache.spark.broadcast
import scala.reflect.ClassTag
import org.apache.spark.{SecurityManager, SparkConf}
/**
* A [[org.apache.spark.broadcast.Broadcast]] implementation that uses a BitTorrent-like
* protocol to do a distributed transfer of the broadcasted data to the executors. Refer to
* [[org.apache.spark.broadcast.TorrentBroadcast]] for more details.
*/
private[spark] class TorrentBroadcastFactory extends BroadcastFactory {
override def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager) { }
override def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean, id: Long): Broadcast[T] = {
new TorrentBroadcast[T](value_, id)
}
override def stop() { }
/**
* Remove all persisted state associated with the torrent broadcast with the given ID.
* @param removeFromDriver Whether to remove state from the driver.
* @param blocking Whether to block until unbroadcasted
*/
override def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
TorrentBroadcast.unpersist(id, removeFromDriver, blocking)
}
}
|
CrazyTechnology/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GeneratePredicate.scala
|
package org.apache.spark.sql.catalyst.expressions.codegen
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
/**
* Interface for generated predicate
*/
abstract class Predicate {
def eval(r: InternalRow): Boolean
/**
* Initializes internal states given the current partition index.
* This is used by nondeterministic expressions to set initial states.
* The default implementation does nothing.
*/
def initialize(partitionIndex: Int): Unit = {}
}
/**
* Generates bytecode that evaluates a boolean [[Expression]] on a given input [[InternalRow]].
*/
object GeneratePredicate extends CodeGenerator[Expression, Predicate] {
  // canonicalize: normalize the expression before binding and code generation
protected def canonicalize(in: Expression): Expression = ExpressionCanonicalizer.execute(in)
protected def bind(in: Expression, inputSchema: Seq[Attribute]): Expression =
BindReferences.bindReference(in, inputSchema)
protected def create(predicate: Expression): Predicate = {
val ctx = newCodeGenContext()
val eval = predicate.genCode(ctx)
val codeBody = s"""
public SpecificPredicate generate(Object[] references) {
return new SpecificPredicate(references);
}
class SpecificPredicate extends ${classOf[Predicate].getName} {
private final Object[] references;
${ctx.declareMutableStates()}
public SpecificPredicate(Object[] references) {
this.references = references;
${ctx.initMutableStates()}
}
public void initialize(int partitionIndex) {
${ctx.initPartition()}
}
public boolean eval(InternalRow ${ctx.INPUT_ROW}) {
${eval.code}
return !${eval.isNull} && ${eval.value};
}
${ctx.declareAddedFunctions()}
}"""
val code = CodeFormatter.stripOverlappingComments(
new CodeAndComment(codeBody, ctx.getPlaceHolderToComments()))
logDebug(s"Generated predicate '$predicate':\n${CodeFormatter.format(code)}")
val (clazz, _) = CodeGenerator.compile(code)
clazz.generate(ctx.references.toArray).asInstanceOf[Predicate]
}
}
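A rough usage sketch (not part of Spark) of the code generator above. These are internal catalyst APIs, so the exact signatures may differ between Spark versions; the sketch compiles the predicate "a = 1" against a single int column and evaluates it on two rows.
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal}
import org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate
import org.apache.spark.sql.types.IntegerType
object GeneratePredicateSketch {
  def main(args: Array[String]): Unit = {
    val a = AttributeReference("a", IntegerType)()
    // generate(expr, inputSchema) binds the attribute to column 0 and compiles the predicate.
    val predicate = GeneratePredicate.generate(EqualTo(a, Literal(1)), Seq(a))
    println(predicate.eval(InternalRow(1)))   // true
    println(predicate.eval(InternalRow(2)))   // false
  }
}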
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/FileSystemPersistenceEngine.scala
|
package org.apache.spark.deploy.master
import java.io._
import scala.reflect.ClassTag
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, Serializer}
import org.apache.spark.util.Utils
/**
* Stores data in a single on-disk directory with one file per application and worker.
* Files are deleted when applications and workers are removed.
*
* @param dir Directory to store files. Created if non-existent (but not recursively).
* @param serializer Used to serialize our objects.
*/
private[master] class FileSystemPersistenceEngine(
val dir: String,
val serializer: Serializer)
extends PersistenceEngine with Logging {
new File(dir).mkdir()
override def persist(name: String, obj: Object): Unit = {
serializeIntoFile(new File(dir + File.separator + name), obj)
}
override def unpersist(name: String): Unit = {
val f = new File(dir + File.separator + name)
if (!f.delete()) {
logWarning(s"Error deleting ${f.getPath()}")
}
}
override def read[T: ClassTag](prefix: String): Seq[T] = {
val files = new File(dir).listFiles().filter(_.getName.startsWith(prefix))
files.map(deserializeFromFile[T])
}
private def serializeIntoFile(file: File, value: AnyRef) {
val created = file.createNewFile()
if (!created) { throw new IllegalStateException("Could not create file: " + file) }
val fileOut = new FileOutputStream(file)
var out: SerializationStream = null
Utils.tryWithSafeFinally {
out = serializer.newInstance().serializeStream(fileOut)
out.writeObject(value)
} {
fileOut.close()
if (out != null) {
out.close()
}
}
}
private def deserializeFromFile[T](file: File)(implicit m: ClassTag[T]): T = {
val fileIn = new FileInputStream(file)
var in: DeserializationStream = null
try {
in = serializer.newInstance().deserializeStream(fileIn)
in.readObject[T]()
} finally {
fileIn.close()
if (in != null) {
in.close()
}
}
}
}
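A small persist/read sketch (not part of Spark). The engine is private[master], so the sketch assumes it sits in org.apache.spark.deploy.master and uses spark-core's JavaSerializer; the object name and directory are hypothetical.
package org.apache.spark.deploy.master
import org.apache.spark.SparkConf
import org.apache.spark.serializer.JavaSerializer
private[master] object FsPersistenceSketch {
  def main(args: Array[String]): Unit = {
    val dir = java.nio.file.Files.createTempDirectory("persist-demo").toString
    val engine = new FileSystemPersistenceEngine(dir, new JavaSerializer(new SparkConf(false)))
    engine.persist("app_demo", "some serializable state")
    // read() deserializes every file in the directory whose name starts with the prefix.
    val restored: Seq[String] = engine.read[String]("app_")
    println(restored)             // one element: "some serializable state"
    engine.unpersist("app_demo")
  }
}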
|
CrazyTechnology/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/InternalRow.scala
|
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* An abstract class for row used internally in Spark SQL, which only contains the columns as
* internal types.
*/
abstract class InternalRow extends SpecializedGetters with Serializable {
def numFields: Int
  // This is only used for tests and will throw a null pointer exception if the value at the position is null.
def getString(ordinal: Int): String = getUTF8String(ordinal).toString
def setNullAt(i: Int): Unit
/**
* Updates the value at column `i`. Note that after updating, the given value will be kept in this
* row, and the caller side should guarantee that this value won't be changed afterwards.
*/
def update(i: Int, value: Any): Unit
// default implementation (slow)
def setBoolean(i: Int, value: Boolean): Unit = update(i, value)
def setByte(i: Int, value: Byte): Unit = update(i, value)
def setShort(i: Int, value: Short): Unit = update(i, value)
def setInt(i: Int, value: Int): Unit = update(i, value)
def setLong(i: Int, value: Long): Unit = update(i, value)
def setFloat(i: Int, value: Float): Unit = update(i, value)
def setDouble(i: Int, value: Double): Unit = update(i, value)
/**
* Update the decimal column at `i`.
*
 * Note: in order to support updating a decimal with precision > 18 in UnsafeRow, do NOT call
 * setNullAt() for a decimal column on UnsafeRow; call setDecimal(i, null, precision) instead.
*/
def setDecimal(i: Int, value: Decimal, precision: Int) { update(i, value) }
/**
* Make a copy of the current [[InternalRow]] object.
*/
def copy(): InternalRow
/** Returns true if there are any NULL values in this row. */
def anyNull: Boolean = {
val len = numFields
var i = 0
while (i < len) {
if (isNullAt(i)) { return true }
i += 1
}
false
}
/* ---------------------- utility methods for Scala ---------------------- */
/**
* Return a Scala Seq representing the row. Elements are placed in the same order in the Seq.
*/
def toSeq(fieldTypes: Seq[DataType]): Seq[Any] = {
val len = numFields
assert(len == fieldTypes.length)
val values = new Array[Any](len)
var i = 0
while (i < len) {
values(i) = get(i, fieldTypes(i))
i += 1
}
values
}
def toSeq(schema: StructType): Seq[Any] = toSeq(schema.map(_.dataType))
}
object InternalRow {
/**
* This method can be used to construct a [[InternalRow]] with the given values.
*/
def apply(values: Any*): InternalRow = new GenericInternalRow(values.toArray)
/**
* This method can be used to construct a [[InternalRow]] from a [[Seq]] of values.
*/
def fromSeq(values: Seq[Any]): InternalRow = new GenericInternalRow(values.toArray)
/** Returns an empty [[InternalRow]]. */
val empty = apply()
/**
* Copies the given value if it's string/struct/array/map type.
*/
def copyValue(value: Any): Any = value match {
case v: UTF8String => v.copy()
case v: InternalRow => v.copy()
case v: ArrayData => v.copy()
case v: MapData => v.copy()
case _ => value
}
/**
* Returns an accessor for an `InternalRow` with given data type. The returned accessor
* actually takes a `SpecializedGetters` input because it can be generalized to other classes
 * that implement `SpecializedGetters` (e.g., `ArrayData`) too.
*/
def getAccessor(dt: DataType, nullable: Boolean = true): (SpecializedGetters, Int) => Any = {
val getValueNullSafe: (SpecializedGetters, Int) => Any = dt match {
case BooleanType => (input, ordinal) => input.getBoolean(ordinal)
case ByteType => (input, ordinal) => input.getByte(ordinal)
case ShortType => (input, ordinal) => input.getShort(ordinal)
case IntegerType | DateType => (input, ordinal) => input.getInt(ordinal)
case LongType | TimestampType => (input, ordinal) => input.getLong(ordinal)
case FloatType => (input, ordinal) => input.getFloat(ordinal)
case DoubleType => (input, ordinal) => input.getDouble(ordinal)
case StringType => (input, ordinal) => input.getUTF8String(ordinal)
case BinaryType => (input, ordinal) => input.getBinary(ordinal)
case CalendarIntervalType => (input, ordinal) => input.getInterval(ordinal)
case t: DecimalType => (input, ordinal) => input.getDecimal(ordinal, t.precision, t.scale)
case t: StructType => (input, ordinal) => input.getStruct(ordinal, t.size)
case _: ArrayType => (input, ordinal) => input.getArray(ordinal)
case _: MapType => (input, ordinal) => input.getMap(ordinal)
case u: UserDefinedType[_] => getAccessor(u.sqlType, nullable)
case _ => (input, ordinal) => input.get(ordinal, dt)
}
if (nullable) {
(getter, index) => {
if (getter.isNullAt(index)) {
null
} else {
getValueNullSafe(getter, index)
}
}
} else {
getValueNullSafe
}
}
/**
* Returns a writer for an `InternalRow` with given data type.
*/
def getWriter(ordinal: Int, dt: DataType): (InternalRow, Any) => Unit = dt match {
case BooleanType => (input, v) => input.setBoolean(ordinal, v.asInstanceOf[Boolean])
case ByteType => (input, v) => input.setByte(ordinal, v.asInstanceOf[Byte])
case ShortType => (input, v) => input.setShort(ordinal, v.asInstanceOf[Short])
case IntegerType | DateType => (input, v) => input.setInt(ordinal, v.asInstanceOf[Int])
case LongType | TimestampType => (input, v) => input.setLong(ordinal, v.asInstanceOf[Long])
case FloatType => (input, v) => input.setFloat(ordinal, v.asInstanceOf[Float])
case DoubleType => (input, v) => input.setDouble(ordinal, v.asInstanceOf[Double])
case DecimalType.Fixed(precision, _) =>
(input, v) => input.setDecimal(ordinal, v.asInstanceOf[Decimal], precision)
case udt: UserDefinedType[_] => getWriter(ordinal, udt.sqlType)
case NullType => (input, _) => input.setNullAt(ordinal)
case StringType => (input, v) => input.update(ordinal, v.asInstanceOf[UTF8String].copy())
case _: StructType => (input, v) => input.update(ordinal, v.asInstanceOf[InternalRow].copy())
case _: ArrayType => (input, v) => input.update(ordinal, v.asInstanceOf[ArrayData].copy())
case _: MapType => (input, v) => input.update(ordinal, v.asInstanceOf[MapData].copy())
case _ => (input, v) => input.update(ordinal, v)
}
}
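A minimal sketch (not part of Spark) of building an internal row and reading it back through getAccessor. Note that internal rows hold internal types, so strings go in as UTF8String rather than java.lang.String; the object name is hypothetical.
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types.{IntegerType, StringType}
import org.apache.spark.unsafe.types.UTF8String
object InternalRowSketch {
  def main(args: Array[String]): Unit = {
    val row = InternalRow(UTF8String.fromString("Justin"), 19)
    val nameGetter = InternalRow.getAccessor(StringType)   // null-safe accessor for the column type
    val ageGetter = InternalRow.getAccessor(IntegerType)
    println(nameGetter(row, 0))   // Justin
    println(ageGetter(row, 1))    // 19
    println(row.anyNull)          // false
  }
}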
|
CrazyTechnology/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceRDD.scala
|
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.v2.reader.{InputPartition, PartitionReader, PartitionReaderFactory}
class DataSourceRDDPartition(val index: Int, val inputPartition: InputPartition)
extends Partition with Serializable
// TODO: we should have 2 RDDs: an RDD[InternalRow] for row-based scan, an `RDD[ColumnarBatch]` for
// columnar scan.
class DataSourceRDD(
sc: SparkContext,
@transient private val inputPartitions: Seq[InputPartition],
partitionReaderFactory: PartitionReaderFactory,
columnarReads: Boolean)
extends RDD[InternalRow](sc, Nil) {
override protected def getPartitions: Array[Partition] = {
inputPartitions.zipWithIndex.map {
case (inputPartition, index) => new DataSourceRDDPartition(index, inputPartition)
}.toArray
}
private def castPartition(split: Partition): DataSourceRDDPartition = split match {
case p: DataSourceRDDPartition => p
case _ => throw new SparkException(s"[BUG] Not a DataSourceRDDPartition: $split")
}
override def compute(split: Partition, context: TaskContext): Iterator[InternalRow] = {
val inputPartition = castPartition(split).inputPartition
val reader: PartitionReader[_] = if (columnarReads) {
partitionReaderFactory.createColumnarReader(inputPartition)
} else {
partitionReaderFactory.createReader(inputPartition)
}
context.addTaskCompletionListener[Unit](_ => reader.close())
val iter = new Iterator[Any] {
private[this] var valuePrepared = false
override def hasNext: Boolean = {
if (!valuePrepared) {
valuePrepared = reader.next()
}
valuePrepared
}
override def next(): Any = {
if (!hasNext) {
throw new java.util.NoSuchElementException("End of stream")
}
valuePrepared = false
reader.get()
}
}
// TODO: SPARK-25083 remove the type erasure hack in data source scan
new InterruptibleIterator(context, iter.asInstanceOf[Iterator[InternalRow]])
}
override def getPreferredLocations(split: Partition): Seq[String] = {
castPartition(split).inputPartition.preferredLocations()
}
}
|
CrazyTechnology/spark
|
resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala
|
package org.apache.spark.scheduler.cluster
import java.util.concurrent.atomic.{AtomicBoolean}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.util.control.NonFatal
import org.apache.hadoop.yarn.api.records.{ApplicationAttemptId, ApplicationId}
import org.apache.spark.SparkContext
import org.apache.spark.internal.Logging
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.ui.JettyUtils
import org.apache.spark.util.{RpcUtils, ThreadUtils}
/**
* Abstract Yarn scheduler backend that contains common logic
* between the client and cluster Yarn scheduler backends.
*/
private[spark] abstract class YarnSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {
private val stopped = new AtomicBoolean(false)
override val minRegisteredRatio =
if (conf.getOption("spark.scheduler.minRegisteredResourcesRatio").isEmpty) {
0.8
} else {
super.minRegisteredRatio
}
protected var totalExpectedExecutors = 0
private val yarnSchedulerEndpoint = new YarnSchedulerEndpoint(rpcEnv)
private val yarnSchedulerEndpointRef = rpcEnv.setupEndpoint(
YarnSchedulerBackend.ENDPOINT_NAME, yarnSchedulerEndpoint)
private implicit val askTimeout = RpcUtils.askRpcTimeout(sc.conf)
/** Application ID. */
protected var appId: Option[ApplicationId] = None
/** Attempt ID. This is unset for client-mode schedulers */
private var attemptId: Option[ApplicationAttemptId] = None
/** Scheduler extension services. */
private val services: SchedulerExtensionServices = new SchedulerExtensionServices()
/**
* Bind to YARN. This *must* be done before calling [[start()]].
*
* @param appId YARN application ID
* @param attemptId Optional YARN attempt ID
*/
protected def bindToYarn(appId: ApplicationId, attemptId: Option[ApplicationAttemptId]): Unit = {
this.appId = Some(appId)
this.attemptId = attemptId
}
override def start() {
require(appId.isDefined, "application ID unset")
val binding = SchedulerExtensionServiceBinding(sc, appId.get, attemptId)
services.start(binding)
super.start()
}
override def stop(): Unit = {
try {
// SPARK-12009: To prevent Yarn allocator from requesting backup for the executors which
// was Stopped by SchedulerBackend.
requestTotalExecutors(0, 0, Map.empty)
super.stop()
} finally {
stopped.set(true)
services.stop()
}
}
/**
* Get the attempt ID for this run, if the cluster manager supports multiple
* attempts. Applications run in client mode will not have attempt IDs.
* This attempt ID only includes attempt counter, like "1", "2".
*
* @return The application attempt id, if available.
*/
override def applicationAttemptId(): Option[String] = {
attemptId.map(_.getAttemptId.toString)
}
/**
* Get an application ID associated with the job.
* This returns the string value of [[appId]] if set, otherwise
* the locally-generated ID from the superclass.
* @return The application ID
*/
override def applicationId(): String = {
appId.map(_.toString).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
}
private[cluster] def prepareRequestExecutors(requestedTotal: Int): RequestExecutors = {
val nodeBlacklist: Set[String] = scheduler.nodeBlacklist()
// For locality preferences, ignore preferences for nodes that are blacklisted
val filteredHostToLocalTaskCount =
hostToLocalTaskCount.filter { case (k, v) => !nodeBlacklist.contains(k) }
RequestExecutors(requestedTotal, localityAwareTasks, filteredHostToLocalTaskCount,
nodeBlacklist)
}
/**
* Request executors from the ApplicationMaster by specifying the total number desired.
* This includes executors already pending or running.
*/
override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = {
yarnSchedulerEndpointRef.ask[Boolean](prepareRequestExecutors(requestedTotal))
}
/**
* Request that the ApplicationMaster kill the specified executors.
*/
override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = {
yarnSchedulerEndpointRef.ask[Boolean](KillExecutors(executorIds))
}
override def sufficientResourcesRegistered(): Boolean = {
totalRegisteredExecutors.get() >= totalExpectedExecutors * minRegisteredRatio
}
/**
* Add filters to the SparkUI.
*/
private def addWebUIFilter(
filterName: String,
filterParams: Map[String, String],
proxyBase: String): Unit = {
if (proxyBase != null && proxyBase.nonEmpty) {
System.setProperty("spark.ui.proxyBase", proxyBase)
}
val hasFilter =
filterName != null && filterName.nonEmpty &&
filterParams != null && filterParams.nonEmpty
if (hasFilter) {
// SPARK-26255: Append user provided filters(spark.ui.filters) with yarn filter.
val allFilters = filterName + "," + conf.get("spark.ui.filters", "")
logInfo(s"Add WebUI Filter. $filterName, $filterParams, $proxyBase")
conf.set("spark.ui.filters", allFilters)
filterParams.foreach { case (k, v) => conf.set(s"spark.$filterName.param.$k", v) }
scheduler.sc.ui.foreach { ui => JettyUtils.addFilters(ui.getHandlers, conf) }
}
}
override def createDriverEndpoint(properties: Seq[(String, String)]): DriverEndpoint = {
new YarnDriverEndpoint(rpcEnv, properties)
}
/**
 * Reset the state of SchedulerBackend to the initial state. This happens when the AM fails and
 * re-registers itself with the driver after a failure. The stale state in the driver should be
 * cleaned.
*/
override protected def reset(): Unit = {
super.reset()
sc.executorAllocationManager.foreach(_.reset())
}
/**
* Override the DriverEndpoint to add extra logic for the case when an executor is disconnected.
* This endpoint communicates with the executors and queries the AM for an executor's exit
* status when the executor is disconnected.
*/
private class YarnDriverEndpoint(rpcEnv: RpcEnv, sparkProperties: Seq[(String, String)])
extends DriverEndpoint(rpcEnv, sparkProperties) {
/**
* When onDisconnected is received at the driver endpoint, the superclass DriverEndpoint
* handles it by assuming the Executor was lost for a bad reason and removes the executor
* immediately.
*
* In YARN's case however it is crucial to talk to the application master and ask why the
* executor had exited. If the executor exited for some reason unrelated to the running tasks
* (e.g., preemption), according to the application master, then we pass that information down
* to the TaskSetManager to inform the TaskSetManager that tasks on that lost executor should
* not count towards a job failure.
*/
override def onDisconnected(rpcAddress: RpcAddress): Unit = {
addressToExecutorId.get(rpcAddress).foreach { executorId =>
if (!stopped.get) {
if (disableExecutor(executorId)) {
yarnSchedulerEndpoint.handleExecutorDisconnectedFromDriver(executorId, rpcAddress)
}
}
}
}
}
/**
* An [[RpcEndpoint]] that communicates with the ApplicationMaster.
*/
private class YarnSchedulerEndpoint(override val rpcEnv: RpcEnv)
extends ThreadSafeRpcEndpoint with Logging {
private var amEndpoint: Option[RpcEndpointRef] = None
private[YarnSchedulerBackend] def handleExecutorDisconnectedFromDriver(
executorId: String,
executorRpcAddress: RpcAddress): Unit = {
val removeExecutorMessage = amEndpoint match {
case Some(am) =>
val lossReasonRequest = GetExecutorLossReason(executorId)
am.ask[ExecutorLossReason](lossReasonRequest, askTimeout)
.map { reason => RemoveExecutor(executorId, reason) }(ThreadUtils.sameThread)
.recover {
case NonFatal(e) =>
logWarning(s"Attempted to get executor loss reason" +
s" for executor id ${executorId} at RPC address ${executorRpcAddress}," +
s" but got no response. Marking as slave lost.", e)
RemoveExecutor(executorId, SlaveLost())
}(ThreadUtils.sameThread)
case None =>
logWarning("Attempted to check for an executor loss reason" +
" before the AM has registered!")
Future.successful(RemoveExecutor(executorId, SlaveLost("AM is not yet registered.")))
}
removeExecutorMessage.foreach { message => driverEndpoint.send(message) }
}
override def receive: PartialFunction[Any, Unit] = {
case RegisterClusterManager(am) =>
logInfo(s"ApplicationMaster registered as $am")
amEndpoint = Option(am)
reset()
case AddWebUIFilter(filterName, filterParams, proxyBase) =>
addWebUIFilter(filterName, filterParams, proxyBase)
case r @ RemoveExecutor(executorId, reason) =>
if (!stopped.get) {
logWarning(s"Requesting driver to remove executor $executorId for reason $reason")
driverEndpoint.send(r)
}
case u @ UpdateDelegationTokens(tokens) =>
// Add the tokens to the current user and send a message to the scheduler so that it
// notifies all registered executors of the new tokens.
driverEndpoint.send(u)
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case r: RequestExecutors =>
amEndpoint match {
case Some(am) =>
am.ask[Boolean](r).andThen {
case Success(b) => context.reply(b)
case Failure(NonFatal(e)) =>
logError(s"Sending $r to AM was unsuccessful", e)
context.sendFailure(e)
}(ThreadUtils.sameThread)
case None =>
logWarning("Attempted to request executors before the AM has registered!")
context.reply(false)
}
case k: KillExecutors =>
amEndpoint match {
case Some(am) =>
am.ask[Boolean](k).andThen {
case Success(b) => context.reply(b)
case Failure(NonFatal(e)) =>
logError(s"Sending $k to AM was unsuccessful", e)
context.sendFailure(e)
}(ThreadUtils.sameThread)
case None =>
logWarning("Attempted to kill executors before the AM has registered!")
context.reply(false)
}
case RetrieveLastAllocatedExecutorId =>
context.reply(currentExecutorIdCounter)
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (amEndpoint.exists(_.address == remoteAddress)) {
logWarning(s"ApplicationMaster has disassociated: $remoteAddress")
amEndpoint = None
}
}
}
}
private[spark] object YarnSchedulerBackend {
val ENDPOINT_NAME = "YarnScheduler"
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/MasterMessages.scala
|
package org.apache.spark.deploy.master
sealed trait MasterMessages extends Serializable
/** Contains messages seen only by the Master and its associated entities. */
private[master] object MasterMessages {
// LeaderElectionAgent to Master
case object ElectedLeader
case object RevokedLeadership
// Master to itself
case object CheckForWorkerTimeOut
case class BeginRecovery(storedApps: Seq[ApplicationInfo], storedWorkers: Seq[WorkerInfo])
case object CompleteRecovery
case object BoundPortsRequest
case class BoundPortsResponse(rpcEndpointPort: Int, webUIPort: Int, restPort: Option[Int])
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala
|
package org.apache.spark.deploy.history
import scala.annotation.tailrec
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* Command-line parser for the [[HistoryServer]].
*/
private[history] class HistoryServerArguments(conf: SparkConf, args: Array[String])
extends Logging {
private var propertiesFile: String = null
parse(args.toList)
@tailrec
private def parse(args: List[String]): Unit = {
args match {
case ("--help" | "-h") :: tail =>
printUsageAndExit(0)
case ("--properties-file") :: value :: tail =>
propertiesFile = value
parse(tail)
case Nil =>
case _ =>
printUsageAndExit(1)
}
}
// This mutates the SparkConf, so all accesses to it must be made after this line
Utils.loadDefaultSparkProperties(conf, propertiesFile)
private def printUsageAndExit(exitCode: Int) {
// scalastyle:off println
System.err.println(
"""
|Usage: HistoryServer [options]
|
|Options:
| --properties-file FILE Path to a custom Spark properties file.
| Default is conf/spark-defaults.conf.
|
|Configuration options can be set by setting the corresponding JVM system property.
|History Server options are always available; additional options depend on the provider.
|
|History Server options:
|
| spark.history.ui.port Port where server will listen for connections
| (default 18080)
| spark.history.acls.enable Whether to enable view acls for all applications
| (default false)
| spark.history.provider Name of history provider class (defaults to
| file system-based provider)
| spark.history.retainedApplications Max number of application UIs to keep loaded in memory
| (default 50)
|FsHistoryProvider options:
|
| spark.history.fs.logDirectory Directory where app logs are stored
| (default: file:/tmp/spark-events)
| spark.history.fs.updateInterval How often to reload log data from storage
| (in seconds, default: 10)
|""".stripMargin)
// scalastyle:on println
System.exit(exitCode)
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/Partitioner.scala
|
package org.apache.spark
import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.math.log10
import scala.reflect.ClassTag
import scala.util.hashing.byteswap32
import org.apache.spark.rdd.{PartitionPruningRDD, RDD}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.util.{CollectionsUtils, Utils}
import org.apache.spark.util.random.SamplingUtils
/**
* An object that defines how the elements in a key-value pair RDD are partitioned by key.
* Maps each key to a partition ID, from 0 to `numPartitions - 1`.
 * Note that the partitioner must be deterministic, i.e. it must return the same partition id given
* the same partition key.
*/
abstract class Partitioner extends Serializable {
def numPartitions: Int
def getPartition(key: Any): Int
}
object Partitioner {
/**
* Choose a partitioner to use for a cogroup-like operation between a number of RDDs.
*
* If spark.default.parallelism is set, we'll use the value of SparkContext defaultParallelism
* as the default partitions number, otherwise we'll use the max number of upstream partitions.
*
* When available, we choose the partitioner from rdds with the maximum number of partitions. If
* this partitioner is eligible (its number of partitions is within an order of magnitude of the
* maximum number of partitions in rdds), or it has a number of partitions higher than the
* default number of partitions, we use this partitioner.
*
* Otherwise, we'll use a new HashPartitioner with the default partitions number.
*
* Unless spark.default.parallelism is set, the number of partitions will be the same as the
* number of partitions in the largest upstream RDD, as this should be least likely to cause
* out-of-memory errors.
*
* We use two method parameters (rdd, others) to enforce callers passing at least 1 RDD.
*/
def defaultPartitioner(rdd: RDD[_], others: RDD[_]*): Partitioner = {
val rdds = (Seq(rdd) ++ others)
val hasPartitioner = rdds.filter(_.partitioner.exists(_.numPartitions > 0))
val hasMaxPartitioner: Option[RDD[_]] = if (hasPartitioner.nonEmpty) {
Some(hasPartitioner.maxBy(_.partitions.length))
} else {
None
}
val defaultNumPartitions = if (rdd.context.conf.contains("spark.default.parallelism")) {
rdd.context.defaultParallelism
} else {
rdds.map(_.partitions.length).max
}
// If the existing max partitioner is an eligible one, or its partitions number is larger
// than the default number of partitions, use the existing partitioner.
if (hasMaxPartitioner.nonEmpty && (isEligiblePartitioner(hasMaxPartitioner.get, rdds) ||
defaultNumPartitions < hasMaxPartitioner.get.getNumPartitions)) {
hasMaxPartitioner.get.partitioner.get
} else {
new HashPartitioner(defaultNumPartitions)
}
}
/**
* Returns true if the number of partitions of the RDD with the existing max partitioner is either
* greater than, or within a single order of magnitude below, the max number of upstream
* partitions; otherwise returns false.
*/
private def isEligiblePartitioner(
hasMaxPartitioner: RDD[_],
rdds: Seq[RDD[_]]): Boolean = {
val maxPartitions = rdds.map(_.partitions.length).max
log10(maxPartitions) - log10(hasMaxPartitioner.getNumPartitions) < 1
}
}
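// Illustrative sketch (editor's addition, not part of the original file): with
// spark.default.parallelism unset, an upstream partitioner is reused when its partition count is
// within one order of magnitude of the largest upstream RDD (see isEligiblePartitioner above).
private[spark] object DefaultPartitionerExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("partitioner-demo"))
    try {
      val a = sc.parallelize(1 to 100).map(i => (i, i)).partitionBy(new HashPartitioner(10))
      val b = sc.parallelize(1 to 1000, numSlices = 50).map(i => (i, i))
      // log10(50) - log10(10) < 1, so a's partitioner is eligible and gets reused.
      val p = Partitioner.defaultPartitioner(a, b)
      println(s"${p.getClass.getSimpleName} with ${p.numPartitions} partitions")
    } finally {
      sc.stop()
    }
  }
}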
/**
* A [[org.apache.spark.Partitioner]] that implements hash-based partitioning using
* Java's `Object.hashCode`.
*
* Java arrays have hashCodes that are based on the arrays' identities rather than their contents,
* so attempting to partition an RDD[Array[_]] or RDD[(Array[_], _)] using a HashPartitioner will
* produce an unexpected or incorrect result.
*/
class HashPartitioner(partitions: Int) extends Partitioner {
require(partitions >= 0, s"Number of partitions ($partitions) cannot be negative.")
def numPartitions: Int = partitions
def getPartition(key: Any): Int = key match {
case null => 0
case _ => Utils.nonNegativeMod(key.hashCode, numPartitions)
}
override def equals(other: Any): Boolean = other match {
case h: HashPartitioner =>
h.numPartitions == numPartitions
case _ =>
false
}
override def hashCode: Int = numPartitions
}
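// Illustrative sketch (editor's addition, not part of the original file): getPartition is a pure
// function of the key's hashCode, so it can be exercised without any cluster.
private[spark] object HashPartitionerExample {
  def main(args: Array[String]): Unit = {
    val p = new HashPartitioner(4)
    assert(p.getPartition("user-42") == p.getPartition("user-42")) // equal keys, same partition
    assert(p.getPartition(null) == 0) // nulls are pinned to partition 0
    println((0 until 10).map(i => i -> p.getPartition(i))) // Vector((0,0), (1,1), ..., (9,1))
  }
}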
/**
* A [[org.apache.spark.Partitioner]] that partitions sortable records by range into roughly
* equal ranges. The ranges are determined by sampling the content of the RDD passed in.
*
* @note The actual number of partitions created by the RangePartitioner might not be the same
* as the `partitions` parameter, in the case where the number of sampled records is less than
* the value of `partitions`.
*/
class RangePartitioner[K : Ordering : ClassTag, V](
partitions: Int,
rdd: RDD[_ <: Product2[K, V]],
private var ascending: Boolean = true,
val samplePointsPerPartitionHint: Int = 20)
extends Partitioner {
// A constructor declared in order to maintain backward compatibility for Java, when we add the
// 4th constructor parameter samplePointsPerPartitionHint. See SPARK-22160.
// This is added to make sure from a bytecode point of view, there is still a 3-arg ctor.
def this(partitions: Int, rdd: RDD[_ <: Product2[K, V]], ascending: Boolean) = {
this(partitions, rdd, ascending, samplePointsPerPartitionHint = 20)
}
// We allow partitions = 0, which happens when sorting an empty RDD under the default settings.
require(partitions >= 0, s"Number of partitions cannot be negative but found $partitions.")
require(samplePointsPerPartitionHint > 0,
s"Sample points per partition must be greater than 0 but found $samplePointsPerPartitionHint")
private var ordering = implicitly[Ordering[K]]
// An array of upper bounds for the first (partitions - 1) partitions
private var rangeBounds: Array[K] = {
if (partitions <= 1) {
Array.empty
} else {
// This is the sample size we need to have roughly balanced output partitions, capped at 1M.
// Cast to double to avoid overflowing ints or longs
val sampleSize = math.min(samplePointsPerPartitionHint.toDouble * partitions, 1e6)
// Assume the input partitions are roughly balanced and over-sample a little bit.
val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.length).toInt
val (numItems, sketched) = RangePartitioner.sketch(rdd.map(_._1), sampleSizePerPartition)
if (numItems == 0L) {
Array.empty
} else {
// If a partition contains much more than the average number of items, we re-sample from it
// to ensure that enough items are collected from that partition.
val fraction = math.min(sampleSize / math.max(numItems, 1L), 1.0)
val candidates = ArrayBuffer.empty[(K, Float)]
val imbalancedPartitions = mutable.Set.empty[Int]
sketched.foreach { case (idx, n, sample) =>
if (fraction * n > sampleSizePerPartition) {
imbalancedPartitions += idx
} else {
// The weight is 1 over the sampling probability.
val weight = (n.toDouble / sample.length).toFloat
for (key <- sample) {
candidates += ((key, weight))
}
}
}
if (imbalancedPartitions.nonEmpty) {
// Re-sample imbalanced partitions with the desired sampling probability.
val imbalanced = new PartitionPruningRDD(rdd.map(_._1), imbalancedPartitions.contains)
val seed = byteswap32(-rdd.id - 1)
val reSampled = imbalanced.sample(withReplacement = false, fraction, seed).collect()
val weight = (1.0 / fraction).toFloat
candidates ++= reSampled.map(x => (x, weight))
}
RangePartitioner.determineBounds(candidates, math.min(partitions, candidates.size))
}
}
}
def numPartitions: Int = rangeBounds.length + 1
private var binarySearch: ((Array[K], K) => Int) = CollectionsUtils.makeBinarySearch[K]
def getPartition(key: Any): Int = {
val k = key.asInstanceOf[K]
var partition = 0
if (rangeBounds.length <= 128) {
// If we have fewer than 128 partitions, use a naive linear search.
while (partition < rangeBounds.length && ordering.gt(k, rangeBounds(partition))) {
partition += 1
}
} else {
// Determine which binary search method to use only once.
partition = binarySearch(rangeBounds, k)
// binarySearch either returns the match location or -[insertion point]-1
if (partition < 0) {
partition = -partition-1
}
if (partition > rangeBounds.length) {
partition = rangeBounds.length
}
}
if (ascending) {
partition
} else {
rangeBounds.length - partition
}
}
override def equals(other: Any): Boolean = other match {
case r: RangePartitioner[_, _] =>
r.rangeBounds.sameElements(rangeBounds) && r.ascending == ascending
case _ =>
false
}
override def hashCode(): Int = {
val prime = 31
var result = 1
var i = 0
while (i < rangeBounds.length) {
result = prime * result + rangeBounds(i).hashCode
i += 1
}
result = prime * result + ascending.hashCode
result
}
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
sfactory match {
case js: JavaSerializer => out.defaultWriteObject()
case _ =>
out.writeBoolean(ascending)
out.writeObject(ordering)
out.writeObject(binarySearch)
val ser = sfactory.newInstance()
Utils.serializeViaNestedStream(out, ser) { stream =>
stream.writeObject(scala.reflect.classTag[Array[K]])
stream.writeObject(rangeBounds)
}
}
}
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
val sfactory = SparkEnv.get.serializer
sfactory match {
case js: JavaSerializer => in.defaultReadObject()
case _ =>
ascending = in.readBoolean()
ordering = in.readObject().asInstanceOf[Ordering[K]]
binarySearch = in.readObject().asInstanceOf[(Array[K], K) => Int]
val ser = sfactory.newInstance()
Utils.deserializeViaNestedStream(in, ser) { ds =>
implicit val classTag = ds.readObject[ClassTag[Array[K]]]()
rangeBounds = ds.readObject[Array[K]]()
}
}
}
}
private[spark] object RangePartitioner {
/**
* Sketches the input RDD via reservoir sampling on each partition.
*
* @param rdd the input RDD to sketch
* @param sampleSizePerPartition max sample size per partition
* @return (total number of items, an array of (partitionId, number of items, sample))
*/
def sketch[K : ClassTag](
rdd: RDD[K],
sampleSizePerPartition: Int): (Long, Array[(Int, Long, Array[K])]) = {
val shift = rdd.id
// val classTagK = classTag[K] // to avoid serializing the entire partitioner object
val sketched = rdd.mapPartitionsWithIndex { (idx, iter) =>
val seed = byteswap32(idx ^ (shift << 16))
val (sample, n) = SamplingUtils.reservoirSampleAndCount(
iter, sampleSizePerPartition, seed)
Iterator((idx, n, sample))
}.collect()
val numItems = sketched.map(_._2).sum
(numItems, sketched)
}
/**
* Determines the bounds for range partitioning from candidates with weights indicating how many
* items each represents. Usually this is 1 over the probability used to sample this candidate.
*
* @param candidates unordered candidates with weights
* @param partitions number of partitions
* @return selected bounds
*/
def determineBounds[K : Ordering : ClassTag](
candidates: ArrayBuffer[(K, Float)],
partitions: Int): Array[K] = {
val ordering = implicitly[Ordering[K]]
val ordered = candidates.sortBy(_._1)
val numCandidates = ordered.size
val sumWeights = ordered.map(_._2.toDouble).sum
val step = sumWeights / partitions
var cumWeight = 0.0
var target = step
val bounds = ArrayBuffer.empty[K]
var i = 0
var j = 0
var previousBound = Option.empty[K]
while ((i < numCandidates) && (j < partitions - 1)) {
val (key, weight) = ordered(i)
cumWeight += weight
if (cumWeight >= target) {
// Skip duplicate values.
if (previousBound.isEmpty || ordering.gt(key, previousBound.get)) {
bounds += key
target += step
j += 1
previousBound = Some(key)
}
}
i += 1
}
bounds.toArray
}
}
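// Illustrative sketch (editor's addition, not part of the original file): determineBounds with
// uniformly weighted candidates simply splits the sorted keys into near-equal ranges.
private[spark] object RangePartitionerBoundsExample {
  def main(args: Array[String]): Unit = {
    val candidates = ArrayBuffer.tabulate(100)(i => (i, 1.0f)) // (key, weight) pairs
    val bounds = RangePartitioner.determineBounds(candidates, partitions = 4)
    println(bounds.mkString(", ")) // 24, 49, 74 -- three upper bounds for four partitions
  }
}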
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/DriverInfo.scala
|
package org.apache.spark.deploy.master
import java.util.Date
import org.apache.spark.deploy.DriverDescription
import org.apache.spark.util.Utils
private[deploy] class DriverInfo(
val startTime: Long,
val id: String,
val desc: DriverDescription,
val submitDate: Date)
extends Serializable {
@transient var state: DriverState.Value = DriverState.SUBMITTED
/* If we fail when launching the driver, the exception is stored here. */
@transient var exception: Option[Exception] = None
/* Most recent worker assigned to this driver */
@transient var worker: Option[WorkerInfo] = None
init()
private def readObject(in: java.io.ObjectInputStream): Unit = Utils.tryOrIOException {
in.defaultReadObject()
init()
}
private def init(): Unit = {
state = DriverState.SUBMITTED
worker = None
exception = None
}
}
|
CrazyTechnology/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelationSuite.scala
|
package org.apache.spark.sql.execution.datasources
import java.io.{File, FilenameFilter}
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.test.SharedSQLContext
class HadoopFsRelationSuite extends QueryTest with SharedSQLContext {
test("sizeInBytes should be the total size of all files") {
withTempDir{ dir =>
dir.delete()
spark.range(1000).write.parquet(dir.toString)
// ignore hidden files
val allFiles = dir.listFiles(new FilenameFilter {
override def accept(dir: File, name: String): Boolean = {
!name.startsWith(".") && !name.startsWith("_")
}
})
val totalSize = allFiles.map(_.length()).sum
val df = spark.read.parquet(dir.toString)
assert(df.queryExecution.logical.stats.sizeInBytes === BigInt(totalSize))
}
}
test("SPARK-22790: spark.sql.sources.compressionFactor takes effect") {
import testImplicits._
Seq(1.0, 0.5).foreach { compressionFactor =>
withSQLConf("spark.sql.sources.fileCompressionFactor" -> compressionFactor.toString,
"spark.sql.autoBroadcastJoinThreshold" -> "434") {
withTempPath { workDir =>
// the file size is 740 bytes
val workDirPath = workDir.getAbsolutePath
val data1 = Seq(100, 200, 300, 400).toDF("count")
data1.write.parquet(workDirPath + "/data1")
val df1FromFile = spark.read.parquet(workDirPath + "/data1")
val data2 = Seq(100, 200, 300, 400).toDF("count")
data2.write.parquet(workDirPath + "/data2")
val df2FromFile = spark.read.parquet(workDirPath + "/data2")
val joinedDF = df1FromFile.join(df2FromFile, Seq("count"))
if (compressionFactor == 0.5) {
val bJoinExec = joinedDF.queryExecution.executedPlan.collect {
case bJoin: BroadcastHashJoinExec => bJoin
}
assert(bJoinExec.nonEmpty)
val smJoinExec = joinedDF.queryExecution.executedPlan.collect {
case smJoin: SortMergeJoinExec => smJoin
}
assert(smJoinExec.isEmpty)
} else {
// compressionFactor is 1.0
val bJoinExec = joinedDF.queryExecution.executedPlan.collect {
case bJoin: BroadcastHashJoinExec => bJoin
}
assert(bJoinExec.isEmpty)
val smJoinExec = joinedDF.queryExecution.executedPlan.collect {
case smJoin: SortMergeJoinExec => smJoin
}
assert(smJoinExec.nonEmpty)
}
}
}
}
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/RecoveryModeFactory.scala
|
package org.apache.spark.deploy.master
import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer
/**
* ::DeveloperApi::
*
* Implementations of this class can be plugged in as an alternative recovery mode for Spark's
* Standalone mode.
*
*/
@DeveloperApi
abstract class StandaloneRecoveryModeFactory(conf: SparkConf, serializer: Serializer) {
/**
* PersistenceEngine defines how the persistent data (information about workers, drivers, etc.)
* is handled for recovery.
*
*/
def createPersistenceEngine(): PersistenceEngine
/**
* Create an instance of LeaderAgent that decides who gets elected as master.
*/
def createLeaderElectionAgent(master: LeaderElectable): LeaderElectionAgent
}
/**
* The LeaderAgent in this case is a no-op: the leader remains leader forever, since the actual
* recovery is done by restoring state from the filesystem.
*/
private[master] class FileSystemRecoveryModeFactory(conf: SparkConf, serializer: Serializer)
extends StandaloneRecoveryModeFactory(conf, serializer) with Logging {
val RECOVERY_DIR = conf.get("spark.deploy.recoveryDirectory", "")
def createPersistenceEngine(): PersistenceEngine = {
logInfo("Persisting recovery state to directory: " + RECOVERY_DIR)
new FileSystemPersistenceEngine(RECOVERY_DIR, serializer)
}
def createLeaderElectionAgent(master: LeaderElectable): LeaderElectionAgent = {
new MonarchyLeaderAgent(master)
}
}
private[master] class ZooKeeperRecoveryModeFactory(conf: SparkConf, serializer: Serializer)
extends StandaloneRecoveryModeFactory(conf, serializer) {
def createPersistenceEngine(): PersistenceEngine = {
new ZooKeeperPersistenceEngine(conf, serializer)
}
def createLeaderElectionAgent(master: LeaderElectable): LeaderElectionAgent = {
new ZooKeeperLeaderElectionAgent(master, conf)
}
}
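// Illustrative sketch (editor's addition, not part of the original file): the smallest possible
// custom factory -- no persistence, a single eternal leader. A custom factory is normally wired
// in through spark.deploy.recoveryMode=CUSTOM plus spark.deploy.recoveryMode.factory (to the best
// of the editor's knowledge of the standalone Master).
private[master] class NoOpRecoveryModeFactory(conf: SparkConf, serializer: Serializer)
  extends StandaloneRecoveryModeFactory(conf, serializer) {
  def createPersistenceEngine(): PersistenceEngine = new BlackHolePersistenceEngine()
  def createLeaderElectionAgent(master: LeaderElectable): LeaderElectionAgent =
    new MonarchyLeaderAgent(master)
}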
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/Partition.scala
|
package org.apache.spark
/**
* An identifier for a partition in an RDD.
*/
trait Partition extends Serializable {
/**
* Get the partition's index within its parent RDD
*/
def index: Int
// A better default implementation of HashCode
override def hashCode(): Int = index
override def equals(other: Any): Boolean = super.equals(other)
}
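// Illustrative sketch (editor's addition, not part of the original file): a concrete Partition
// usually just carries its index plus whatever the owning RDD needs to compute that slice.
private[spark] class ExampleSlicePartition(override val index: Int, val start: Long, val end: Long)
  extends Partition {
  override def toString: String = s"ExampleSlicePartition($index, [$start, $end))"
}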
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/RecoveryState.scala
|
package org.apache.spark.deploy.master
private[deploy] object RecoveryState extends Enumeration {
type MasterState = Value
val STANDBY, ALIVE, RECOVERING, COMPLETING_RECOVERY = Value
}
|
CrazyTechnology/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonUtils.scala
|
package org.apache.spark.sql.execution.datasources.json
import org.apache.spark.input.PortableDataStream
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.catalyst.json.JSONOptions
object JsonUtils {
/**
* Sample JSON dataset as configured by `samplingRatio`.
*/
def sample(json: Dataset[String], options: JSONOptions): Dataset[String] = {
require(options.samplingRatio > 0,
s"samplingRatio (${options.samplingRatio}) should be greater than 0")
if (options.samplingRatio > 0.99) {
json
} else {
json.sample(withReplacement = false, options.samplingRatio, 1)
}
}
/**
* Sample JSON RDD as configured by `samplingRatio`.
*/
def sample(json: RDD[PortableDataStream], options: JSONOptions): RDD[PortableDataStream] = {
require(options.samplingRatio > 0,
s"samplingRatio (${options.samplingRatio}) should be greater than 0")
if (options.samplingRatio > 0.99) {
json
} else {
json.sample(withReplacement = false, options.samplingRatio, 1)
}
}
}
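// Illustrative sketch (editor's addition, not part of the original file): samplingRatio above
// 0.99 short-circuits to the full dataset; anything lower takes a seeded (seed = 1), and hence
// reproducible, sample. The `JSONOptions` construction below reflects the editor's understanding
// of its (Map, timeZoneId) constructor.
object JsonUtilsExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder()
      .master("local[1]").appName("json-sample-demo").getOrCreate()
    import spark.implicits._
    val ds = Seq("""{"a": 1}""", """{"a": 2}""", """{"a": 3}""", """{"a": 4}""").toDS()
    val opts = new JSONOptions(Map("samplingRatio" -> "0.5"), "UTC")
    println(JsonUtils.sample(ds, opts).count()) // roughly half of the records
    spark.stop()
  }
}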
|
CrazyTechnology/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
|
package org.apache.spark.sql.hive
import java.io.{InputStream, OutputStream}
import java.rmi.server.UID
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import com.google.common.base.Objects
import org.apache.avro.Schema
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.{UDF, Utilities}
import org.apache.hadoop.hive.ql.plan.{FileSinkDesc, TableDesc}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils
import org.apache.hadoop.hive.serde2.avro.{AvroGenericRecordWritable, AvroSerdeUtils}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector
import org.apache.hadoop.io.Writable
import org.apache.hive.com.esotericsoftware.kryo.Kryo
import org.apache.hive.com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.types.Decimal
import org.apache.spark.util.Utils
private[hive] object HiveShim {
// Precision and scale to pass for unlimited decimals; these are the same as the precision and
// scale Hive 0.13 infers for BigDecimals from sources that don't specify them (e.g. UDFs)
val UNLIMITED_DECIMAL_PRECISION = 38
val UNLIMITED_DECIMAL_SCALE = 18
val HIVE_GENERIC_UDF_MACRO_CLS = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro"
/*
* This function became private in hive-0.13, so we reproduce it here to work around the Hive bug
*/
private def appendReadColumnNames(conf: Configuration, cols: Seq[String]) {
val old: String = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "")
val result: StringBuilder = new StringBuilder(old)
var first: Boolean = old.isEmpty
for (col <- cols) {
if (first) {
first = false
} else {
result.append(',')
}
result.append(col)
}
conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, result.toString)
}
/*
* We cannot use ColumnProjectionUtils.appendReadColumns directly when ids is null
*/
def appendReadColumns(conf: Configuration, ids: Seq[Integer], names: Seq[String]) {
if (ids != null) {
ColumnProjectionUtils.appendReadColumns(conf, ids.asJava)
}
if (names != null) {
appendReadColumnNames(conf, names)
}
}
/*
* Bug introduced in hive-0.13. AvroGenericRecordWritable has a member recordReaderID that
* needs to be initialized before serialization.
*/
def prepareWritable(w: Writable, serDeProps: Seq[(String, String)]): Writable = {
w match {
case w: AvroGenericRecordWritable =>
w.setRecordReaderID(new UID())
// In Hive 1.1, the record's schema may need to be initialized manually or an NPE will
// be thrown.
if (w.getFileSchema() == null) {
serDeProps
.find(_._1 == AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName())
.foreach { kv =>
w.setFileSchema(new Schema.Parser().parse(kv._2))
}
}
case _ =>
}
w
}
def toCatalystDecimal(hdoi: HiveDecimalObjectInspector, data: Any): Decimal = {
if (hdoi.preferWritable()) {
Decimal(hdoi.getPrimitiveWritableObject(data).getHiveDecimal().bigDecimalValue,
hdoi.precision(), hdoi.scale())
} else {
Decimal(hdoi.getPrimitiveJavaObject(data).bigDecimalValue(), hdoi.precision(), hdoi.scale())
}
}
/**
* This class provides the UDF creation and also the UDF instance serialization and
* de-serialization across process boundaries.
*
* Detail discussion can be found at https://github.com/apache/spark/pull/3640
*
* @param functionClassName UDF class name
* @param instance optional UDF instance which contains additional information (for macro)
*/
private[hive] case class HiveFunctionWrapper(var functionClassName: String,
private var instance: AnyRef = null) extends java.io.Externalizable {
// for Serialization
def this() = this(null)
override def hashCode(): Int = {
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
Objects.hashCode(functionClassName, instance.asInstanceOf[GenericUDFMacro].getBody())
} else {
functionClassName.hashCode()
}
}
override def equals(other: Any): Boolean = other match {
case a: HiveFunctionWrapper if functionClassName == a.functionClassName =>
// In case of udf macro, check to make sure they point to the same underlying UDF
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
a.instance.asInstanceOf[GenericUDFMacro].getBody() ==
instance.asInstanceOf[GenericUDFMacro].getBody()
} else {
true
}
case _ => false
}
@transient
def deserializeObjectByKryo[T: ClassTag](
kryo: Kryo,
in: InputStream,
clazz: Class[_]): T = {
val inp = new Input(in)
val t: T = kryo.readObject(inp, clazz).asInstanceOf[T]
inp.close()
t
}
@transient
def serializeObjectByKryo(
kryo: Kryo,
plan: Object,
out: OutputStream) {
val output: Output = new Output(out)
kryo.writeObject(output, plan)
output.close()
}
def deserializePlan[UDFType](is: java.io.InputStream, clazz: Class[_]): UDFType = {
deserializeObjectByKryo(Utilities.runtimeSerializationKryo.get(), is, clazz)
.asInstanceOf[UDFType]
}
def serializePlan(function: AnyRef, out: java.io.OutputStream): Unit = {
serializeObjectByKryo(Utilities.runtimeSerializationKryo.get(), function, out)
}
def writeExternal(out: java.io.ObjectOutput) {
// output the function name
out.writeUTF(functionClassName)
// Write a flag if instance is null or not
out.writeBoolean(instance != null)
if (instance != null) {
// Some of the UDFs are serializable, but others are not;
// Hive Utilities can handle both cases.
val baos = new java.io.ByteArrayOutputStream()
serializePlan(instance, baos)
val functionInBytes = baos.toByteArray
// output the function bytes
out.writeInt(functionInBytes.length)
out.write(functionInBytes, 0, functionInBytes.length)
}
}
def readExternal(in: java.io.ObjectInput) {
// read the function name
functionClassName = in.readUTF()
if (in.readBoolean()) {
// if the instance is not null
// read the function in bytes
val functionInBytesLength = in.readInt()
val functionInBytes = new Array[Byte](functionInBytesLength)
in.readFully(functionInBytes)
// deserialize the function object via Hive Utilities
instance = deserializePlan[AnyRef](new java.io.ByteArrayInputStream(functionInBytes),
Utils.getContextOrSparkClassLoader.loadClass(functionClassName))
}
}
def createFunction[UDFType <: AnyRef](): UDFType = {
if (instance != null) {
instance.asInstanceOf[UDFType]
} else {
val func = Utils.getContextOrSparkClassLoader
.loadClass(functionClassName).getConstructor().newInstance().asInstanceOf[UDFType]
if (!func.isInstanceOf[UDF]) {
// We cache the function if it's not a simple UDF,
// as we always have to create a new instance for simple UDFs.
instance = func
}
func
}
}
}
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
implicit def wrapperToFileSinkDesc(w: ShimFileSinkDesc): FileSinkDesc = {
val f = new FileSinkDesc(new Path(w.dir), w.tableInfo, w.compressed)
f.setCompressCodec(w.compressCodec)
f.setCompressType(w.compressType)
f.setTableInfo(w.tableInfo)
f.setDestTableId(w.destTableId)
f
}
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
private[hive] class ShimFileSinkDesc(
var dir: String,
var tableInfo: TableDesc,
var compressed: Boolean)
extends Serializable with Logging {
var compressCodec: String = _
var compressType: String = _
var destTableId: Int = _
def setCompressed(compressed: Boolean) {
this.compressed = compressed
}
def getDirName(): String = dir
def setDestTableId(destTableId: Int) {
this.destTableId = destTableId
}
def setTableInfo(tableInfo: TableDesc) {
this.tableInfo = tableInfo
}
def setCompressCodec(intermediateCompressorCodec: String) {
compressCodec = intermediateCompressorCodec
}
def setCompressType(intermediateCompressType: String) {
compressType = intermediateCompressType
}
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/SparkException.scala
|
package org.apache.spark
class SparkException(message: String, cause: Throwable)
extends Exception(message, cause) {
def this(message: String) = this(message, null)
}
/**
* Exception thrown when execution of some user code in the driver process fails, e.g.
* an accumulator update fails, or takeOrdered fails because the user supplies an Ordering
* implementation that misbehaves.
*/
private[spark] class SparkDriverExecutionException(cause: Throwable)
extends SparkException("Execution error", cause)
/**
* Exception thrown when the main user code is run as a child process (e.g. pyspark) and we want
* the parent SparkSubmit process to exit with the same exit code.
*/
private[spark] case class SparkUserAppException(exitCode: Int)
extends SparkException(s"User application exited with $exitCode")
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/rpc/RpcEndpointAddress.scala
|
package org.apache.spark.rpc
import org.apache.spark.SparkException
/**
* An address identifier for an RPC endpoint.
*
* The `rpcAddress` may be null, in which case the endpoint is registered via a client-only
* connection and can only be reached via the client that sent the endpoint reference.
*
* @param rpcAddress The socket address of the endpoint. It's `null` when this address points to
* an endpoint in a client `NettyRpcEnv`.
* @param name Name of the endpoint.
*/
private[spark] case class RpcEndpointAddress(rpcAddress: RpcAddress, name: String) {
require(name != null, "RpcEndpoint name must be provided.")
def this(host: String, port: Int, name: String) = {
this(RpcAddress(host, port), name)
}
override val toString = if (rpcAddress != null) {
s"spark://$name@${rpcAddress.host}:${rpcAddress.port}"
} else {
s"spark-client://$name"
}
}
private[spark] object RpcEndpointAddress {
def apply(host: String, port: Int, name: String): RpcEndpointAddress = {
new RpcEndpointAddress(host, port, name)
}
def apply(sparkUrl: String): RpcEndpointAddress = {
try {
val uri = new java.net.URI(sparkUrl)
val host = uri.getHost
val port = uri.getPort
val name = uri.getUserInfo
if (uri.getScheme != "spark" ||
host == null ||
port < 0 ||
name == null ||
(uri.getPath != null && !uri.getPath.isEmpty) || // uri.getPath returns "" instead of null
uri.getFragment != null ||
uri.getQuery != null) {
throw new SparkException("Invalid Spark URL: " + sparkUrl)
}
new RpcEndpointAddress(host, port, name)
} catch {
case e: java.net.URISyntaxException =>
throw new SparkException("Invalid Spark URL: " + sparkUrl, e)
}
}
}
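// Illustrative sketch (editor's addition, not part of the original file): round-tripping the
// spark:// URL format handled by this class.
private[spark] object RpcEndpointAddressExample {
  def main(args: Array[String]): Unit = {
    val addr = RpcEndpointAddress("localhost", 7077, "Master")
    println(addr) // spark://Master@localhost:7077
    val parsed = RpcEndpointAddress(addr.toString)
    assert(parsed.rpcAddress == RpcAddress("localhost", 7077) && parsed.name == "Master")
    // A malformed URL (wrong scheme, missing name, etc.) raises SparkException instead.
  }
}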
|
CrazyTechnology/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/ReadOnlySQLConf.scala
|
package org.apache.spark.sql.internal
import java.util.{Map => JMap}
import org.apache.spark.TaskContext
import org.apache.spark.internal.config.{ConfigEntry, ConfigProvider, ConfigReader}
/**
* A read-only SQLConf that is created by tasks running on the executor side. It reads the
* configs from the local properties that are propagated from the driver to the executors.
*/
class ReadOnlySQLConf(context: TaskContext) extends SQLConf {
@transient override val settings: JMap[String, String] = {
context.getLocalProperties.asInstanceOf[JMap[String, String]]
}
@transient override protected val reader: ConfigReader = {
new ConfigReader(new TaskContextConfigProvider(context))
}
override protected def setConfWithCheck(key: String, value: String): Unit = {
throw new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
override def unsetConf(key: String): Unit = {
throw new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
override def unsetConf(entry: ConfigEntry[_]): Unit = {
throw new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
override def clear(): Unit = {
throw new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
override def clone(): SQLConf = {
throw new UnsupportedOperationException("Cannot clone/copy ReadOnlySQLConf.")
}
override def copy(entries: (ConfigEntry[_], Any)*): SQLConf = {
throw new UnsupportedOperationException("Cannot clone/copy ReadOnlySQLConf.")
}
}
class TaskContextConfigProvider(context: TaskContext) extends ConfigProvider {
override def get(key: String): Option[String] = Option(context.getLocalProperty(key))
}
|
CrazyTechnology/spark
|
core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala
|
package org.apache.spark.scheduler
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.AccumulatorV2
class ExternalClusterManagerSuite extends SparkFunSuite with LocalSparkContext {
test("launch of backend and scheduler") {
val conf = new SparkConf().setMaster("myclusterManager").setAppName("testcm")
sc = new SparkContext(conf)
// check if the scheduler components are created and initialized
sc.schedulerBackend match {
case dummy: DummySchedulerBackend => assert(dummy.initialized)
case other => fail(s"wrong scheduler backend: ${other}")
}
sc.taskScheduler match {
case dummy: DummyTaskScheduler => assert(dummy.initialized)
case other => fail(s"wrong task scheduler: ${other}")
}
}
}
/**
* Super basic ExternalClusterManager, just to verify ExternalClusterManagers can be configured.
*
* Note that if you want a special ClusterManager for tests, you are probably much more interested
* in [[MockExternalClusterManager]] and the corresponding [[SchedulerIntegrationSuite]]
*/
private class DummyExternalClusterManager extends ExternalClusterManager {
def canCreate(masterURL: String): Boolean = masterURL == "myclusterManager"
def createTaskScheduler(sc: SparkContext,
masterURL: String): TaskScheduler = new DummyTaskScheduler
def createSchedulerBackend(sc: SparkContext,
masterURL: String,
scheduler: TaskScheduler): SchedulerBackend = new DummySchedulerBackend()
def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
scheduler.asInstanceOf[DummyTaskScheduler].initialized = true
backend.asInstanceOf[DummySchedulerBackend].initialized = true
}
}
private class DummySchedulerBackend extends SchedulerBackend {
var initialized = false
def start() {}
def stop() {}
def reviveOffers() {}
def defaultParallelism(): Int = 1
def maxNumConcurrentTasks(): Int = 0
}
private class DummyTaskScheduler extends TaskScheduler {
var initialized = false
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorMetrics: ExecutorMetrics): Boolean = true
}
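// Editor's note (not part of the original test): for canCreate("myclusterManager") to ever be
// consulted, DummyExternalClusterManager has to be discoverable via java.util.ServiceLoader,
// i.e. the test classpath is expected to contain a
// META-INF/services/org.apache.spark.scheduler.ExternalClusterManager file listing its
// fully qualified class name.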
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/TaskState.scala
|
package org.apache.spark
private[spark] object TaskState extends Enumeration {
val LAUNCHING, RUNNING, FINISHED, FAILED, KILLED, LOST = Value
private val FINISHED_STATES = Set(FINISHED, FAILED, KILLED, LOST)
type TaskState = Value
def isFailed(state: TaskState): Boolean = (LOST == state) || (FAILED == state)
def isFinished(state: TaskState): Boolean = FINISHED_STATES.contains(state)
}
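// Illustrative sketch (editor's addition, not part of the original file): the helpers are plain
// membership checks over the enumeration.
private[spark] object TaskStateExample {
  def main(args: Array[String]): Unit = {
    import TaskState._
    assert(isFinished(KILLED) && !isFailed(KILLED)) // only FAILED and LOST count as "failed"
    assert(isFailed(LOST) && isFinished(LOST))
  }
}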
|
CrazyTechnology/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Command.scala
|
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.expressions.Attribute
/**
* A logical node that represents a non-query command to be executed by the system. For example,
* commands can be used by parsers to represent DDL operations. Commands, unlike queries, are
* eagerly executed.
*/
trait Command extends LogicalPlan {
override def output: Seq[Attribute] = Seq.empty
override def children: Seq[LogicalPlan] = Seq.empty
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/DriverState.scala
|
package org.apache.spark.deploy.master
private[deploy] object DriverState extends Enumeration {
type DriverState = Value
// SUBMITTED: Submitted but not yet scheduled on a worker
// RUNNING: Has been allocated to a worker to run
// FINISHED: Previously ran and exited cleanly
// RELAUNCHING: Exited non-zero or due to worker failure, but has not yet started running again
// UNKNOWN: The state of the driver is temporarily not known due to master failure recovery
// KILLED: A user manually killed this driver
// FAILED: The driver exited non-zero and was not supervised
// ERROR: Unable to run or restart due to an unrecoverable error (e.g. missing jar file)
val SUBMITTED, RUNNING, FINISHED, RELAUNCHING, UNKNOWN, KILLED, FAILED, ERROR = Value
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala
|
package org.apache.spark.deploy.history
import java.io.{File, FileNotFoundException, IOException}
import java.nio.file.Files
import java.util.{Date, ServiceLoader}
import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Future, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.ExecutionException
import scala.io.Source
import scala.util.Try
import scala.xml.Node
import com.fasterxml.jackson.annotation.JsonIgnore
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.MoreExecutors
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.hdfs.{DFSInputStream, DistributedFileSystem}
import org.apache.hadoop.hdfs.protocol.HdfsConstants
import org.apache.hadoop.security.AccessControlException
import org.fusesource.leveldbjni.internal.NativeDB
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.DRIVER_LOG_DFS_DIR
import org.apache.spark.internal.config.History._
import org.apache.spark.internal.config.Status._
import org.apache.spark.io.CompressionCodec
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.ReplayListenerBus._
import org.apache.spark.status._
import org.apache.spark.status.KVUtils._
import org.apache.spark.status.api.v1.{ApplicationAttemptInfo, ApplicationInfo}
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
import org.apache.spark.util.kvstore._
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*
* == How new and updated attempts are detected ==
*
* - New attempts are detected in [[checkForLogs]]: the log dir is scanned, and any entries in the
* log dir whose size changed since the last scan time are considered new or updated. These are
* replayed to create a new attempt info entry and update or create a matching application info
* element in the list of applications.
* - Updated attempts are also found in [[checkForLogs]] -- if the attempt's log file has grown, the
* attempt is replaced by another one with a larger log size.
*
* The use of log size, rather than simply relying on modification times, is needed to
* address the following issues
* - some filesystems do not appear to update the `modtime` value whenever data is flushed to
* an open file output stream. Changes to the history may not be picked up.
* - the granularity of the `modtime` field may be 2+ seconds. Rapid changes to the FS can be
* missed.
*
* Tracking filesize works given the following invariant: the logs get bigger
* as new events are added. If a format was used in which this did not hold, the mechanism would
* break. Simple streaming of JSON-formatted events, as is implemented today, implicitly
* maintains this invariant.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
def this(conf: SparkConf) = {
this(conf, new SystemClock())
}
import FsHistoryProvider._
// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
"spark.history.fs.safemodeCheck.interval", "5s")
// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")
// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.get(CLEANER_INTERVAL_S)
// Number of threads used to replay event logs.
private val NUM_PROCESSING_THREADS = conf.getInt(SPARK_HISTORY_FS_NUM_REPLAY_THREADS,
Math.ceil(Runtime.getRuntime.availableProcessors() / 4f).toInt)
private val logDir = conf.get(EVENT_LOG_DIR)
private val HISTORY_UI_ACLS_ENABLE = conf.getBoolean("spark.history.ui.acls.enable", false)
private val HISTORY_UI_ADMIN_ACLS = conf.get("spark.history.ui.admin.acls", "")
private val HISTORY_UI_ADMIN_ACLS_GROUPS = conf.get("spark.history.ui.admin.acls.groups", "")
logInfo(s"History server ui acls " + (if (HISTORY_UI_ACLS_ENABLE) "enabled" else "disabled") +
"; users with admin permissions: " + HISTORY_UI_ADMIN_ACLS.toString +
"; groups with admin permissions: " + HISTORY_UI_ADMIN_ACLS_GROUPS.toString)
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
// Visible for testing
private[history] val fs: FileSystem = new Path(logDir).getFileSystem(hadoopConf)
// Used by check event thread and clean log thread.
// The scheduled thread pool size must be one, otherwise there will be concurrency issues
// between the check task and the clean task over the fs and application state.
private val pool = ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-history-task-%d")
// The modification time of the newest log detected during the last scan. Currently only
// used for logging msgs (logs are re-scanned based on file size, rather than modtime)
private val lastScanTime = new java.util.concurrent.atomic.AtomicLong(-1)
private val pendingReplayTasksCount = new java.util.concurrent.atomic.AtomicInteger(0)
private val storePath = conf.get(LOCAL_STORE_DIR).map(new File(_))
private val fastInProgressParsing = conf.get(FAST_IN_PROGRESS_PARSING)
// Visible for testing.
private[history] val listing: KVStore = storePath.map { path =>
val dbPath = Files.createDirectories(new File(path, "listing.ldb").toPath()).toFile()
Utils.chmod700(dbPath)
val metadata = new FsHistoryProviderMetadata(CURRENT_LISTING_VERSION,
AppStatusStore.CURRENT_VERSION, logDir.toString())
try {
open(dbPath, metadata)
} catch {
// If there's an error, remove the listing database and any existing UI database
// from the store directory, since it's extremely likely that they'll all contain
// incompatible information.
case _: UnsupportedStoreVersionException | _: MetadataMismatchException =>
logInfo("Detected incompatible DB versions, deleting...")
path.listFiles().foreach(Utils.deleteRecursively)
open(dbPath, metadata)
case dbExc: NativeDB.DBException =>
// Get rid of the corrupted listing.ldb and re-create it.
logWarning(s"Failed to load disk store $dbPath :", dbExc)
Utils.deleteRecursively(dbPath)
open(dbPath, metadata)
}
}.getOrElse(new InMemoryStore())
private val diskManager = storePath.map { path =>
new HistoryServerDiskManager(conf, path, listing, clock)
}
private val blacklist = new ConcurrentHashMap[String, Long]
// Visible for testing
private[history] def isBlacklisted(path: Path): Boolean = {
blacklist.containsKey(path.getName)
}
private def blacklist(path: Path): Unit = {
blacklist.put(path.getName, clock.getTimeMillis())
}
/**
* Removes expired entries in the blacklist, according to the provided `expireTimeInSeconds`.
*/
private def clearBlacklist(expireTimeInSeconds: Long): Unit = {
val expiredThreshold = clock.getTimeMillis() - expireTimeInSeconds * 1000
blacklist.asScala.retain((_, creationTime) => creationTime >= expiredThreshold)
}
private val activeUIs = new mutable.HashMap[(String, Option[String]), LoadedAppUI]()
/**
* Return a runnable that performs the given operation on the event logs.
* This operation is expected to be executed periodically.
*/
private def getRunner(operateFun: () => Unit): Runnable = {
new Runnable() {
override def run(): Unit = Utils.tryOrExit {
operateFun()
}
}
}
/**
* Fixed size thread pool to fetch and parse log files.
*/
private val replayExecutor: ExecutorService = {
if (!Utils.isTesting) {
ThreadUtils.newDaemonFixedThreadPool(NUM_PROCESSING_THREADS, "log-replay-executor")
} else {
MoreExecutors.sameThreadExecutor()
}
}
val initThread = initialize()
private[history] def initialize(): Thread = {
if (!isFsInSafeMode()) {
startPolling()
null
} else {
startSafeModeCheckThread(None)
}
}
private[history] def startSafeModeCheckThread(
errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
// Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
// for the FS to leave safe mode before enabling polling. This allows the main history server
// UI to be shown (so that the user can see the HDFS status).
val initThread = new Thread(new Runnable() {
override def run(): Unit = {
try {
while (isFsInSafeMode()) {
logInfo("HDFS is still in safe mode. Waiting...")
val deadline = clock.getTimeMillis() +
TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
clock.waitTillTime(deadline)
}
startPolling()
} catch {
case _: InterruptedException =>
}
}
})
initThread.setDaemon(true)
initThread.setName(s"${getClass().getSimpleName()}-init")
initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
new Thread.UncaughtExceptionHandler() {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
logError("Error initializing FsHistoryProvider.", e)
System.exit(1)
}
}))
initThread.start()
initThread
}
private def startPolling(): Unit = {
diskManager.foreach(_.initialize())
// Validate the log directory.
val path = new Path(logDir)
try {
if (!fs.getFileStatus(path).isDirectory) {
throw new IllegalArgumentException(
"Logging directory specified is not a directory: %s".format(logDir))
}
} catch {
case f: FileNotFoundException =>
var msg = s"Log directory specified does not exist: $logDir"
if (logDir == DEFAULT_LOG_DIR) {
msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
}
throw new FileNotFoundException(msg).initCause(f)
}
// Disable the background thread during tests.
if (!conf.contains("spark.testing")) {
// A task that periodically checks for event log updates on disk.
logDebug(s"Scheduling update thread every $UPDATE_INTERVAL_S seconds")
pool.scheduleWithFixedDelay(
getRunner(() => checkForLogs()), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)
if (conf.get(CLEANER_ENABLED)) {
// A task that periodically cleans event logs on disk.
pool.scheduleWithFixedDelay(
getRunner(() => cleanLogs()), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
}
if (conf.contains(DRIVER_LOG_DFS_DIR) && conf.get(DRIVER_LOG_CLEANER_ENABLED)) {
pool.scheduleWithFixedDelay(getRunner(() => cleanDriverLogs()),
0,
conf.get(DRIVER_LOG_CLEANER_INTERVAL),
TimeUnit.SECONDS)
}
} else {
logDebug("Background update thread disabled for testing")
}
}
override def getListing(): Iterator[ApplicationInfo] = {
// Return the listing in end time descending order.
listing.view(classOf[ApplicationInfoWrapper])
.index("endTime")
.reverse()
.iterator()
.asScala
.map(_.toApplicationInfo())
}
override def getApplicationInfo(appId: String): Option[ApplicationInfo] = {
try {
Some(load(appId).toApplicationInfo())
} catch {
case _: NoSuchElementException =>
None
}
}
override def getEventLogsUnderProcess(): Int = pendingReplayTasksCount.get()
override def getLastUpdatedTime(): Long = lastScanTime.get()
override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
val app = try {
load(appId)
} catch {
case _: NoSuchElementException =>
return None
}
val attempt = app.attempts.find(_.info.attemptId == attemptId).orNull
if (attempt == null) {
return None
}
val conf = this.conf.clone()
val secManager = new SecurityManager(conf)
secManager.setAcls(HISTORY_UI_ACLS_ENABLE)
// make sure to set admin acls before view acls so they are properly picked up
secManager.setAdminAcls(HISTORY_UI_ADMIN_ACLS + "," + attempt.adminAcls.getOrElse(""))
secManager.setViewAcls(attempt.info.sparkUser, attempt.viewAcls.getOrElse(""))
secManager.setAdminAclsGroups(HISTORY_UI_ADMIN_ACLS_GROUPS + "," +
attempt.adminAclsGroups.getOrElse(""))
secManager.setViewAclsGroups(attempt.viewAclsGroups.getOrElse(""))
val kvstore = try {
diskManager match {
case Some(sm) =>
loadDiskStore(sm, appId, attempt)
case _ =>
createInMemoryStore(attempt)
}
} catch {
case _: FileNotFoundException =>
return None
}
val ui = SparkUI.create(None, new AppStatusStore(kvstore), conf, secManager, app.info.name,
HistoryServer.getAttemptURI(appId, attempt.info.attemptId),
attempt.info.startTime.getTime(),
attempt.info.appSparkVersion)
loadPlugins().foreach(_.setupUI(ui))
val loadedUI = LoadedAppUI(ui)
synchronized {
activeUIs((appId, attemptId)) = loadedUI
}
Some(loadedUI)
}
override def getEmptyListingHtml(): Seq[Node] = {
<p>
Did you specify the correct logging directory? Please verify your setting of
<span style="font-style:italic">spark.history.fs.logDirectory</span>
listed above and whether you have the permissions to access it.
<br/>
It is also possible that your application did not run to
completion or did not stop the SparkContext.
</p>
}
override def getConfig(): Map[String, String] = {
val safeMode = if (isFsInSafeMode()) {
Map("HDFS State" -> "In safe mode, application logs not available.")
} else {
Map()
}
Map("Event log directory" -> logDir.toString) ++ safeMode
}
override def stop(): Unit = {
try {
if (initThread != null && initThread.isAlive()) {
initThread.interrupt()
initThread.join()
}
Seq(pool, replayExecutor).foreach { executor =>
executor.shutdown()
if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
executor.shutdownNow()
}
}
} finally {
activeUIs.foreach { case (_, loadedUI) => loadedUI.ui.store.close() }
activeUIs.clear()
listing.close()
}
}
override def onUIDetached(appId: String, attemptId: Option[String], ui: SparkUI): Unit = {
val uiOption = synchronized {
activeUIs.remove((appId, attemptId))
}
uiOption.foreach { loadedUI =>
loadedUI.lock.writeLock().lock()
try {
loadedUI.ui.store.close()
} finally {
loadedUI.lock.writeLock().unlock()
}
diskManager.foreach { dm =>
// If the UI is not valid, delete its files from disk, if any. This relies on the fact that
// ApplicationCache will never call this method concurrently with getAppUI() for the same
// appId / attemptId.
dm.release(appId, attemptId, delete = !loadedUI.valid)
}
}
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
val newLastScanTime = clock.getTimeMillis()
logDebug(s"Scanning $logDir with lastScanTime==$lastScanTime")
val updated = Option(fs.listStatus(new Path(logDir))).map(_.toSeq).getOrElse(Nil)
.filter { entry =>
!entry.isDirectory() &&
// FsHistoryProvider used to generate a hidden file which can't be read. Accidentally
// reading a garbage file is safe, but we would log an error which can be scary to
// the end-user.
!entry.getPath().getName().startsWith(".") &&
!isBlacklisted(entry.getPath)
}
.filter { entry =>
try {
val info = listing.read(classOf[LogInfo], entry.getPath().toString())
if (info.appId.isDefined) {
// If the SHS view has a valid application, update the time the file was last seen so
// that the entry is not deleted from the SHS listing. Also update the file size, in
// case the code below decides we don't need to parse the log.
listing.write(info.copy(lastProcessed = newLastScanTime, fileSize = entry.getLen()))
}
if (shouldReloadLog(info, entry)) {
if (info.appId.isDefined && fastInProgressParsing) {
// When fast in-progress parsing is on, we don't need to re-parse when the
// size changes, but we do need to invalidate any existing UIs.
// Also, we need to update the `lastUpdated` time to display the updated time in
// the HistoryUI and to avoid cleaning the in-progress app while running.
val appInfo = listing.read(classOf[ApplicationInfoWrapper], info.appId.get)
val attemptList = appInfo.attempts.map { attempt =>
if (attempt.info.attemptId == info.attemptId) {
new AttemptInfoWrapper(
attempt.info.copy(lastUpdated = new Date(newLastScanTime)),
attempt.logPath,
attempt.fileSize,
attempt.adminAcls,
attempt.viewAcls,
attempt.adminAclsGroups,
attempt.viewAclsGroups)
} else {
attempt
}
}
val updatedAppInfo = new ApplicationInfoWrapper(appInfo.info, attemptList)
listing.write(updatedAppInfo)
invalidateUI(info.appId.get, info.attemptId)
false
} else {
true
}
} else {
false
}
} catch {
case _: NoSuchElementException =>
// If the file is currently not being tracked by the SHS, add an entry for it and try
// to parse it. This will allow the cleaner code to detect the file as stale later on
// if it was not possible to parse it.
listing.write(LogInfo(entry.getPath().toString(), newLastScanTime, LogType.EventLogs,
None, None, entry.getLen()))
entry.getLen() > 0
}
}
.sortWith { case (entry1, entry2) =>
entry1.getModificationTime() > entry2.getModificationTime()
}
if (updated.nonEmpty) {
logDebug(s"New/updated attempts found: ${updated.size} ${updated.map(_.getPath)}")
}
val tasks = updated.flatMap { entry =>
try {
val task: Future[Unit] = replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(entry, newLastScanTime, true)
}, Unit)
Some(task -> entry.getPath)
} catch {
// let the iteration over the updated entries break, since an exception on
// replayExecutor.submit (..) indicates the ExecutorService is unable
// to take any more submissions at this time
case e: Exception =>
logError(s"Exception while submitting event log for replay", e)
None
}
}
pendingReplayTasksCount.addAndGet(tasks.size)
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
tasks.foreach { case (task, path) =>
try {
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: ExecutionException if e.getCause.isInstanceOf[AccessControlException] =>
// We don't have read permissions on the log file
logWarning(s"Unable to read log $path", e.getCause)
blacklist(path)
case e: Exception =>
logError("Exception while merging application listings", e)
} finally {
pendingReplayTasksCount.decrementAndGet()
}
}
// Delete all information about applications whose log files disappeared from storage.
// This is done by identifying the event logs which were not touched by the current
// directory scan.
//
// Only entries with valid applications are cleaned up here. Cleaning up invalid log
// files is done by the periodic cleaner task.
val stale = listing.view(classOf[LogInfo])
.index("lastProcessed")
.last(newLastScanTime - 1)
.asScala
.toList
stale.foreach { log =>
log.appId.foreach { appId =>
cleanAppData(appId, log.attemptId, log.logPath)
listing.delete(classOf[LogInfo], log.logPath)
}
}
lastScanTime.set(newLastScanTime)
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
private[history] def shouldReloadLog(info: LogInfo, entry: FileStatus): Boolean = {
var result = info.fileSize < entry.getLen
if (!result && info.logPath.endsWith(EventLoggingListener.IN_PROGRESS)) {
try {
result = Utils.tryWithResource(fs.open(entry.getPath)) { in =>
in.getWrappedStream match {
case dfsIn: DFSInputStream => info.fileSize < dfsIn.getFileLength
case _ => false
}
}
} catch {
case e: Exception =>
logDebug(s"Failed to check the length for the file : ${info.logPath}", e)
}
}
result
}
private def cleanAppData(appId: String, attemptId: Option[String], logPath: String): Unit = {
try {
val app = load(appId)
val (attempt, others) = app.attempts.partition(_.info.attemptId == attemptId)
assert(attempt.isEmpty || attempt.size == 1)
val isStale = attempt.headOption.exists { a =>
if (a.logPath != new Path(logPath).getName()) {
// If the log file name does not match, then probably the old log file was from an
// in progress application. Just return that the app should be left alone.
false
} else {
val maybeUI = synchronized {
activeUIs.remove(appId -> attemptId)
}
maybeUI.foreach { ui =>
ui.invalidate()
ui.ui.store.close()
}
diskManager.foreach(_.release(appId, attemptId, delete = true))
true
}
}
if (isStale) {
if (others.nonEmpty) {
val newAppInfo = new ApplicationInfoWrapper(app.info, others)
listing.write(newAppInfo)
} else {
listing.delete(classOf[ApplicationInfoWrapper], appId)
}
}
} catch {
case _: NoSuchElementException =>
}
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = file.getFileSystem(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
inputStream.close()
}
}
val app = try {
load(appId)
} catch {
case _: NoSuchElementException =>
throw new SparkException(s"Logs for $appId not found.")
}
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
attemptId
.map { id => app.attempts.filter(_.info.attemptId == Some(id)) }
.getOrElse(app.attempts)
.map(_.logPath)
.foreach { log =>
zipFileToStream(new Path(logDir, log), log, zipStream)
}
} finally {
zipStream.close()
}
}
/**
* Replay the given log file, saving the application in the listing db.
*/
protected def mergeApplicationListing(
fileStatus: FileStatus,
scanTime: Long,
enableOptimizations: Boolean): Unit = {
val eventsFilter: ReplayEventsFilter = { eventString =>
eventString.startsWith(APPL_START_EVENT_PREFIX) ||
eventString.startsWith(APPL_END_EVENT_PREFIX) ||
eventString.startsWith(LOG_START_EVENT_PREFIX) ||
eventString.startsWith(ENV_UPDATE_EVENT_PREFIX)
}
val logPath = fileStatus.getPath()
val appCompleted = isCompleted(logPath.getName())
val reparseChunkSize = conf.get(END_EVENT_REPARSE_CHUNK_SIZE)
// Enable halt support in listener if:
// - app in progress && fast parsing enabled
// - skipping to end event is enabled (regardless of in-progress state)
val shouldHalt = enableOptimizations &&
((!appCompleted && fastInProgressParsing) || reparseChunkSize > 0)
val bus = new ReplayListenerBus()
val listener = new AppListingListener(fileStatus, clock, shouldHalt)
bus.addListener(listener)
logInfo(s"Parsing $logPath for listing data...")
Utils.tryWithResource(EventLoggingListener.openEventLog(logPath, fs)) { in =>
bus.replay(in, logPath.toString, !appCompleted, eventsFilter)
}
// If enabled above, the listing listener will halt parsing when there's enough information to
// create a listing entry. When the app is completed, or fast parsing is disabled, we still need
// to replay until the end of the log file to try to find the app end event. Instead of reading
// and parsing line by line, this code skips bytes from the underlying stream so that it is
// positioned somewhere close to the end of the log file.
//
// Because the application end event is written while some Spark subsystems such as the
// scheduler are still active, there is no guarantee that the end event will be the last
// in the log. So, to be safe, the code uses a configurable chunk to be re-parsed at
// the end of the file, and retries parsing the whole log later if the needed data is
// still not found.
//
// Note that skipping bytes in compressed files is still not cheap, but there are still some
// minor gains over the normal log parsing done by the replay bus.
//
// This code re-opens the file so that it knows where it's skipping to. This isn't as cheap as
// just skipping from the current position, but there isn't a good way to detect what the
// current position is, since the replay listener bus buffers data internally.
val lookForEndEvent = shouldHalt && (appCompleted || !fastInProgressParsing)
if (lookForEndEvent && listener.applicationInfo.isDefined) {
Utils.tryWithResource(EventLoggingListener.openEventLog(logPath, fs)) { in =>
val target = fileStatus.getLen() - reparseChunkSize
if (target > 0) {
logInfo(s"Looking for end event; skipping $target bytes from $logPath...")
var skipped = 0L
while (skipped < target) {
skipped += in.skip(target - skipped)
}
}
val source = Source.fromInputStream(in).getLines()
// Because skipping may leave the stream in the middle of a line, read the next line
// before replaying.
if (target > 0) {
source.next()
}
bus.replay(source, logPath.toString, !appCompleted, eventsFilter)
}
}
logInfo(s"Finished parsing $logPath")
listener.applicationInfo match {
case Some(app) if !lookForEndEvent || app.attempts.head.info.completed =>
// In this case, we either didn't care about the end event, or we found it. So the
// listing data is good.
invalidateUI(app.info.id, app.attempts.head.info.attemptId)
addListing(app)
listing.write(LogInfo(logPath.toString(), scanTime, LogType.EventLogs, Some(app.info.id),
app.attempts.head.info.attemptId, fileStatus.getLen()))
// For a finished log, remove the corresponding "in progress" entry from the listing DB if
// the file is really gone.
if (appCompleted) {
val inProgressLog = logPath.toString() + EventLoggingListener.IN_PROGRESS
try {
// Fetch the entry first to avoid an RPC when it's already removed.
listing.read(classOf[LogInfo], inProgressLog)
if (!fs.isFile(new Path(inProgressLog))) {
listing.delete(classOf[LogInfo], inProgressLog)
}
} catch {
case _: NoSuchElementException =>
}
}
case Some(_) =>
// In this case, the attempt is still not marked as finished even though it was expected
// to be. This can mean the end event lies before the chunk configured for re-parsing, so
// call the method again to re-parse the whole log.
logInfo(s"Reparsing $logPath since end event was not found.")
mergeApplicationListing(fileStatus, scanTime, false)
case _ =>
// If the app hasn't written down its app ID to the logs, still record the entry in the
// listing db, with an empty ID. This will make the log eligible for deletion if the app
// does not make progress after the configured max log age.
listing.write(
LogInfo(logPath.toString(), scanTime, LogType.EventLogs, None, None, fileStatus.getLen()))
}
}
/**
* Invalidate an existing UI for a given app attempt. See LoadedAppUI for a discussion on the
* UI lifecycle.
*/
private def invalidateUI(appId: String, attemptId: Option[String]): Unit = {
synchronized {
activeUIs.get((appId, attemptId)).foreach { ui =>
ui.invalidate()
ui.ui.store.close()
}
}
}
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = Utils.tryLog {
val maxTime = clock.getTimeMillis() - conf.get(MAX_LOG_AGE_S) * 1000
val expired = listing.view(classOf[ApplicationInfoWrapper])
.index("oldestAttempt")
.reverse()
.first(maxTime)
.asScala
.toList
expired.foreach { app =>
// Applications may have multiple attempts, some of which may not need to be deleted yet.
val (remaining, toDelete) = app.attempts.partition { attempt =>
attempt.info.lastUpdated.getTime() >= maxTime
}
if (remaining.nonEmpty) {
val newApp = new ApplicationInfoWrapper(app.info, remaining)
listing.write(newApp)
}
toDelete.foreach { attempt =>
logInfo(s"Deleting expired event log for ${attempt.logPath}")
val logPath = new Path(logDir, attempt.logPath)
listing.delete(classOf[LogInfo], logPath.toString())
cleanAppData(app.id, attempt.info.attemptId, logPath.toString())
deleteLog(fs, logPath)
}
if (remaining.isEmpty) {
listing.delete(app.getClass(), app.id)
}
}
// Delete log files that don't have a valid application and exceed the configured max age.
val stale = listing.view(classOf[LogInfo])
.index("lastProcessed")
.reverse()
.first(maxTime)
.asScala
.filter { l => l.logType == null || l.logType == LogType.EventLogs }
.toList
stale.foreach { log =>
if (log.appId.isEmpty) {
logInfo(s"Deleting invalid / corrupt event log ${log.logPath}")
deleteLog(fs, new Path(log.logPath))
listing.delete(classOf[LogInfo], log.logPath)
}
}
// Clean the blacklist from the expired entries.
clearBlacklist(CLEAN_INTERVAL_S)
}
/**
* Delete driver logs from the configured spark dfs dir that exceed the configured max age
*/
private[history] def cleanDriverLogs(): Unit = Utils.tryLog {
val driverLogDir = conf.get(DRIVER_LOG_DFS_DIR).get
val driverLogFs = new Path(driverLogDir).getFileSystem(hadoopConf)
val currentTime = clock.getTimeMillis()
val maxTime = currentTime - conf.get(MAX_DRIVER_LOG_AGE_S) * 1000
val logFiles = driverLogFs.listLocatedStatus(new Path(driverLogDir))
while (logFiles.hasNext()) {
val f = logFiles.next()
// Do not rely on 'modtime' as it is not updated for all filesystems when files are written to
val deleteFile =
try {
val info = listing.read(classOf[LogInfo], f.getPath().toString())
// Update the lastProcessed time of the file if its length or modification time has changed
if (info.fileSize < f.getLen() || info.lastProcessed < f.getModificationTime()) {
listing.write(
info.copy(lastProcessed = currentTime, fileSize = f.getLen()))
false
} else if (info.lastProcessed > maxTime) {
false
} else {
true
}
} catch {
case e: NoSuchElementException =>
// For every new driver log file discovered, create a new entry in listing
listing.write(LogInfo(f.getPath().toString(), currentTime, LogType.DriverLogs, None,
None, f.getLen()))
false
}
if (deleteFile) {
logInfo(s"Deleting expired driver log for: ${f.getPath().getName()}")
listing.delete(classOf[LogInfo], f.getPath().toString())
deleteLog(driverLogFs, f.getPath())
}
}
// Delete driver log file entries that exceed the configured max age and
// may have been deleted on filesystem externally.
val stale = listing.view(classOf[LogInfo])
.index("lastProcessed")
.reverse()
.first(maxTime)
.asScala
.filter { l => l.logType != null && l.logType == LogType.DriverLogs }
.toList
stale.foreach { log =>
logInfo(s"Deleting invalid driver log ${log.logPath}")
listing.delete(classOf[LogInfo], log.logPath)
deleteLog(driverLogFs, new Path(log.logPath))
}
}
/**
* Rebuilds the application state store from its event log.
*/
private def rebuildAppStore(
store: KVStore,
eventLog: FileStatus,
lastUpdated: Long): Unit = {
// Disable async updates, since they cause higher memory usage, and it's ok to take longer
// to parse the event logs in the SHS.
val replayConf = conf.clone().set(ASYNC_TRACKING_ENABLED, false)
val trackingStore = new ElementTrackingStore(store, replayConf)
val replayBus = new ReplayListenerBus()
val listener = new AppStatusListener(trackingStore, replayConf, false,
lastUpdateTime = Some(lastUpdated))
replayBus.addListener(listener)
for {
plugin <- loadPlugins()
listener <- plugin.createListeners(conf, trackingStore)
} replayBus.addListener(listener)
try {
val path = eventLog.getPath()
logInfo(s"Parsing $path to re-build UI...")
Utils.tryWithResource(EventLoggingListener.openEventLog(path, fs)) { in =>
replayBus.replay(in, path.toString(), maybeTruncated = !isCompleted(path.toString()))
}
trackingStore.close(false)
logInfo(s"Finished parsing $path")
} catch {
case e: Exception =>
Utils.tryLogNonFatalError {
trackingStore.close()
}
throw e
}
}
/**
* Checks whether HDFS is in safe mode.
*
* Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical reasons
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
false
}
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
/* true to check only for Active NNs status */
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)
}
/**
* String description for diagnostics
* @return a summary of the component state
*/
override def toString: String = {
val count = listing.count(classOf[ApplicationInfoWrapper])
s"""|FsHistoryProvider{logdir=$logDir,
| storedir=$storePath,
| last scan time=$lastScanTime
| application count=$count}""".stripMargin
}
private def load(appId: String): ApplicationInfoWrapper = {
listing.read(classOf[ApplicationInfoWrapper], appId)
}
/**
* Write the app's information to the given store. Serialized to avoid the (admittedly rare) case
* where two threads are processing separate attempts of the same application.
*/
private def addListing(app: ApplicationInfoWrapper): Unit = listing.synchronized {
val attempt = app.attempts.head
val oldApp = try {
load(app.id)
} catch {
case _: NoSuchElementException =>
app
}
def compareAttemptInfo(a1: AttemptInfoWrapper, a2: AttemptInfoWrapper): Boolean = {
a1.info.startTime.getTime() > a2.info.startTime.getTime()
}
val attempts = oldApp.attempts.filter(_.info.attemptId != attempt.info.attemptId) ++
List(attempt)
val newAppInfo = new ApplicationInfoWrapper(
app.info,
attempts.sortWith(compareAttemptInfo))
listing.write(newAppInfo)
}
private def loadDiskStore(
dm: HistoryServerDiskManager,
appId: String,
attempt: AttemptInfoWrapper): KVStore = {
val metadata = new AppStatusStoreMetadata(AppStatusStore.CURRENT_VERSION)
// First check if the store already exists and try to open it. If that fails, then get rid of
// the existing data.
dm.openStore(appId, attempt.info.attemptId).foreach { path =>
try {
return KVUtils.open(path, metadata)
} catch {
case e: Exception =>
logInfo(s"Failed to open existing store for $appId/${attempt.info.attemptId}.", e)
dm.release(appId, attempt.info.attemptId, delete = true)
}
}
// At this point the disk data either does not exist or was deleted because it failed to
// load, so the event log needs to be replayed.
val status = fs.getFileStatus(new Path(logDir, attempt.logPath))
val isCompressed = EventLoggingListener.codecName(status.getPath()).flatMap { name =>
Try(CompressionCodec.getShortName(name)).toOption
}.isDefined
logInfo(s"Leasing disk manager space for app $appId / ${attempt.info.attemptId}...")
val lease = dm.lease(status.getLen(), isCompressed)
val newStorePath = try {
Utils.tryWithResource(KVUtils.open(lease.tmpPath, metadata)) { store =>
rebuildAppStore(store, status, attempt.info.lastUpdated.getTime())
}
lease.commit(appId, attempt.info.attemptId)
} catch {
case e: Exception =>
lease.rollback()
throw e
}
KVUtils.open(newStorePath, metadata)
}
private def createInMemoryStore(attempt: AttemptInfoWrapper): KVStore = {
val store = new InMemoryStore()
val status = fs.getFileStatus(new Path(logDir, attempt.logPath))
rebuildAppStore(store, status, attempt.info.lastUpdated.getTime())
store
}
private def loadPlugins(): Iterable[AppHistoryServerPlugin] = {
ServiceLoader.load(classOf[AppHistoryServerPlugin], Utils.getContextOrSparkClassLoader).asScala
}
/** For testing. Returns internal data about a single attempt. */
private[history] def getAttempt(appId: String, attemptId: Option[String]): AttemptInfoWrapper = {
load(appId).attempts.find(_.info.attemptId == attemptId).getOrElse(
throw new NoSuchElementException(s"Cannot find attempt $attemptId of $appId."))
}
private def deleteLog(fs: FileSystem, log: Path): Unit = {
if (isBlacklisted(log)) {
logDebug(s"Skipping deleting $log as we don't have permissions on it.")
} else {
try {
fs.delete(log, true)
} catch {
case _: AccessControlException =>
logInfo(s"No permission to delete $log, ignoring.")
case ioe: IOException =>
logError(s"IOException in cleaning $log", ioe)
}
}
}
private def isCompleted(name: String): Boolean = {
!name.endsWith(EventLoggingListener.IN_PROGRESS)
}
}
private[history] object FsHistoryProvider {
private val SPARK_HISTORY_FS_NUM_REPLAY_THREADS = "spark.history.fs.numReplayThreads"
private val APPL_START_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationStart\""
private val APPL_END_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationEnd\""
private val LOG_START_EVENT_PREFIX = "{\"Event\":\"SparkListenerLogStart\""
private val ENV_UPDATE_EVENT_PREFIX = "{\"Event\":\"SparkListenerEnvironmentUpdate\","
/**
* Current version of the data written to the listing database. When opening an existing
* db, if the version does not match this value, the FsHistoryProvider will throw away
* all data and re-generate the listing data from the event logs.
*/
private[history] val CURRENT_LISTING_VERSION = 1L
}
private[history] case class FsHistoryProviderMetadata(
version: Long,
uiVersion: Long,
logDir: String)
private[history] object LogType extends Enumeration {
val DriverLogs, EventLogs = Value
}
/**
* Tracking info for event logs detected in the configured log directory. Tracks both valid and
* invalid logs (e.g. unparseable logs, recorded as logs with no app ID) so that the cleaner
* can know what log files are safe to delete.
*/
private[history] case class LogInfo(
@KVIndexParam logPath: String,
@KVIndexParam("lastProcessed") lastProcessed: Long,
logType: LogType.Value,
appId: Option[String],
attemptId: Option[String],
fileSize: Long)
private[history] class AttemptInfoWrapper(
val info: ApplicationAttemptInfo,
val logPath: String,
val fileSize: Long,
val adminAcls: Option[String],
val viewAcls: Option[String],
val adminAclsGroups: Option[String],
val viewAclsGroups: Option[String])
private[history] class ApplicationInfoWrapper(
val info: ApplicationInfo,
val attempts: List[AttemptInfoWrapper]) {
@JsonIgnore @KVIndexParam
def id: String = info.id
@JsonIgnore @KVIndexParam("endTime")
def endTime(): Long = attempts.head.info.endTime.getTime()
@JsonIgnore @KVIndexParam("oldestAttempt")
def oldestAttempt(): Long = attempts.map(_.info.lastUpdated.getTime()).min
def toApplicationInfo(): ApplicationInfo = info.copy(attempts = attempts.map(_.info))
}
private[history] class AppListingListener(
log: FileStatus,
clock: Clock,
haltEnabled: Boolean) extends SparkListener {
private val app = new MutableApplicationInfo()
private val attempt = new MutableAttemptInfo(log.getPath().getName(), log.getLen())
private var gotEnvUpdate = false
private var halted = false
override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
app.id = event.appId.orNull
app.name = event.appName
attempt.attemptId = event.appAttemptId
attempt.startTime = new Date(event.time)
attempt.lastUpdated = new Date(clock.getTimeMillis())
attempt.sparkUser = event.sparkUser
checkProgress()
}
override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
attempt.endTime = new Date(event.time)
attempt.lastUpdated = new Date(log.getModificationTime())
attempt.duration = event.time - attempt.startTime.getTime()
attempt.completed = true
}
override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = {
// Only parse the first env update, since any future changes don't have any effect on
// the ACLs set for the UI.
if (!gotEnvUpdate) {
val allProperties = event.environmentDetails("Spark Properties").toMap
attempt.viewAcls = allProperties.get("spark.ui.view.acls")
attempt.adminAcls = allProperties.get("spark.admin.acls")
attempt.viewAclsGroups = allProperties.get("spark.ui.view.acls.groups")
attempt.adminAclsGroups = allProperties.get("spark.admin.acls.groups")
gotEnvUpdate = true
checkProgress()
}
}
override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
case SparkListenerLogStart(sparkVersion) =>
attempt.appSparkVersion = sparkVersion
case _ =>
}
def applicationInfo: Option[ApplicationInfoWrapper] = {
if (app.id != null) {
Some(app.toView())
} else {
None
}
}
/**
* Throws a halt exception to stop replay if enough data to create the app listing has been
* read.
*/
private def checkProgress(): Unit = {
if (haltEnabled && !halted && app.id != null && gotEnvUpdate) {
halted = true
throw new HaltReplayException()
}
}
private class MutableApplicationInfo {
var id: String = null
var name: String = null
var coresGranted: Option[Int] = None
var maxCores: Option[Int] = None
var coresPerExecutor: Option[Int] = None
var memoryPerExecutorMB: Option[Int] = None
def toView(): ApplicationInfoWrapper = {
val apiInfo = ApplicationInfo(id, name, coresGranted, maxCores, coresPerExecutor,
memoryPerExecutorMB, Nil)
new ApplicationInfoWrapper(apiInfo, List(attempt.toView()))
}
}
private class MutableAttemptInfo(logPath: String, fileSize: Long) {
var attemptId: Option[String] = None
var startTime = new Date(-1)
var endTime = new Date(-1)
var lastUpdated = new Date(-1)
var duration = 0L
var sparkUser: String = null
var completed = false
var appSparkVersion = ""
var adminAcls: Option[String] = None
var viewAcls: Option[String] = None
var adminAclsGroups: Option[String] = None
var viewAclsGroups: Option[String] = None
def toView(): AttemptInfoWrapper = {
val apiInfo = ApplicationAttemptInfo(
attemptId,
startTime,
endTime,
lastUpdated,
duration,
sparkUser,
completed,
appSparkVersion)
new AttemptInfoWrapper(
apiInfo,
logPath,
fileSize,
adminAcls,
viewAcls,
adminAclsGroups,
viewAclsGroups)
}
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/Aggregator.scala
|
package org.apache.spark
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.collection.ExternalAppendOnlyMap
/**
* :: DeveloperApi ::
* A set of functions used to aggregate data.
*
* @param createCombiner function to create the initial value of the aggregation.
* @param mergeValue function to merge a new value into the aggregation result.
* @param mergeCombiners function to merge outputs from multiple mergeValue functions.
*/
@DeveloperApi
case class Aggregator[K, V, C] (
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C) {
def combineValuesByKey(
iter: Iterator[_ <: Product2[K, V]],
context: TaskContext): Iterator[(K, C)] = {
val combiners = new ExternalAppendOnlyMap[K, V, C](createCombiner, mergeValue, mergeCombiners)
combiners.insertAll(iter)
updateMetrics(context, combiners)
combiners.iterator
}
def combineCombinersByKey(
iter: Iterator[_ <: Product2[K, C]],
context: TaskContext): Iterator[(K, C)] = {
val combiners = new ExternalAppendOnlyMap[K, C, C](identity, mergeCombiners, mergeCombiners)
combiners.insertAll(iter)
updateMetrics(context, combiners)
combiners.iterator
}
/** Update task metrics after populating the external map. */
private def updateMetrics(context: TaskContext, map: ExternalAppendOnlyMap[_, _, _]): Unit = {
Option(context).foreach { c =>
c.taskMetrics().incMemoryBytesSpilled(map.memoryBytesSpilled)
c.taskMetrics().incDiskBytesSpilled(map.diskBytesSpilled)
c.taskMetrics().incPeakExecutionMemory(map.peakMemoryUsedBytes)
}
}
}
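// Illustrative usage sketch (not part of the original file): an Aggregator whose combiner
// keeps a running (sum, count) per key, from which per-key averages can be derived.
// The object and value names below are hypothetical and exist only for this example.
private object AggregatorUsageExample {
  val averageAggregator: Aggregator[String, Double, (Double, Long)] =
    Aggregator[String, Double, (Double, Long)](
      createCombiner = (v: Double) => (v, 1L), // the first value seen for a key starts the combiner
      mergeValue = (c: (Double, Long), v: Double) => (c._1 + v, c._2 + 1L), // fold another value into the combiner
      mergeCombiners = (c1: (Double, Long), c2: (Double, Long)) =>
        (c1._1 + c2._1, c1._2 + c2._2)) // merge partial results from different partitions
}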
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperLeaderElectionAgent.scala
|
package org.apache.spark.deploy.master
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging {
val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"
private var zk: CuratorFramework = _
private var leaderLatch: LeaderLatch = _
private var status = LeadershipStatus.NOT_LEADER
start()
private def start() {
logInfo("Starting ZooKeeper LeaderElection agent")
zk = SparkCuratorUtil.newClient(conf)
leaderLatch = new LeaderLatch(zk, WORKING_DIR)
leaderLatch.addListener(this)
leaderLatch.start()
}
override def stop() {
leaderLatch.close()
zk.close()
}
override def isLeader() {
synchronized {
// could have lost leadership by now.
if (!leaderLatch.hasLeadership) {
return
}
logInfo("We have gained leadership")
updateLeadershipStatus(true)
}
}
override def notLeader() {
synchronized {
// could have gained leadership by now.
if (leaderLatch.hasLeadership) {
return
}
logInfo("We have lost leadership")
updateLeadershipStatus(false)
}
}
private def updateLeadershipStatus(isLeader: Boolean) {
if (isLeader && status == LeadershipStatus.NOT_LEADER) {
status = LeadershipStatus.LEADER
masterInstance.electedLeader()
} else if (!isLeader && status == LeadershipStatus.LEADER) {
status = LeadershipStatus.NOT_LEADER
masterInstance.revokedLeadership()
}
}
private object LeadershipStatus extends Enumeration {
type LeadershipStatus = Value
val LEADER, NOT_LEADER = Value
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/WorkerState.scala
|
package org.apache.spark.deploy.master
private[master] object WorkerState extends Enumeration {
type WorkerState = Value
val ALIVE, DEAD, DECOMMISSIONED, UNKNOWN = Value
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerSource.scala
|
package org.apache.spark.scheduler
import com.codahale.metrics.{Gauge, MetricRegistry, Timer}
import org.apache.spark.metrics.source.Source
private[scheduler] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
extends Source {
override val metricRegistry = new MetricRegistry()
override val sourceName = "DAGScheduler"
metricRegistry.register(MetricRegistry.name("stage", "failedStages"), new Gauge[Int] {
override def getValue: Int = dagScheduler.failedStages.size
})
metricRegistry.register(MetricRegistry.name("stage", "runningStages"), new Gauge[Int] {
override def getValue: Int = dagScheduler.runningStages.size
})
metricRegistry.register(MetricRegistry.name("stage", "waitingStages"), new Gauge[Int] {
override def getValue: Int = dagScheduler.waitingStages.size
})
metricRegistry.register(MetricRegistry.name("job", "allJobs"), new Gauge[Int] {
override def getValue: Int = dagScheduler.numTotalJobs
})
metricRegistry.register(MetricRegistry.name("job", "activeJobs"), new Gauge[Int] {
override def getValue: Int = dagScheduler.activeJobs.size
})
/** Timer that tracks the time to process messages in the DAGScheduler's event loop */
val messageProcessingTimer: Timer =
metricRegistry.timer(MetricRegistry.name("messageProcessingTime"))
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/util/taskListeners.scala
|
package org.apache.spark.util
import java.util.EventListener
import org.apache.spark.TaskContext
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
*
* Listener providing a callback function to invoke when a task's execution completes.
*/
@DeveloperApi
trait TaskCompletionListener extends EventListener {
def onTaskCompletion(context: TaskContext): Unit
}
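// Illustrative sketch (not part of the original file): a listener that closes a resource
// once the task finishes, whether it succeeded or failed. The class name and constructor
// parameter are hypothetical.
private[spark] class CloseOnTaskCompletion(resource: java.io.Closeable) extends TaskCompletionListener {
  override def onTaskCompletion(context: TaskContext): Unit = resource.close()
}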
/**
* :: DeveloperApi ::
*
* Listener providing a callback function to invoke when a task's execution encounters an error.
* Operations defined here must be idempotent, as `onTaskFailure` can be called multiple times.
*/
@DeveloperApi
trait TaskFailureListener extends EventListener {
def onTaskFailure(context: TaskContext, error: Throwable): Unit
}
/**
* Exception thrown when there is an exception in executing the callback in TaskCompletionListener.
*/
private[spark]
class TaskCompletionListenerException(
errorMessages: Seq[String],
val previousError: Option[Throwable] = None)
extends RuntimeException {
override def getMessage: String = {
val listenerErrorMessage =
if (errorMessages.size == 1) {
errorMessages.head
} else {
errorMessages.zipWithIndex.map { case (msg, i) => s"Exception $i: $msg" }.mkString("\n")
}
val previousErrorMessage = previousError.map { e =>
"\n\nPrevious exception in task: " + e.getMessage + "\n" +
e.getStackTrace.mkString("\t", "\n\t", "")
}.getOrElse("")
listenerErrorMessage + previousErrorMessage
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperPersistenceEngine.scala
|
package org.apache.spark.deploy.master
import java.nio.ByteBuffer
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer
private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
extends PersistenceEngine
with Logging {
private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)
SparkCuratorUtil.mkdir(zk, WORKING_DIR)
override def persist(name: String, obj: Object): Unit = {
serializeIntoFile(WORKING_DIR + "/" + name, obj)
}
override def unpersist(name: String): Unit = {
zk.delete().forPath(WORKING_DIR + "/" + name)
}
override def read[T: ClassTag](prefix: String): Seq[T] = {
zk.getChildren.forPath(WORKING_DIR).asScala
.filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
}
override def close() {
zk.close()
}
private def serializeIntoFile(path: String, value: AnyRef) {
val serialized = serializer.newInstance().serialize(value)
val bytes = new Array[Byte](serialized.remaining())
serialized.get(bytes)
zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
}
private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
try {
Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
} catch {
case e: Exception =>
logWarning("Exception while reading persisted file, deleting", e)
zk.delete().forPath(WORKING_DIR + "/" + filename)
None
}
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala
|
package org.apache.spark.scheduler
/**
* A backend interface for scheduling systems that allows plugging in different ones under
* TaskSchedulerImpl. We assume a Mesos-like model where the application gets resource offers as
* machines become available and can launch tasks on them.
* In other words: the backend interface of the scheduling system, which allows different implementations to be plugged in under TaskSchedulerImpl.
* We assume a Mesos-like model where, as machines become available, the application receives resource offers and can launch tasks on them.
*/
private[spark] trait SchedulerBackend {
private val appId = "spark-application-" + System.currentTimeMillis
def start(): Unit
def stop(): Unit
def reviveOffers(): Unit
def defaultParallelism(): Int
/**
* Requests that an executor kills a running task.
*
* @param taskId Id of the task.
* @param executorId Id of the executor the task is running on.
* @param interruptThread Whether the executor should interrupt the task thread.
* @param reason The reason for the task kill.
*/
def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit =
throw new UnsupportedOperationException
def isReady(): Boolean = true
/**
* Get an application ID associated with the job.
*
* @return An application ID
*/
def applicationId(): String = appId
/**
* Get the attempt ID for this run, if the cluster manager supports multiple
* attempts. Applications run in client mode will not have attempt IDs.
*
* @return The application attempt id, if available.
*/
def applicationAttemptId(): Option[String] = None
/**
* Get the URLs for the driver logs. These URLs are used to display the links in the UI
* Executors tab for the driver.
* @return Map containing the log names and their respective URLs
*/
def getDriverLogUrls: Option[Map[String, String]] = None
/**
* Get the max number of tasks that can be launched concurrently right now.
* Note that the value returned by this method should not be cached, because the number can
* change when executors are added or removed.
*
* @return The max number of tasks that can be launched concurrently right now.
*/
def maxNumConcurrentTasks(): Int
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/broadcast/BroadcastManager.scala
|
package org.apache.spark.broadcast
import java.util.concurrent.atomic.AtomicLong
import scala.reflect.ClassTag
import org.apache.commons.collections.map.{AbstractReferenceMap, ReferenceMap}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging
private[spark] class BroadcastManager(
val isDriver: Boolean,
conf: SparkConf,
securityManager: SecurityManager)
extends Logging {
private var initialized = false
private var broadcastFactory: BroadcastFactory = null
initialize()
// Called by SparkContext or Executor before using Broadcast
private def initialize() {
synchronized {
if (!initialized) {
broadcastFactory = new TorrentBroadcastFactory
broadcastFactory.initialize(isDriver, conf, securityManager)
initialized = true
}
}
}
def stop() {
broadcastFactory.stop()
}
private val nextBroadcastId = new AtomicLong(0)
private[broadcast] val cachedValues = {
new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK)
}
def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
}
def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/master/ApplicationState.scala
|
package org.apache.spark.deploy.master
private[master] object ApplicationState extends Enumeration {
type ApplicationState = Value
val WAITING, RUNNING, FINISHED, FAILED, KILLED, UNKNOWN = Value
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/metrics/source/Source.scala
|
package org.apache.spark.metrics.source
import com.codahale.metrics.MetricRegistry
private[spark] trait Source {
def sourceName: String
def metricRegistry: MetricRegistry
}
|
CrazyTechnology/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/package.scala
|
package org.apache.spark.sql
/**
* Support for running Spark SQL queries using functionality from Apache Hive (does not require an
* existing Hive installation). Supported Hive features include:
* - Using HiveQL to express queries.
* - Reading metadata from the Hive Metastore using HiveSerDes.
* - Hive UDFs, UDAs, UDTs
*
* Users that would like access to this functionality should create a
* [[hive.HiveContext HiveContext]] instead of a [[SQLContext]].
*/
package object hive
|
CrazyTechnology/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Relation.scala
|
package org.apache.spark.sql.execution.datasources.v2
import java.util.UUID
import scala.collection.JavaConverters._
import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, NamedRelation}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.writer.BatchWriteSupport
import org.apache.spark.sql.types.StructType
/**
* A logical plan representing a data source v2 scan.
*
* @param source An instance of a [[DataSourceV2]] implementation.
* @param options The options for this scan. Used to create fresh [[BatchWriteSupport]].
* @param userSpecifiedSchema The user-specified schema for this scan.
*/
case class DataSourceV2Relation(
// TODO: remove `source` when we finish API refactor for write.
source: TableProvider,
table: SupportsBatchRead,
output: Seq[AttributeReference],
options: Map[String, String],
userSpecifiedSchema: Option[StructType] = None)
extends LeafNode with MultiInstanceRelation with NamedRelation {
import DataSourceV2Relation._
override def name: String = table.name()
override def simpleString(maxFields: Int): String = {
s"RelationV2${truncatedString(output, "[", ", ", "]", maxFields)} $name"
}
def newWriteSupport(): BatchWriteSupport = source.createWriteSupport(options, schema)
def newScanBuilder(): ScanBuilder = {
val dsOptions = new DataSourceOptions(options.asJava)
table.newScanBuilder(dsOptions)
}
override def computeStats(): Statistics = {
val scan = newScanBuilder().build()
scan match {
case r: SupportsReportStatistics =>
val statistics = r.estimateStatistics()
Statistics(sizeInBytes = statistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
case _ =>
Statistics(sizeInBytes = conf.defaultSizeInBytes)
}
}
override def newInstance(): DataSourceV2Relation = {
copy(output = output.map(_.newInstance()))
}
}
/**
* A specialization of [[DataSourceV2Relation]] with the streaming bit set to true.
*
* Note that, this plan has a mutable reader, so Spark won't apply operator push-down for this plan,
* to avoid making the plan mutable. We should consolidate this plan and [[DataSourceV2Relation]]
* after we figure out how to apply operator push-down for streaming data sources.
*/
case class StreamingDataSourceV2Relation(
output: Seq[AttributeReference],
source: DataSourceV2,
options: Map[String, String],
readSupport: ReadSupport,
scanConfigBuilder: ScanConfigBuilder)
extends LeafNode with MultiInstanceRelation with DataSourceV2StringFormat {
override def isStreaming: Boolean = true
override def simpleString(maxFields: Int): String = {
"Streaming RelationV2 " + metadataString(maxFields)
}
override def pushedFilters: Seq[Expression] = Nil
override def newInstance(): LogicalPlan = copy(output = output.map(_.newInstance()))
// TODO: unify the equal/hashCode implementation for all data source v2 query plans.
override def equals(other: Any): Boolean = other match {
case other: StreamingDataSourceV2Relation =>
output == other.output && readSupport.getClass == other.readSupport.getClass &&
options == other.options
case _ => false
}
override def hashCode(): Int = {
Seq(output, source, options).hashCode()
}
override def computeStats(): Statistics = readSupport match {
case r: OldSupportsReportStatistics =>
val statistics = r.estimateStatistics(scanConfigBuilder.build())
Statistics(sizeInBytes = statistics.sizeInBytes().orElse(conf.defaultSizeInBytes))
case _ =>
Statistics(sizeInBytes = conf.defaultSizeInBytes)
}
}
object DataSourceV2Relation {
private implicit class SourceHelpers(source: DataSourceV2) {
def asWriteSupportProvider: BatchWriteSupportProvider = {
source match {
case provider: BatchWriteSupportProvider =>
provider
case _ =>
throw new AnalysisException(s"Data source is not writable: $name")
}
}
def name: String = {
source match {
case registered: DataSourceRegister =>
registered.shortName()
case _ =>
source.getClass.getSimpleName
}
}
def createWriteSupport(
options: Map[String, String],
schema: StructType): BatchWriteSupport = {
asWriteSupportProvider.createBatchWriteSupport(
UUID.randomUUID().toString,
schema,
SaveMode.Append,
new DataSourceOptions(options.asJava)).get
}
}
def create(
provider: TableProvider,
table: SupportsBatchRead,
options: Map[String, String],
userSpecifiedSchema: Option[StructType] = None): DataSourceV2Relation = {
val output = table.schema().toAttributes
DataSourceV2Relation(provider, table, output, options, userSpecifiedSchema)
}
// TODO: remove this when we finish API refactor for write.
def createRelationForWrite(
source: DataSourceV2,
options: Map[String, String]): DataSourceV2Relation = {
val provider = source.asInstanceOf[TableProvider]
val dsOptions = new DataSourceOptions(options.asJava)
val table = provider.getTable(dsOptions)
create(provider, table.asInstanceOf[SupportsBatchRead], options)
}
}
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClientListener.scala
|
package org.apache.spark.deploy.client
/**
* Callbacks invoked by deploy client when various events happen. There are currently five events:
* connecting to the cluster, disconnecting, being given an executor, having an executor removed
* (either due to failure or due to revocation), and having a worker removed.
*
* Users of this API should *not* block inside the callback methods.
*/
private[spark] trait StandaloneAppClientListener {
def connected(appId: String): Unit
/** Disconnection may be a temporary state, as we fail over to a new Master. */
def disconnected(): Unit
/** An application death is an unrecoverable failure condition. */
def dead(reason: String): Unit
def executorAdded(
fullId: String, workerId: String, hostPort: String, cores: Int, memory: Int): Unit
def executorRemoved(
fullId: String, message: String, exitStatus: Option[Int], workerLost: Boolean): Unit
def workerRemoved(workerId: String, host: String, message: String): Unit
}
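// Illustrative sketch (not part of the original file): a listener that merely logs each
// callback and returns immediately, so it never blocks. The class name is hypothetical.
private[spark] class LoggingAppClientListener extends StandaloneAppClientListener
  with org.apache.spark.internal.Logging {
  override def connected(appId: String): Unit = logInfo(s"Connected to master as $appId")
  override def disconnected(): Unit = logInfo("Disconnected from master; may fail over")
  override def dead(reason: String): Unit = logError(s"Application died: $reason")
  override def executorAdded(
      fullId: String, workerId: String, hostPort: String, cores: Int, memory: Int): Unit =
    logInfo(s"Executor $fullId added on $hostPort ($cores cores, $memory MB RAM)")
  override def executorRemoved(
      fullId: String, message: String, exitStatus: Option[Int], workerLost: Boolean): Unit =
    logInfo(s"Executor $fullId removed: $message (exit status: $exitStatus)")
  override def workerRemoved(workerId: String, host: String, message: String): Unit =
    logInfo(s"Worker $workerId on $host removed: $message")
}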
|
CrazyTechnology/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/ExecutorData.scala
|
package org.apache.spark.scheduler.cluster
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef}
/**
* Grouping of data for an executor used by CoarseGrainedSchedulerBackend.
*
* @param executorEndpoint The RpcEndpointRef representing this executor
* @param executorAddress The network address of this executor
* @param executorHost The hostname that this executor is running on
* @param freeCores The current number of cores available for work on the executor
* @param totalCores The total number of cores available to the executor
*/
private[cluster] class ExecutorData(
val executorEndpoint: RpcEndpointRef,
val executorAddress: RpcAddress,
override val executorHost: String,
var freeCores: Int,
override val totalCores: Int,
override val logUrlMap: Map[String, String]
) extends ExecutorInfo(executorHost, totalCores, logUrlMap)
|
lamastex/spark-trend-calculus-examples
|
notebooks/db/03streamable-trend-calculus-estimators.scala
|
// Databricks notebook source
// MAGIC %md
// MAGIC # Markov Model for Trend Calculus
// MAGIC
// MAGIC <NAME>, <NAME> and <NAME>
// MAGIC
// MAGIC 2020, Uppsala, Sweden
// MAGIC
// MAGIC This project was supported by Combient Mix AB through summer internships at:
// MAGIC
// MAGIC Combient Competence Centre for Data Engineering Sciences,
// MAGIC Department of Mathematics,
// MAGIC Uppsala University, Uppsala, Sweden
// MAGIC
// MAGIC ## Resources
// MAGIC
// MAGIC This builds on the following library and its antecedents therein:
// MAGIC
// MAGIC - [https://github.com/lamastex/spark-trend-calculus](https://github.com/lamastex/spark-trend-calculus)
// MAGIC
// MAGIC
// MAGIC ## This work was inspired by:
// MAGIC
// MAGIC - <NAME>'s [texata-2017](https://github.com/aamend/texata-r2-2017)
// MAGIC - <NAME>'s [Trend Calculus Library](https://github.com/ByteSumoLtd/TrendCalculus-lua)
// COMMAND ----------
// MAGIC %md
// MAGIC We use the dataset generated in the last notebook to build a simple, proof-of-concept Markov model for predicting trends.
// COMMAND ----------
import java.sql.Timestamp
import io.delta.tables._
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.{GroupState, GroupStateTimeout, OutputMode, Trigger}
import org.apache.spark.sql.types._
import org.apache.spark.sql.expressions.{Window, WindowSpec}
import org.lamastex.spark.trendcalculus._
import scala.util.Random
// COMMAND ----------
dbutils.widgets.dropdown("m", "5", (1 to 10).map(_.toString).toSeq ++ Seq(15,20,25,30).map(_.toString) :+ "100")
dbutils.widgets.dropdown("n", "1", (1 to 3).map(_.toString).toSeq)
dbutils.widgets.dropdown("k", "max", (1 to 10).map(_.toString).toSeq :+ "max")
dbutils.widgets.dropdown("numTrainingSets", "10", (1 to 20).map( i => (i*5).toString).toSeq)
// COMMAND ----------
// MAGIC %md
// MAGIC Reading the joined dataset from the last notebook.
// MAGIC
// MAGIC We train the model using both oil and gold data and predict trends in oil data. We show that this yields better results than just training on the oil data.
// COMMAND ----------
val maxRevPath = "s3a://osint-gdelt-reado/canwrite/summerinterns2020/johannes/streamable-trend-calculus/maxRev"
val maxRevDS = spark.read.format("delta").load(maxRevPath).as[FlatReversal]
// COMMAND ----------
// MAGIC %md
// MAGIC We want to predict what the trend of the next data point will be given the trend reversals we have observed.
// MAGIC
// MAGIC For this, we use an m-th order Markov model. We look at the reversal state of the last `m` points and use this to predict the trends in the next `n` points. `k` is the maximum order of reversal that is considered when training the model.
// MAGIC
// MAGIC `trainingRatio` is the fraction of the data used for training the model; the rest is used for testing.
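// MAGIC
// MAGIC For example (illustrative): with `m = 3` and `n = 1`, a key is the sequence of reversal values at the last three points, e.g. `[0, 2, 0]`, and a value is the trend at the next point, e.g. `[1]` for an up trend.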
// COMMAND ----------
val modelPath = "s3a://osint-gdelt-reado/canwrite/summerinterns2020/johannes/streamable-trend-calculus/estimators/"
val maxRevDSWithLagCountPath = modelPath + "maxRevDSWithLag"
val numPartitions = dbutils.widgets.get("numTrainingSets").toInt // 5
val partialModelPaths = (1 to numPartitions).map( i => modelPath + s"partialModel${i}" )
val fullModelPath = modelPath + "fullModel"
val m = dbutils.widgets.get("m").toInt // 5
val n = dbutils.widgets.get("n").toInt // 1
val k = dbutils.widgets.get("k") match { // 17
case "max" => math.abs(maxRevDS.orderBy(abs($"reversal").desc).first.reversal) + 1
case _ => dbutils.widgets.get("k").toInt
}
val trainingRatio = 0.7
type FinalModel = Map[Seq[Int], Map[Seq[Int], Double]]
// COMMAND ----------
def truncRev(k: Int)(rev: Int): Int = {
if (math.abs(rev) > k) k*rev.signum else rev
}
val truncRevUDF = udf{ rev: Int => rev.signum }
def truncRevsUDF(k: Int) = udf{ revs: Seq[Int] => revs.map(truncRev(k)) }
def lagColumn(df: DataFrame, orderColumnName: String, lagKeyName: String, lagValueName: String, m: Int, n: Int): DataFrame = {
val windowSpec = Window.partitionBy("ticker").orderBy(orderColumnName)
val laggedKeyColNames = (1 to m).map( i => s"lagKey$i" ).toSeq
val laggedValueColNames = (1 to n).map( i => s"lagValue$i" ).toSeq
val dfWithLaggedKeyColumns = (n+1 to m+n)
.foldLeft(df)( (df: DataFrame, i: Int) => df.withColumn(laggedKeyColNames(i-n-1), lag(lagKeyName, i-1, Int.MaxValue).over(windowSpec)) )
val dfWithLaggedKeyValueColumns = (1 to n)
.foldLeft(dfWithLaggedKeyColumns)( (df: DataFrame, i: Int) => df.withColumn(laggedValueColNames(i-1), lag(lagValueName, i-1, Int.MaxValue).over(windowSpec)) )
dfWithLaggedKeyValueColumns
.withColumn("lagKey", array(laggedKeyColNames.reverse.take(m).map(col(_)):_*))
.withColumn("lagValue", array(laggedValueColNames.reverse.takeRight(n).map(col(_)):_*))
.withColumn("lagKeyFirst", col(laggedKeyColNames.last))
.filter($"lagKeyFirst" =!= Int.MaxValue)
.drop("lagKeyFirst")
.drop(laggedKeyColNames:_*)
.drop(laggedValueColNames:_*)
}
// COMMAND ----------
// MAGIC %md
// MAGIC The trend at each point can be extracted from the trend reversals by taking the sum of all previous first-order trend reversals (the signs of the reversal values). This sum is always either 0 (up trend) or -1 (down trend), and 0 is therefore mapped to 1 so that up and down are encoded as (1, -1).
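// MAGIC
// MAGIC For example (illustrative): the reversal sequence `[0, -1, 0, 1, 0]` has sign partial sums `[0, -1, -1, 0, 0]`, which map to the trends `[1, -1, -1, 1, 1]`.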
// COMMAND ----------
val maxRevDSWithLag = lagColumn(
maxRevDS
.orderBy("x")
.toDF
.withColumn("truncRev", truncRevUDF($"reversal"))
.withColumn("tmpTrend", sum("truncRev").over(Window.partitionBy("ticker").orderBy("x").rowsBetween(Window.unboundedPreceding, Window.currentRow)))
.withColumn("trend", when($"tmpTrend" === 0, 1).otherwise(-1))
.drop("truncRev", "tmpTrend"),
"x",
"reversal",
"trend",
m,
n
)
// COMMAND ----------
// MAGIC %md
// MAGIC We now want to predict `lagValue` from `lagKey`.
// COMMAND ----------
display(maxRevDSWithLag)
// COMMAND ----------
// MAGIC %md
// MAGIC Cleaning up last run and writing model training input to delta tables.
// COMMAND ----------
dbutils.fs.rm(maxRevDSWithLagCountPath, recurse=true)
maxRevDSWithLag
//.withColumn("lagValueTrunc", truncRevsUDF(1)($"lagValue"))
.withColumn("count", lit(1L))
.write
.format("delta")
.mode("overwrite")
.save(maxRevDSWithLagCountPath)
// COMMAND ----------
partialModelPaths.map(dbutils.fs.rm(_, recurse=true))
// COMMAND ----------
val divUDF = udf{ (a: Long, b: Long) => a.toDouble/b }
val maxRevDSWithLagCount = spark.read.format("delta").load(maxRevDSWithLagCountPath)
val numberOfRows = maxRevDSWithLagCount.count
// COMMAND ----------
// MAGIC %md
// MAGIC The data is split into training and testing data. This is *not* done randomly, since each data point depends on previous data points. We don't want to train on data that depends on the testing data, so the training data consists of the first (for example) 70% of the data and the last 30% is held out for testing. This also reflects how the model would be used, since we can only train on data points that have already been observed.
// COMMAND ----------
val tickers = maxRevDSWithLagCount.select("ticker").distinct.as[String].collect.toSeq
val tickerDFs = tickers.map( ticker => maxRevDSWithLagCount.filter($"ticker" === ticker))
val trainingDF = tickerDFs.map( df => df.limit((df.count*trainingRatio).toInt) ).reduce( _.union(_) ).orderBy("x")
val trainingRows = trainingDF.count
val testingDF = maxRevDSWithLagCount.except(trainingDF)
// COMMAND ----------
// MAGIC %md
// MAGIC Create `numTrainingSets` training sets of increasing size to get snapshots of what a partially trained model looks like. The sizes are spaced logarithmically since the model improves fastest in the beginning.
// COMMAND ----------
val rowsInPartitions = (1 to numPartitions).map{ i: Int => (math.exp(math.log(trainingRows)*i/numPartitions)).toInt }//.scanLeft(0.0)(_-_)
// COMMAND ----------
// MAGIC %md
// MAGIC The model is trained by counting how many times each (`lagKey`, `lagValue`) pair is observed and dividing by how many times `lagKey` is observed, giving an estimate of the transition probabilities.
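// MAGIC
// MAGIC That is, the transition probabilities are estimated by the empirical frequencies
// MAGIC
// MAGIC $$ \hat{P}(V \mid K) = \frac{\mathrm{count}(K, V)}{\mathrm{count}(K)} $$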
// COMMAND ----------
val keyValueCountPartialDFs = rowsInPartitions.map(
trainingDF
.limit(_)
.withColumn("keyValueObs", sum("count").over(Window.partitionBy($"lagKey", $"lagValue")))
.withColumn("totalKeyObs", sum("count").over(Window.partitionBy($"lagKey")))
.drop("count")
)
// COMMAND ----------
display(keyValueCountPartialDFs.last.orderBy($"keyValueObs".desc))
// COMMAND ----------
keyValueCountPartialDFs
.map( df =>
df
.withColumn("probability", divUDF($"keyValueObs", $"totalKeyObs"))
.drop("keyValueObs", "totalKeyObs")
)
.zip(partialModelPaths).map{ case (df: DataFrame, path: String) =>
df.write.mode("overwrite").format("delta").save(path)
}
// COMMAND ----------
val probDFs = partialModelPaths.map(spark.read.format("delta").load(_))
// COMMAND ----------
display(probDFs.last.orderBy("probability"))
// COMMAND ----------
// MAGIC %md
// MAGIC The prediction is given by taking
// MAGIC
// MAGIC $$ V \in argmax(P_K(V)) $$
// MAGIC
// MAGIC where *P_K(V)* is the probability that *V* is the next trend when the last `m` points have had reversals *K*. If argmax contains more than one element, one is chosen uniformly at random.
// COMMAND ----------
val aggWindow = Window.partitionBy("lagKey").orderBy('probability desc)
val testedDFs = probDFs
.map { df =>
val predictionDF = df
.select("lagKey", "lagValue", "probability")
.distinct
.withColumn("rank", rank().over(aggWindow))
.filter("rank == 1")
.groupBy("lagKey")
.agg(collect_list("lagValue"))
testingDF
.filter($"ticker" === "BCOUSD")
.join(predictionDF, Seq("lagKey"), "left")
.withColumnRenamed("collect_list(lagValue)", "test")
}
// COMMAND ----------
// MAGIC %md
// MAGIC We use a binary loss function that indicates if the prediction was correct or not.
// COMMAND ----------
val getRandomUDF = udf( (arr: Seq[Seq[Int]]) => {
val safeArr = Option(arr).getOrElse(Seq[Seq[Int]]())
if (safeArr.isEmpty) Seq[Int]() else safeArr(Random.nextInt(safeArr.size))
} )
val lossUDF = udf{ (value: Seq[Int], pred: Seq[Int]) =>
if (value == pred) 0 else 1
}
// COMMAND ----------
val lossDFs = testedDFs.map(_.withColumn("prediction", getRandomUDF($"test")).withColumn("loss", lossUDF($"lagValue", $"prediction")))
// COMMAND ----------
display(lossDFs.last)
// COMMAND ----------
val testLength = testingDF.count
val oilTestDF = testingDF.filter($"ticker" === "BCOUSD")
val oilTestLength = oilTestDF.count
// COMMAND ----------
// MAGIC %md
// MAGIC We find the mean loss for each training dataset of increasing size. As one can see, the loss decreases as more data is supplied.
// MAGIC
// MAGIC Further, training on both oil and gold data yields a better result than just oil, suggesting that trends behave similarly in the two commodities.
// COMMAND ----------
val losses = lossDFs.map( _.agg(sum("loss")).select($"sum(loss)".as("totalLoss")).collect.head.getLong(0).toDouble/oilTestLength )
// Data up to 2019
// k=max, m=2 ,n=1: (0.5489615950080244, 0.4014721726541194, 0.3660973816818738, 0.36497087640146797, 0.36485163238050344)
// k=max, m=10,n=1: (0.9812730246821787, 0.8523390131056561, 0.5819573469954631, 0.3552177121357085, 0.2807503135425113)
// k=max,m=100,n=1: (1.0, 1.0, 1.0, 1.0, 1.0)
// k=5 , m=10,n=1: (0.9812730246821787, 0.8522267831239777, 0.5820597568537447, 0.3550507700379618, 0.2806352778112909)
// k=1 , m=10,n=1: (0.9812730246821787, 0.852200128503329, 0.5821242890932098, 0.3553383593660128, 0.2806549180580846)
// k=max, m=10,n=2: (0.9879380657977248, 0.9049354606556204, 0.7243136776273427, 0.5472986906951395, 0.4783823708897465)
// k=max(17), m=10,n=1, 10 training sets: (0.9977203284971564, 0.9812730246821787, 0.9455375956409875, 0.8523810993487855, 0.7183644724769999, 0.5819320952495854, 0.44607349380350214, 0.35513915114853356, 0.3029480010437388, 0.280629666312207)
// Data up to last month
// k=max(18), m=10,n=1, 10 training sets: (0.9973104051296998, 0.9843882250072865, 0.9510789857184494, 0.8574351501020111, 0.7515744680851064, 0.6360547945205479, 0.5099656076945497, 0.4249921305741766, 0.3735668901194987, 0.34609501603031184)
// Could the difference be due to Corona?
// Trained on both oil and gold. testing on oil as previously.
// k=max(18), m=10,n=1, 10 training sets: (0.9999778490236083, 0.9980728650539201, 0.9527158262897114, 0.8921317400174876, 0.7559988341591373, 0.5820915185077237, 0.46675488195861264, 0.3923101136694841, 0.3574876129408336, 0.3355837948120082)
// COMMAND ----------
val trainingSizes = probDFs.map(_.count)
val lossesDS = sc
.parallelize(losses.zip(trainingSizes))
.toDF("loss", "size")
.withColumn("training", lit("Oil and Gold"))
.union(
sc
.parallelize(Seq(0.9973104051296998, 0.9843882250072865, 0.9510789857184494, 0.8574351501020111, 0.7515744680851064, 0.6360547945205479, 0.5099656076945497, 0.4249921305741766, 0.3735668901194987, 0.34609501603031184).zip(Seq(4, 18, 77, 331, 1414, 6036, 25759, 109918, 469036, 2001430)))
.toDF("loss", "size")
.withColumn("training", lit("Oil"))
)
.as[(Double,Long,String)]
// COMMAND ----------
display(lossesDS)
// COMMAND ----------
// MAGIC %md
// MAGIC We collect the models in order to calculate the total variation distance between them.
// COMMAND ----------
val partialModels = probDFs.map{df =>
val tmpMap = df.select("lagKey", "lagValue", "probability").distinct.collect.map{ r =>
(r.getAs[Seq[Int]](0), r.getAs[Seq[Int]](1), r.getDouble(2))
}.groupBy(_._1).mapValues(_.map(tup => Map(tup._2 -> tup._3)).flatten.toMap)
tmpMap: FinalModel
}
// COMMAND ----------
def totalVarDist(m1: FinalModel, m2: FinalModel): Map[Seq[Int], Double] = {
val allKeys = m1.keys.toSet.union(m2.keys.toSet)
val sharedKeys = m1.keys.toSet.intersect(m2.keys.toSet)
val totalVarDists = allKeys.toSeq.map{ key =>
if (!sharedKeys.contains(key)) {
1.0
} else {
val val1 = m1.getOrElse(key, Map())
val val2 = m2.getOrElse(key, Map())
val allValKeys = val1.keys.toSet.union(val2.keys.toSet)
allValKeys.map( valKey => 0.5*math.abs(val1.getOrElse(valKey, 0.0) - val2.getOrElse(valKey, 0.0)) ).sum
}
}
allKeys.zip(totalVarDists).toMap
}
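// COMMAND ----------
// Illustrative sketch (not part of the original notebook): totalVarDist applied to two tiny
// hand-written models; the names toyModelA and toyModelB are hypothetical. For the single
// shared key Seq(1), the distance is 0.5 * (|0.7 - 0.5| + |0.3 - 0.5|) = 0.2.
val toyModelA: FinalModel = Map(Seq(1) -> Map(Seq(1) -> 0.7, Seq(-1) -> 0.3))
val toyModelB: FinalModel = Map(Seq(1) -> Map(Seq(1) -> 0.5, Seq(-1) -> 0.5))
totalVarDist(toyModelA, toyModelB) // expected: Map(Seq(1) -> 0.2)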
// COMMAND ----------
val totalVariationDistances = partialModels.map( m1 => partialModels.map( m2 => totalVarDist(m1,m2) ) )
// COMMAND ----------
def aggToMatrix(totalVarDists: Seq[Seq[Map[Seq[Int],Double]]], aggFunc: Seq[Double] => Double): Seq[Seq[Double]] = {
totalVarDists.map(_.map( t => aggFunc(t.values.toSeq)))
}
def printMatrix(mat: Seq[Seq[Double]]): Unit = {
mat.map( s => { s.map( a => print(f"$a%2.3f ") ); println() } )
}
// COMMAND ----------
val maxDists = aggToMatrix(totalVariationDistances, s => s.max)
val minDists = aggToMatrix(totalVariationDistances, s => s.min)
val meanDists = aggToMatrix(totalVariationDistances, s => s.sum/s.size)
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC Each model is a mapping `{key: {value: probability}}` where `key` is a sequence of reversals and non-reversals of length `m`, `value` is a sequence of trends of length `n` and `probability` is the estimated probability that `value` is observed directly after `key`.
// MAGIC
// MAGIC Hence, for any two models A and B, we can calculate the total variation distance between the mappings `{value: probability}` for a given key in the union of the keys for A and B. If a key is not present in one of the models, the total variation distance is 1 for that key.
// MAGIC
// MAGIC In the matrix below, the (i,j)-th position is the arithmetic mean of the total variation distances for all keys in the union of the keysets. The matrix is symmetric with the smallest model in the top row and leftmost column and the largest model in the bottom row and rightmost column.
// MAGIC
// MAGIC If there are three models labeled M_1, M_2, M_3 and V_i_j is the arithmetic mean described above, the matrix is
// MAGIC
// MAGIC $$ \begin{matrix} V_{1,1} & V_{1,2} & V_{1,3} \\ V_{2,1} & V_{2,2} & V_{2,3} \\ V_{3,1} & V_{3,2} & V_{3,3} \end{matrix} $$
// MAGIC
// MAGIC As one can see, the models differ a lot from each other, suggesting that the estimate can still be improved given more data.
// COMMAND ----------
printMatrix(meanDists)
|
lamastex/spark-trend-calculus-examples
|
notebooks/db/gdelt-POI-detection.scala
|
// Databricks notebook source
// MAGIC %md
// MAGIC # Detecting Persons of Interest to OIL/GAS Price Trends
// MAGIC
// MAGIC <NAME>, <NAME> and <NAME>
// MAGIC
// MAGIC 2020, Uppsala, Sweden
// MAGIC
// MAGIC
// MAGIC This project was supported by Combient Mix AB through summer internships at:
// MAGIC
// MAGIC Combient Competence Centre for Data Engineering Sciences,
// MAGIC Department of Mathematics,
// MAGIC Uppsala University, Uppsala, Sweden
// MAGIC
// MAGIC See Example notebooks to detect events and persons or entities of interest
// MAGIC
// MAGIC - [notebooks/db/gdelt-EOI-detection](notebooks/db/gdelt-EOI-detection.md)
// MAGIC - [notebooks/db/gdelt-POI-detection](notebooks/db/gdelt-POI-detection.md)
// MAGIC
// MAGIC # Resources
// MAGIC
// MAGIC This builds on the following libraries and its antecedents therein:
// MAGIC
// MAGIC - [https://github.com/aamend/spark-gdelt](https://github.com/aamend/spark-gdelt)
// MAGIC - [https://github.com/lamastex/spark-trend-calculus](https://github.com/lamastex/spark-trend-calculus)
// MAGIC
// MAGIC
// MAGIC ## This work was inspired by:
// MAGIC
// MAGIC - <NAME>'s [texata-2017](https://github.com/aamend/texata-r2-2017)
// MAGIC - <NAME>'s [Trend Calculus Library](https://github.com/ByteSumoLtd/TrendCalculus-lua)
// COMMAND ----------
import spark.implicits._
import io.delta.tables._
import com.aamend.spark.gdelt._
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.functions.to_date
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions._
import java.sql.Date
import java.sql.Timestamp
import java.text.SimpleDateFormat
import org.apache.spark.sql.functions._
import org.graphframes.GraphFrame
// COMMAND ----------
val gkg_v1 = spark.read.format("delta").load("s3a://osint-gdelt-reado/GDELT/del/bronze/v1/gkg").as[GKGEventV1]
// COMMAND ----------
val gkg_v1_filt = gkg_v1.filter($"publishDate">"2013-04-01 00:00:00" && $"publishDate"<"2019-12-31 00:00:00")
val oil_gas_themeGKG = gkg_v1_filt.filter(c =>c.themes.contains("ENV_GAS") || c.themes.contains("ENV_OIL"))
// COMMAND ----------
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.graphframes._
val edges = oil_gas_themeGKG.select($"persons",$"numArticles")
.withColumn("src",explode($"persons"))
.withColumn("dst",explode($"persons"))
.filter($"src".notEqual($"dst") && $"src" =!= "" && $"dst" =!= "")
.groupBy($"src",$"dst")
.agg(sum("numArticles").as("count"))
.toDF()
val vertices = oil_gas_themeGKG.select($"persons",$"numArticles")
.withColumn("id",explode($"persons"))
.filter($"id" =!= "")
.drop($"persons")
.groupBy($"id")
.agg(sum("numArticles").as("numArticles"))
.toDF()
val pers_graph = GraphFrame(vertices,edges)
// COMMAND ----------
println("vertex count: " +pers_graph.vertices.count())
println("edge count: " + pers_graph.edges.count())
// COMMAND ----------
val fil_pers_graph = pers_graph.filterEdges($"count" >10).dropIsolatedVertices()
// COMMAND ----------
println("filtered vertex count: " +fil_pers_graph.vertices.count())
println("filtered edge count: " + fil_pers_graph.edges.count())
// COMMAND ----------
sc.setCheckpointDir("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/")
// COMMAND ----------
val comp_vertices = fil_pers_graph.connectedComponents.run()
comp_vertices.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/comp_vertices")
// COMMAND ----------
val comp_vertices = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/comp_vertices")
val comp_graph = GraphFrame(comp_vertices,fil_pers_graph.edges)
// COMMAND ----------
comp_graph.vertices.groupBy($"component").agg(count("component").as("count")).orderBy(desc("count")).show()
// COMMAND ----------
val big_comp_graph = comp_graph.filterVertices($"component" === 0)
// COMMAND ----------
val label_vertices = big_comp_graph.labelPropagation.maxIter(10).run()
// COMMAND ----------
label_vertices.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/label_vertices")
big_comp_graph.edges.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/label_edges")
// COMMAND ----------
val label_vertices = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/label_vertices")
val label_edges = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/label_edges")
// COMMAND ----------
val label_graph = GraphFrame(label_vertices,label_edges)
// COMMAND ----------
label_graph.vertices.show()
// COMMAND ----------
val com_rank_graph =label_graph.pageRank.resetProbability(0.15).tol(0.015).run()
// COMMAND ----------
com_rank_graph.vertices.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/com_rank_vertices")
com_rank_graph.edges.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/com_rank_edges")
// COMMAND ----------
val com_rank_vertices = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/com_rank_vertices")
val com_rank_edges =spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/person_graph/com_rank_edges")
val com_rank_graph = GraphFrame(com_rank_vertices,com_rank_edges)
// COMMAND ----------
com_rank_graph.vertices.groupBy($"label").agg(count($"label").as("count")).orderBy(desc("count")).show()
// COMMAND ----------
// MAGIC %md
// MAGIC # Look at the three top communities
// COMMAND ----------
val toplabel1 = com_rank_graph.filterVertices($"label" === 1520418423783L)
val toplabel2 = com_rank_graph.filterVertices($"label" === 8589934959L)
val toplabel3 =com_rank_graph.filterVertices($"label" === 1580547965452L)
// COMMAND ----------
toplabel1.vertices.orderBy(desc("pagerank")).show(100,false)
// COMMAND ----------
// print(deg(0).toInt) // leftover debug cell: `deg` is not defined in this notebook
// COMMAND ----------
val toplabel1Filt = toplabel1.filterVertices($"pagerank" >=55.47527731815801)//.filterEdges($"count">2000).dropIsolatedVertices()
// COMMAND ----------
val toplabel1FiltE = toplabel1Filt.filterEdges($"count">2000).dropIsolatedVertices()
// COMMAND ----------
import org.apache.spark.sql._
//import com.databricks.backend.daemon.driver.EnhancedRDDFunctions.displayHTML
case class Edge(src: String, dst: String, count: Long)
case class Node(name: String,importance: Double)
case class Link(source: Int, target: Int, value: Long)
case class Graph(nodes: Seq[Node], links: Seq[Link])
object graphs {
// val sqlContext = SQLContext.getOrCreate(org.apache.spark.SparkContext.getOrCreate()) /// fix
val sqlContext = SparkSession.builder().getOrCreate().sqlContext
import sqlContext.implicits._
def force(vertices: Dataset[Node],clicks: Dataset[Edge], height: Int = 100, width: Int = 960): Unit = {
val data = clicks.collect()
val nodes = vertices.collect()
val links = data.map { t =>
Link(nodes.indexWhere(_.name == t.src.replaceAll("_", " ")), nodes.indexWhere(_.name == t.dst.replaceAll("_", " ")), t.count / 20 + 1)
}
showGraph(height, width, Seq(Graph(nodes, links)).toDF().toJSON.first())
}
/**
* Displays a force directed graph using d3
* input: {"nodes": [{"name": "..."}], "links": [{"source": 1, "target": 2, "value": 0}]}
*/
def showGraph(height: Int, width: Int, graph: String): Unit = {
displayHTML(s"""
<style>
.node_circle {
stroke: #777;
stroke-width: 1.3px;
}
.node_label {
pointer-events: none;
}
.link {
stroke: #777;
stroke-opacity: .2;
}
.node_count {
stroke: #777;
stroke-width: 1.0px;
fill: #999;
}
text.legend {
font-family: Verdana;
font-size: 13px;
fill: #000;
}
.node text {
font-family: "Helvetica Neue","Helvetica","Arial",sans-serif;
font-size: function(d) {return (d.importance)+ "px"};
font-weight: 200;
}
</style>
<div id="clicks-graph">
<script src="//d3js.org/d3.v3.min.js"></script>
<script>
var graph = $graph;
var width = $width,
height = $height;
var color = d3.scale.category20();
var force = d3.layout.force()
.charge(-200)
.linkDistance(350)
.size([width, height]);
var svg = d3.select("#clicks-graph").append("svg")
.attr("width", width)
.attr("height", height);
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value)/10; });
var node = svg.selectAll(".node")
.data(graph.nodes)
.enter().append("g")
.attr("class", "node")
.call(force.drag);
node.append("circle")
.attr("r", function(d) { return Math.sqrt(d.importance); })
.style("fill", function (d) {
if (d.name.startsWith("other")) { return color(1); } else { return color(2); };
})
node.append("text")
.attr("dx", function(d) { return (Math.sqrt(d.importance)*30)/Math.sqrt(1661.1815574713858); })
.attr("dy", ".35em")
.text(function(d) { return d.name });
//Now we are giving the SVGs co-ordinates - the force layout is generating the co-ordinates which this code is using to update the attributes of the SVG elements
force.on("tick", function () {
link.attr("x1", function (d) {
return d.source.x;
})
.attr("y1", function (d) {
return d.source.y;
})
.attr("x2", function (d) {
return d.target.x;
})
.attr("y2", function (d) {
return d.target.y;
});
d3.selectAll("circle").attr("cx", function (d) {
return d.x;
})
.attr("cy", function (d) {
return d.y;
});
d3.selectAll("text").attr("x", function (d) {
return d.x;
})
.attr("y", function (d) {
return d.y;
});
});
</script>
</div>
""")
}
def help() = {
displayHTML("""
<p>
Produces a force-directed graph given a collection of edges of the following form:</br>
<tt><font color="#a71d5d">case class</font> <font color="#795da3">Edge</font>(<font color="#ed6a43">src</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">dst</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">count</font>: <font color="#a71d5d">Long</font>)</tt>
</p>
<p>Usage:<br/>
<tt><font color="#a71d5d">import</font> <font color="#ed6a43">d3._</font></tt><br/>
<tt><font color="#795da3">graphs.force</font>(</br>
<font color="#ed6a43">height</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">width</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">clicks</font>: <font color="#795da3">Dataset</font>[<font color="#795da3">Edge</font>])</tt>
</p>""")
}
}
graphs.force(
height = 800,
width = 1200,
clicks = toplabel1FiltE.edges.as[Edge],
vertices = toplabel1FiltE.vertices.select($"id".as("name"),$"pagerank".as("importance")).as[Node]
)
// COMMAND ----------
toplabel2.vertices.orderBy(desc("pagerank")).show(100,false)
// COMMAND ----------
val toplabel2Filt = toplabel2.filterVertices($"pagerank" >=7.410990956624706)
// COMMAND ----------
val toplabel2FiltE = toplabel2Filt.filterEdges($"count">136).dropIsolatedVertices()
// COMMAND ----------
import org.apache.spark.sql._
//import com.databricks.backend.daemon.driver.EnhancedRDDFunctions.displayHTML
case class Edge(src: String, dst: String, count: Long)
case class Node(name: String,importance: Double)
case class Link(source: Int, target: Int, value: Long)
case class Graph(nodes: Seq[Node], links: Seq[Link])
object graphs {
// val sqlContext = SQLContext.getOrCreate(org.apache.spark.SparkContext.getOrCreate()) /// fix
val sqlContext = SparkSession.builder().getOrCreate().sqlContext
import sqlContext.implicits._
def force(vertices: Dataset[Node],clicks: Dataset[Edge], height: Int = 100, width: Int = 960): Unit = {
val data = clicks.collect()
val nodes = vertices.collect()
val links = data.map { t =>
Link(nodes.indexWhere(_.name == t.src.replaceAll("_", " ")), nodes.indexWhere(_.name == t.dst.replaceAll("_", " ")), t.count / 20 + 1)
}
showGraph(height, width, Seq(Graph(nodes, links)).toDF().toJSON.first())
}
/**
* Displays a force directed graph using d3
* input: {"nodes": [{"name": "..."}], "links": [{"source": 1, "target": 2, "value": 0}]}
*/
def showGraph(height: Int, width: Int, graph: String): Unit = {
displayHTML(s"""
<style>
.node_circle {
stroke: #777;
stroke-width: 1.3px;
}
.node_label {
pointer-events: none;
}
.link {
stroke: #777;
stroke-opacity: .2;
}
.node_count {
stroke: #777;
stroke-width: 1.0px;
fill: #999;
}
text.legend {
font-family: Verdana;
font-size: 13px;
fill: #000;
}
.node text {
font-family: "Helvetica Neue","Helvetica","Arial",sans-serif;
font-size: function(d) {return (d.importance)+ "px"};
font-weight: 200;
}
</style>
<div id="clicks-graph">
<script src="//d3js.org/d3.v3.min.js"></script>
<script>
var graph = $graph;
var width = $width,
height = $height;
var color = d3.scale.category20();
var force = d3.layout.force()
.charge(-200)
.linkDistance(350)
.size([width, height]);
var svg = d3.select("#clicks-graph").append("svg")
.attr("width", width)
.attr("height", height);
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value)/10; });
var node = svg.selectAll(".node")
.data(graph.nodes)
.enter().append("g")
.attr("class", "node")
.call(force.drag);
node.append("circle")
.attr("r", function(d) { return Math.sqrt(d.importance); })
.style("fill", function (d) {
if (d.name.startsWith("other")) { return color(1); } else { return color(2); };
})
node.append("text")
.attr("dx", function(d) { return (Math.sqrt(d.importance)*30)/Math.sqrt(453.6031403843406); })
.attr("dy", ".35em")
.text(function(d) { return d.name });
//Now we are giving the SVGs co-ordinates - the force layout is generating the co-ordinates which this code is using to update the attributes of the SVG elements
force.on("tick", function () {
link.attr("x1", function (d) {
return d.source.x;
})
.attr("y1", function (d) {
return d.source.y;
})
.attr("x2", function (d) {
return d.target.x;
})
.attr("y2", function (d) {
return d.target.y;
});
d3.selectAll("circle").attr("cx", function (d) {
return d.x;
})
.attr("cy", function (d) {
return d.y;
});
d3.selectAll("text").attr("x", function (d) {
return d.x;
})
.attr("y", function (d) {
return d.y;
});
});
</script>
</div>
""")
}
def help() = {
displayHTML("""
<p>
Produces a force-directed graph given a collection of edges of the following form:</br>
<tt><font color="#a71d5d">case class</font> <font color="#795da3">Edge</font>(<font color="#ed6a43">src</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">dst</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">count</font>: <font color="#a71d5d">Long</font>)</tt>
</p>
<p>Usage:<br/>
<tt><font color="#a71d5d">import</font> <font color="#ed6a43">d3._</font></tt><br/>
<tt><font color="#795da3">graphs.force</font>(</br>
<font color="#ed6a43">height</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">width</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">clicks</font>: <font color="#795da3">Dataset</font>[<font color="#795da3">Edge</font>])</tt>
</p>""")
}
}
graphs.force(
height = 800,
width = 1200,
clicks = toplabel2FiltE.edges.as[Edge],
vertices = toplabel2FiltE.vertices.select($"id".as("name"),$"pagerank".as("importance")).as[Node]
)
// COMMAND ----------
toplabel3.vertices.orderBy(desc("pagerank")).show(100,false)
// COMMAND ----------
val toplabel3Filt = toplabel3.filterVertices($"pagerank" >=3.160183413696083).filterEdges($"count">4*18).dropIsolatedVertices()
// COMMAND ----------
val toplabel3FiltE = toplabel3Filt.filterEdges($"count">50).dropIsolatedVertices()
// COMMAND ----------
// We use a package object so that we can define top level classes like Edge that need to be used in other cells
// This was modified by <NAME> to make sure it is compatible with the latest Databricks notebook
import org.apache.spark.sql._
//import com.databricks.backend.daemon.driver.EnhancedRDDFunctions.displayHTML
case class Edge(src: String, dst: String, count: Long)
case class Node(name: String,importance: Double)
case class Link(source: Int, target: Int, value: Long)
case class Graph(nodes: Seq[Node], links: Seq[Link])
object graphs {
// val sqlContext = SQLContext.getOrCreate(org.apache.spark.SparkContext.getOrCreate()) /// fix
val sqlContext = SparkSession.builder().getOrCreate().sqlContext
import sqlContext.implicits._
def force(vertices: Dataset[Node],clicks: Dataset[Edge], height: Int = 100, width: Int = 960): Unit = {
val data = clicks.collect()
val nodes = vertices.collect()
val links = data.map { t =>
Link(nodes.indexWhere(_.name == t.src.replaceAll("_", " ")), nodes.indexWhere(_.name == t.dst.replaceAll("_", " ")), t.count / 20 + 1)
}
showGraph(height, width, Seq(Graph(nodes, links)).toDF().toJSON.first())
}
/**
* Displays a force directed graph using d3
* input: {"nodes": [{"name": "..."}], "links": [{"source": 1, "target": 2, "value": 0}]}
*/
def showGraph(height: Int, width: Int, graph: String): Unit = {
displayHTML(s"""
<style>
.node_circle {
stroke: #777;
stroke-width: 1.3px;
}
.node_label {
pointer-events: none;
}
.link {
stroke: #777;
stroke-opacity: .2;
}
.node_count {
stroke: #777;
stroke-width: 1.0px;
fill: #999;
}
text.legend {
font-family: Verdana;
font-size: 13px;
fill: #000;
}
.node text {
font-family: "Helvetica Neue","Helvetica","Arial",sans-serif;
font-size: function(d) {return (d.importance)+ "px"};
font-weight: 200;
}
</style>
<div id="clicks-graph">
<script src="//d3js.org/d3.v3.min.js"></script>
<script>
var graph = $graph;
var width = $width,
height = $height;
var color = d3.scale.category20();
var force = d3.layout.force()
.charge(-200)
.linkDistance(300)
.size([width, height]);
var svg = d3.select("#clicks-graph").append("svg")
.attr("width", width)
.attr("height", height);
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value)/3; });
var node = svg.selectAll(".node")
.data(graph.nodes)
.enter().append("g")
.attr("class", "node")
.call(force.drag);
node.append("circle")
.attr("r", function(d) { return (Math.sqrt(d.importance)*30)/Math.sqrt(98.7695771886648); })
.style("fill", function (d) {
if (d.name.startsWith("other")) { return color(1); } else { return color(2); };
})
node.append("text")
.attr("dx", function(d) { return (Math.sqrt(d.importance)*30)/Math.sqrt(26.343032735543023); })
.attr("dy", ".35em")
.text(function(d) { return d.name });
//Now we are giving the SVGs co-ordinates - the force layout is generating the co-ordinates which this code is using to update the attributes of the SVG elements
force.on("tick", function () {
link.attr("x1", function (d) {
return d.source.x;
})
.attr("y1", function (d) {
return d.source.y;
})
.attr("x2", function (d) {
return d.target.x;
})
.attr("y2", function (d) {
return d.target.y;
});
d3.selectAll("circle").attr("cx", function (d) {
return d.x;
})
.attr("cy", function (d) {
return d.y;
});
d3.selectAll("text").attr("x", function (d) {
return d.x;
})
.attr("y", function (d) {
return d.y;
});
});
</script>
</div>
""")
}
def help() = {
displayHTML("""
<p>
Produces a force-directed graph given a collection of edges of the following form:</br>
<tt><font color="#a71d5d">case class</font> <font color="#795da3">Edge</font>(<font color="#ed6a43">src</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">dst</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">count</font>: <font color="#a71d5d">Long</font>)</tt>
</p>
<p>Usage:<br/>
<tt><font color="#a71d5d">import</font> <font color="#ed6a43">d3._</font></tt><br/>
<tt><font color="#795da3">graphs.force</font>(</br>
<font color="#ed6a43">height</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">width</font> = <font color="#795da3">500</font>,<br/>
<font color="#ed6a43">clicks</font>: <font color="#795da3">Dataset</font>[<font color="#795da3">Edge</font>])</tt>
</p>""")
}
}
graphs.force(
height = 800,
width = 1200,
clicks = toplabel3FiltE.edges.as[Edge],
vertices = toplabel3FiltE.vertices.select($"id".as("name"),$"pagerank".as("importance")).as[Node]
)
// COMMAND ----------
|
lamastex/spark-trend-calculus-examples
|
notebooks/db/FX1M.scala
|
// Databricks notebook source
// MAGIC %md
// MAGIC # Historical Yahoo! Finance data
// MAGIC
// MAGIC <NAME>, <NAME> and <NAME>
// MAGIC
// MAGIC 2020, Uppsala, Sweden
// MAGIC
// MAGIC This project was supported by Combient Mix AB through summer internships at:
// MAGIC
// MAGIC Combient Competence Centre for Data Engineering Sciences,
// MAGIC Department of Mathematics,
// MAGIC Uppsala University, Uppsala, Sweden
// MAGIC
// MAGIC ## Resources
// MAGIC
// MAGIC This builds on the following repository:
// MAGIC
// MAGIC - https://github.com/philipperemy/FX-1-Minute-Data
// COMMAND ----------
// MAGIC %md
// MAGIC The [Trend Calculus library](https://github.com/lamastex/spark-trend-calculus) is needed for case classes and parsers for the data.
// COMMAND ----------
import org.lamastex.spark.trendcalculus._
// COMMAND ----------
val filePathRoot = "s3a://osint-gdelt-reado/findata/com/histdata/free/FX-1-Minute-Data/"
// COMMAND ----------
display(dbutils.fs.ls(filePathRoot))
// COMMAND ----------
val oilPath = filePathRoot + "bcousd/*.csv.gz"
val oilDS = spark.read.fx1m(oilPath).orderBy($"time")
// COMMAND ----------
display(oilDS)
|
lamastex/spark-trend-calculus-examples
|
notebooks/db/Overview.scala
|
<reponame>lamastex/spark-trend-calculus-examples
// Databricks notebook source
// MAGIC %md
// MAGIC # Trend Calculus of OIL Price
// MAGIC
// MAGIC <NAME>, <NAME> and <NAME>
// MAGIC
// MAGIC 2020, Uppsala, Sweden
// MAGIC
// MAGIC This work was inspired by <NAME>'s texata-2017 repository forked here:
// MAGIC
// MAGIC - https://github.com/lamastex/spark-texata-2020/
// MAGIC
// MAGIC and <NAME>'s Trend Calculus Library extended and adapted for Spark structured streams here:
// MAGIC
// MAGIC - https://github.com/lamastex/spark-trend-calculus
// MAGIC
// MAGIC
// MAGIC This project was supported by Combient Mix AB through summer internships at:
// MAGIC
// MAGIC Combient Competence Centre for Data Engineering Sciences,
// MAGIC Department of Mathematics,
// MAGIC Uppsala University, Uppsala, Sweden
// COMMAND ----------
// MAGIC %md
// MAGIC # Notebooks
// MAGIC
// MAGIC - [Showcasing Trend Calculus](notebooks/db/01trend-calculus-showcase.md)
// MAGIC - [Streaming Trend Calculus](notebooks/db/02streamable-trend-calculus.md)
// MAGIC - [Markov Model](notebooks/db/03streamable-trend-calculus-estimators.md)
|
lamastex/spark-trend-calculus-examples
|
notebooks/db/spark-gdelt-examples/gdelt-EOI-detection.scala
|
<reponame>lamastex/spark-trend-calculus-examples<filename>notebooks/db/spark-gdelt-examples/gdelt-EOI-detection.scala
// Databricks notebook source
// MAGIC %md
// MAGIC # Detecting Events of Interest to OIL/GAS Price Trends
// MAGIC
// MAGIC <NAME>, <NAME> and <NAME>
// MAGIC
// MAGIC 2020, Uppsala, Sweden
// MAGIC
// MAGIC
// MAGIC This project was supported by Combient Mix AB through summer internships at:
// MAGIC
// MAGIC Combient Competence Centre for Data Engineering Sciences,
// MAGIC Department of Mathematics,
// MAGIC Uppsala University, Uppsala, Sweden
// MAGIC
// MAGIC See Example notebooks to detect events and persons or entities of interest
// MAGIC
// MAGIC - [notebooks/db/gdelt-EOI-detection](notebooks/db/gdelt-EOI-detection.md)
// MAGIC - [notebooks/db/gdelt-POI-detection](notebooks/db/gdelt-POI-detection.md)
// MAGIC
// MAGIC # Resources
// MAGIC
// MAGIC This builds on the following libraries and their antecedents:
// MAGIC
// MAGIC - [https://github.com/aamend/spark-gdelt](https://github.com/aamend/spark-gdelt)
// MAGIC - [https://github.com/lamastex/spark-trend-calculus](https://github.com/lamastex/spark-trend-calculus)
// MAGIC
// MAGIC
// MAGIC ## This work was inspired by:
// MAGIC
// MAGIC - <NAME>'s [texata-2017](https://github.com/aamend/texata-r2-2017)
// MAGIC - <NAME>'s [Trend Calculus Library](https://github.com/ByteSumoLtd/TrendCalculus-lua)
// COMMAND ----------
import spark.implicits._
import io.delta.tables._
import com.aamend.spark.gdelt._
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.functions._
import org.apache.spark.sql.expressions._
import java.sql.Date
import org.apache.spark.sql.functions._
import org.lamastex.spark.trendcalculus._
import org.apache.spark.sql.functions.to_date
import java.sql.Date
import java.sql.Timestamp
import java.text.SimpleDateFormat
// COMMAND ----------
// MAGIC %fs
// MAGIC ls s3a://osint-gdelt-reado/GDELT/del/bronze/normdailycountry
// COMMAND ----------
val gkg_v1 = spark.read.format("delta").load("s3a://osint-gdelt-reado/GDELT/del/bronze/v1/gkg").as[GKGEventV1]
val eve_v1 = spark.read.format("delta").load("s3a://osint-gdelt-reado/GDELT/del/bronze/v1/events").as[EventV1]
// COMMAND ----------
val gkg_v1_filt = gkg_v1.filter($"publishDate">"2013-04-01 00:00:00" && $"publishDate"<"2019-12-31 00:00:00")
val oil_gas_themeGKG = gkg_v1_filt.filter(c =>c.themes.contains("ENV_GAS") || c.themes.contains("ENV_OIL"))
.select(explode($"eventIds"))
.toDF("eventId")
.groupBy($"eventId")
.agg(count($"eventId"))
.toDF("eventId","count")
// COMMAND ----------
val oil_gas_eventDF = eve_v1.toDF()
.join( oil_gas_themeGKG, "eventId")
oil_gas_eventDF.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_event_v1")
// COMMAND ----------
val oil_gas_eventDF = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_event_v1")
// COMMAND ----------
def movingAverage(df:DataFrame,size:Int,avgOn:String):DataFrame = {
val windowSpec = Window.partitionBy($"country").orderBy($"date").rowsBetween(-size/2, size/2)
return df.withColumn("coverage",avg(avgOn).over(windowSpec))
}
val oilEventTemp = oil_gas_eventDF.filter(length(col("eventGeo.countryCode")) > 0)
.groupBy(
col("eventGeo.countryCode").as("country"),
col("eventDay").as("date")
)
.agg(
sum(col("numArticles")).as("articles"),
avg(col("goldstein")).as("goldstein")
)
val (mean_articles, std_articles) = oilEventTemp.select(mean("articles"), stddev("articles"))
.as[(Double, Double)]
.first()
val oilEventWeeklyCoverage = movingAverage(oilEventTemp.withColumn("normArticles", ($"articles"-mean_articles) /std_articles) ,7,"normArticles")
oilEventWeeklyCoverage.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_cov_norm")
// COMMAND ----------
val oilEventWeeklyCoverage = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_cov_norm/")
// COMMAND ----------
val oilEventWeeklyCoverageC = oilEventWeeklyCoverage.drop($"goldstein").drop($"normArticles").drop($"articles").toDF("country","tempDate","coverage")
val oilEventCoverageDF = oilEventWeeklyCoverageC.join(oil_gas_eventDF,oil_gas_eventDF("eventDay") === oilEventWeeklyCoverageC("tempDate") && oil_gas_eventDF("eventGeo.countryCode")
=== oilEventWeeklyCoverageC("country"))
oilEventCoverageDF.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_eve_cov/")
// COMMAND ----------
val oil_gas_cov_norm = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_cov_norm/")
// COMMAND ----------
oil_gas_cov_norm
// COMMAND ----------
// MAGIC %md
// MAGIC # Let's look at 2018
// COMMAND ----------
display(oil_gas_cov_norm.filter($"date" >"2018-01-01" && $"date"<"2018-12-31").orderBy(desc("coverage")).limit(1000))
// COMMAND ----------
display(oil_gas_cov_norm.filter($"date" >"2018-01-01" && $"date"<"2018-12-31" && $"country" =!="US").orderBy(desc("coverage")).limit(1000))
// COMMAND ----------
//USA has so much more coverage than the rest
display(oil_gas_cov_norm.filter($"date" >"2018-01-01" && $"date"<"2018-12-31" && $"country" ==="US").orderBy(desc("coverage")).limit(1000))
// COMMAND ----------
val normData = spark.read.format("delta").load("s3a://osint-gdelt-reado/GDELT/del/bronze/normdailycountry/").as[EventNormDailyByCountry]
// COMMAND ----------
// MAGIC %md
// MAGIC # Enrich data with Trend Calculus
// COMMAND ----------
val oilData2018 = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/johannes/streamable-trend-calculus/oilData2018").withColumn("ticker",lit("oil")).select($"ticker",$"x",$"y").as[TickerPoint]
val trend_oil_2018 = new TrendCalculus2(oilData2018,2,spark).nReversalsJoinedWithMaxRev(10)
trend_oil_2018.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/trend_oil_2018")
// COMMAND ----------
val oil_data_all = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/johannes/streamable-trend-calculus/oilDataAll").withColumn("ticker",lit("oil")).select($"ticker",$"x",$"y").as[TickerPoint]
val trend_oil_all = new TrendCalculus2(oil_data_all,2,spark).nReversalsJoinedWithMaxRev(15)
trend_oil_all.write.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/trend_oil_all")
// COMMAND ----------
// MAGIC %md
// MAGIC # Python
// COMMAND ----------
// MAGIC %python
// MAGIC
// MAGIC from plotly.offline import plot
// MAGIC from plotly.graph_objs import *
// MAGIC from datetime import *
// MAGIC from pyspark.sql import functions as F
// MAGIC import pyspark.sql.functions
// MAGIC from pyspark.sql.functions import col, avg
// COMMAND ----------
// MAGIC %python
// MAGIC trend_oil_all = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/trend_oil_all")
// MAGIC
// MAGIC oil_gas_cov_us_2015_2018 = spark.read.parquet("s3a://osint-gdelt-reado/canwrite/summerinterns2020/albert/texata/oil_gas_cov_norm/").select(F.col('date'),F.col('country'),F.col('coverage')).filter(F.col('country') == 'US').drop('country').filter(F.col('date')>'2015-01-01').filter(F.col('date')<'2018-12-31')
// MAGIC
// MAGIC trend_oil_2015_2018 = trend_oil_all.filter(F.col('x')>'2015-01-01').filter(F.col('x')<'2018-12-31').orderBy(F.col('x'))
// MAGIC
// MAGIC max_price = trend_oil_2015_2018.agg({'y': 'max'}).first()[0]
// MAGIC min_price = trend_oil_2015_2018.agg({'y': 'min'}).first()[0]
// MAGIC trend_oil_2015_2018_2 =trend_oil_2015_2018.withColumn('sy', (F.col('y')-min_price)/(max_price-min_price))
// MAGIC
// MAGIC fullTS = trend_oil_2015_2018_2.filter("maxRev > 2").select("x","sy","maxRev").collect()
// MAGIC coverage =oil_gas_cov_us_2015_2018.collect()
// MAGIC
// MAGIC TS = [row for row in fullTS]
// COMMAND ----------
// MAGIC %python
// MAGIC oil_gas_cov_us_2015_2018.count()
// COMMAND ----------
// MAGIC %md
// MAGIC # 2015 - 2018
// COMMAND ----------
// MAGIC %python
// MAGIC numReversals = 15
// MAGIC startReversal = 7
// MAGIC
// MAGIC allData = {'x': [row['x'] for row in TS], 'y': [row['sy'] for row in TS], 'maxRev': [row['maxRev'] for row in TS]}
// MAGIC allDataCov = {'x': [row['date'] for row in coverage], 'y': [row['coverage'] for row in coverage]}
// MAGIC
// MAGIC temp2 = max(allDataCov['y'])-min(allDataCov['y'])
// MAGIC standardCoverage = list(map(lambda x: (x-min(allDataCov['y']))/temp2,allDataCov['y']))
// MAGIC
// MAGIC revTS = [row for row in TS if row[2] >= startReversal]
// MAGIC colorList = ['rgba(' + str(tmp) + ',' + str(255-tmp) + ',' + str(255-tmp) + ',1)' for tmp in [int(i*255/(numReversals-startReversal+1)) for i in range(1,numReversals-startReversal+2)]]
// MAGIC
// MAGIC def getRevTS(tsWithRevMax, revMax):
// MAGIC x = [row[0] for row in tsWithRevMax if row[2] >= revMax]
// MAGIC y = [row[1] for row in tsWithRevMax if row[2] >= revMax]
// MAGIC return x,y,revMax
// MAGIC
// MAGIC reducedData = [getRevTS(revTS, i) for i in range(startReversal, numReversals+1)]
// MAGIC
// MAGIC markerPlots = [Scattergl(x=x, y=y, mode='markers', marker=dict(color=colorList[i-startReversal], size=i), name='Reversal ' + str(i)) for (x,y,i) in [getRevTS(revTS, i) for i in range(startReversal, numReversals+1)]]
// COMMAND ----------
// MAGIC %python
// MAGIC p = plot(
// MAGIC [Scattergl(x=allData['x'], y=allData['y'], mode='lines', name='Oil Price'),Scattergl(x=allDataCov['x'], y=standardCoverage, mode='lines', name='Oil and gas coverage usa ')] + markerPlots
// MAGIC ,
// MAGIC output_type='div'
// MAGIC )
// MAGIC displayHTML(p)
|
atomicbits/scramlgen
|
modules/scraml-raml-parser/src/main/scala/io/atomicbits/scraml/ramlparser/parser/KeyedList.scala
|
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.ramlparser.parser
import play.api.libs.json.{JsArray, JsObject, JsString, Json}
/**
* Created by peter on 26/02/16.
*/
object KeyedList {
/**
* A keyed list is of the form (in yaml):
*
* * traits:
* * - secured: !include traits/secured.raml
* * - rateLimited: !include traits/rate-limited.raml
*
* And it is parsed to JSON as:
*
* * "traits": [
* * {
* * "secured": {
* * "!include": "traits/secured.raml"
* * }
* * },
* * {
* * "rateLimited": {
* * "!include": "traits/rate-limited.raml"
* * }
* * }
* * ]
*
* toJsObject 'flattens' this to:
*
* * "traits":
* * {
* * "secured": {
* * "!include": "traits/secured.raml"
* * },
* * "rateLimited": {
* * "!include": "traits/rate-limited.raml"
* * }
* * }
*
* @param keyedList the keyed list as a JsArray of single-field objects
* @return the flattened JsObject
*/
def toJsObject(keyedList: JsArray): JsObject = {
val collected: scala.collection.IndexedSeq[JsObject] =
keyedList.value.collect {
case jsObj: JsObject => jsObj
case JsString(value) => Json.obj() + (value -> Json.obj())
}
collected.foldLeft(Json.obj()) {
case (aggr, js) => aggr ++ js
}
}
}
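// Usage sketch (not part of the original source), assuming play-json is on the classpath:
//
//   import play.api.libs.json.Json
//   val keyed = Json.arr(
//     Json.obj("secured" -> Json.obj("!include" -> "traits/secured.raml")),
//     Json.obj("rateLimited" -> Json.obj("!include" -> "traits/rate-limited.raml"))
//   )
//   KeyedList.toJsObject(keyed)
//   // => {"secured": {"!include": "traits/secured.raml"}, "rateLimited": {"!include": "traits/rate-limited.raml"}}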
|
atomicbits/scramlgen
|
modules/scraml-dsl-scala/src/main/scala/io/atomicbits/scraml/dsl/scalaplay/client/ClientConfig.scala
|
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.dsl.scalaplay.client
import java.nio.charset.Charset
/**
* Created by peter on 24/08/15.
*
* Time is in ms.
*/
case class ClientConfig(requestTimeout: Int = 60 * 1000,
maxRequestRetry: Int = 5,
connectTimeout: Int = 5 * 1000,
connectionTTL: Int = -1,
readTimeout: Int = 60 * 1000,
webSocketTimeout: Int = 15 * 60 * 1000,
maxConnections: Int = -1,
maxConnectionsPerHost: Int = -1,
allowPoolingConnections: Boolean = true,
allowPoolingSslConnections: Boolean = true,
pooledConnectionIdleTimeout: Int = 60 * 1000,
useInsecureTrustManager: Boolean = false,
followRedirect: Boolean = false,
maxRedirects: Int = 5,
strict302Handling: Boolean = false,
responseCharset: Charset = Charset.defaultCharset(),
requestCharset: Charset = Charset.defaultCharset())
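// Usage sketch (not part of the original source): every field has a default, so a config is
// typically built by overriding only the settings of interest, e.g.
//
//   val config = ClientConfig(requestTimeout = 30 * 1000, followRedirect = true, maxRedirects = 3)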
|
atomicbits/scramlgen
|
modules/scraml-raml-parser/src/main/scala/io/atomicbits/scraml/ramlparser/model/parsedtypes/ParsedGenericObject.scala
|
<reponame>atomicbits/scramlgen
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.ramlparser.model.parsedtypes
import io.atomicbits.scraml.ramlparser.model._
import io.atomicbits.scraml.ramlparser.parser.{ ParseContext, RamlParseException }
import io.atomicbits.scraml.util.TryUtils
import play.api.libs.json.{ JsObject, JsString, JsValue }
import scala.util.{ Failure, Success, Try }
/**
* Created by peter on 1/04/16.
*/
/**
* Created by peter on 16/09/15.
*
* Generic Object elements have a 'genericType' field:
*
* | {
* | "$schema": "http://json-schema.org/draft-03/schema",
* | "id": "http://atomicbits.io/schema/paged-list.json#",
* | "type": "object",
* | "typeVariables": ["T", "U"],
* | "description": "A paged list with an optional owner of the list",
* | "properties": {
* | "count": {
* | "type": "integer",
* | "required": true
* | },
* | "elements": {
* | "required": true,
* | "type": "array",
* | "items": {
* | "type": "object",
* | "genericType": "T"
* | }
* | },
* | "owner": {
* | "required": false,
* | "type": "object",
* | "genericType": "U"
* | }
* | }
* | }
*
*
*/
case class ParsedGenericObject(id: Id,
typeVariable: String,
required: Option[Boolean] = None,
fragments: Fragments = Fragments(),
model: TypeModel = RamlModel)
extends Fragmented
with AllowedAsObjectField
with NonPrimitiveType {
override def updated(updatedId: Id): ParsedGenericObject = copy(id = updatedId)
override def asTypeModel(typeModel: TypeModel): ParsedType = copy(model = typeModel)
}
object ParsedGenericObject {
val value = "genericType"
def apply(json: JsValue)(implicit parseContext: ParseContext): Try[ParsedGenericObject] = {
val model: TypeModel = TypeModel(json)
// Process the id
val id: Id = JsonSchemaIdExtractor(json)
// Process the required field
val required = (json \ "required").asOpt[Boolean]
val fragments = json match {
case Fragments(fragment) => fragment
}
val genericType = (json \ "genericType")
.asOpt[String]
.map(Success(_))
.getOrElse(Failure[String](RamlParseException(s"A generic object must have a 'genericType' field: $id")))
TryUtils.withSuccess(
Success(id),
genericType,
Success(required),
fragments,
Success(model)
)(ParsedGenericObject(_, _, _, _, _))
}
def unapply(json: JsValue)(implicit parseContext: ParseContext): Option[Try[ParsedGenericObject]] = {
(ParsedType.typeDeclaration(json), (json \ "properties").toOption, (json \ "genericType").toOption) match {
case (Some(JsString(ParsedObject.value)), _, Some(JsString(genT))) => Some(ParsedGenericObject(json))
case (None, Some(jsObj), Some(JsString(genT))) => Some(ParsedGenericObject(json))
case _ => None
}
}
}
|
atomicbits/scramlgen
|
modules/scraml-raml-parser/src/test/scala/io/atomicbits/scraml/ramlparser/model/FetchReplaceStringsTest.scala
|
<filename>modules/scraml-raml-parser/src/test/scala/io/atomicbits/scraml/ramlparser/model/FetchReplaceStringsTest.scala
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.ramlparser.model
import org.scalatest.GivenWhenThen
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatest.matchers.should.Matchers._
/**
* Created by peter on 14/06/17.
*/
class FetchReplaceStringsTest extends AnyFeatureSpec with GivenWhenThen {
Feature("Fetch the trait and resource type replace strings from their definition") {
Scenario("fetch the replace strings form a trait or resource type definition") {
Given("a trait or resource type definition")
val definition = "Return <<resourcePathName>> that have their <<queryParamName | !singularize>> matching the given value"
When("we fetch the replace strings from the definition")
val replaceStrings: Seq[ReplaceString] = ModelMergeTestImpl.fetchReplaceStrings(definition)
Then("we get the expected replace string")
replaceStrings shouldBe Seq(
ReplaceString(toReplace = "<<resourcePathName>>", matchString = "resourcePathName", operations = Seq(), partial = true),
ReplaceString(toReplace = "<<queryParamName | !singularize>>",
matchString = "queryParamName",
operations = Seq(Singularize),
partial = true)
)
}
}
}
object ModelMergeTestImpl extends ModelMerge {}
|
atomicbits/scramlgen
|
modules/scraml-generator/src/main/scala/io/atomicbits/scraml/generator/platform/javajackson/InterfaceGenerator.scala
|
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.generator.platform.javajackson
import io.atomicbits.scraml.generator.codegen.GenerationAggr
import io.atomicbits.scraml.generator.platform.SourceGenerator
import io.atomicbits.scraml.generator.typemodel._
import io.atomicbits.scraml.generator.platform.Platform._
import io.atomicbits.scraml.ramlparser.model.canonicaltypes.CanonicalName
import io.atomicbits.scraml.ramlparser.parser.SourceFile
/**
* Created by peter on 1/03/17.
*/
case class InterfaceGenerator(javaJackson: CommonJavaJacksonPlatform) extends SourceGenerator with PojoGeneratorSupport {
implicit val platform: CommonJavaJacksonPlatform = javaJackson
def generate(generationAggr: GenerationAggr, toInterfaceDefinition: TransferObjectInterfaceDefinition): GenerationAggr = {
val toClassDefinition = toInterfaceDefinition.origin
val originalToCanonicalName = toClassDefinition.reference.canonicalName
val parentNames: List[CanonicalName] = generationAggr.allParents(originalToCanonicalName)
val initialTosWithInterface: Seq[TransferObjectClassDefinition] = Seq(toInterfaceDefinition.origin)
val ownFields: Seq[Field] = toInterfaceDefinition.origin.fields
val interfacesAndFieldsAggr = (initialTosWithInterface, ownFields)
val fields: Seq[Field] =
parentNames.foldLeft(toInterfaceDefinition.origin.fields) { (collectedFields, parentName) =>
val parentDefinition: TransferObjectClassDefinition =
generationAggr.toMap.getOrElse(parentName, sys.error(s"Expected to find $parentName in the generation aggregate."))
collectedFields ++ parentDefinition.fields
}
val interfacesToImplement =
generationAggr
.directParents(originalToCanonicalName)
.foldLeft(Seq.empty[TransferObjectClassDefinition]) { (intsToImpl, parentName) =>
val parentDefinition =
generationAggr.toMap.getOrElse(parentName, sys.error(s"Expected to find $parentName in the generation aggregate."))
intsToImpl :+ parentDefinition
}
.map(TransferObjectInterfaceDefinition(_, toInterfaceDefinition.discriminator))
val isTopLevelInterface = !generationAggr.hasParents(originalToCanonicalName)
val childrenToSerialize =
if (isTopLevelInterface) {
generationAggr
.allChildren(originalToCanonicalName)
.map(child => generationAggr.toMap.getOrElse(child, sys.error(s"Expected to find $child in the generation aggregate.")))
} else {
List.empty[TransferObjectClassDefinition]
}
val (recursiveExtendedParents, allFields) =
parentNames.foldLeft(interfacesAndFieldsAggr) { (aggr, parentName) =>
val (interfaces, fields) = aggr
val parentDefinition: TransferObjectClassDefinition =
generationAggr.toMap.getOrElse(parentName, sys.error(s"Expected to find $parentName in the generation aggregate."))
val withParentFields = fields ++ parentDefinition.fields
val withParentInterface = interfaces :+ parentDefinition
(withParentInterface, withParentFields)
}
val discriminator: String =
(toClassDefinition.typeDiscriminator +: recursiveExtendedParents.map(_.typeDiscriminator)).flatten.headOption
.getOrElse(PojoGenerator(platform).defaultDiscriminator)
val jsonTypeInfo: Option[JsonTypeInfo] =
if (generationAggr.isInHierarchy(originalToCanonicalName)) {
Some(JsonTypeInfo(discriminator = discriminator, discriminatorValue = toClassDefinition.actualTypeDiscriminatorValue))
} else {
None
}
generateInterface(
originalToCanonicalName,
interfacesToImplement.toList,
childrenToSerialize,
allFields,
jsonTypeInfo.map(_.discriminator),
toInterfaceDefinition.classReference,
toClassDefinition,
jsonTypeInfo,
generationAggr
)
}
private def generateInterface(originalToCanonicalName: CanonicalName,
interfacesToImplement: List[TransferObjectInterfaceDefinition],
childrenToSerialize: List[TransferObjectClassDefinition],
fieldsToGenerate: Seq[Field],
skipFieldName: Option[String],
interfaceClassReference: ClassReference,
interfaceClassDefinition: TransferObjectClassDefinition,
jsonTypeInfo: Option[JsonTypeInfo],
generationAggr: GenerationAggr): GenerationAggr = {
val childrenToSerialize = compileChildrenToSerialize(originalToCanonicalName, interfaceClassDefinition, generationAggr)
val importPointers: Seq[ClassPointer] = {
fieldsToGenerate.map(_.classPointer) ++ childrenToSerialize.map(_.classReference) ++
interfacesToImplement.map(_.origin.reference.classPointer)
}
val imports: Set[String] = platform.importStatements(interfaceClassReference, importPointers.toSet)
val jsonTypeAnnotations = generateJsonTypeAnnotations(childrenToSerialize, jsonTypeInfo)
val source =
s"""
package ${interfaceClassReference.packageName};
import com.fasterxml.jackson.annotation.*;
${imports.mkString("\n")};
$jsonTypeAnnotations
${generateInterfaceSource(interfaceClassReference, interfacesToImplement, fieldsToGenerate, skipFieldName)}
"""
val sourceFile =
SourceFile(
filePath = interfaceClassReference.toFilePath,
content = source
)
generationAggr.addSourceFile(sourceFile)
}
private def generateInterfaceSource(toClassReference: ClassReference,
interfacesToImplement: List[TransferObjectInterfaceDefinition],
fieldsToGenerate: Seq[Field],
skipFieldName: Option[String] = None): String = {
val selectedFields =
skipFieldName map { skipField =>
fieldsToGenerate.filterNot(_.fieldName == skipField)
} getOrElse fieldsToGenerate
val sortedFields = selectedFields.sortBy(_.safeFieldName) // In Java Pojo's, we sort by field name!
val getterAndSetters = sortedFields map {
case fieldRep @ Field(fieldName, classPointer, required) =>
val fieldNameCap = fieldRep.safeFieldName.capitalize
s"""
public ${classPointer.classDefinition} get$fieldNameCap();
public void set$fieldNameCap(${classPointer.classDefinition} ${fieldRep.safeFieldName});
"""
}
val implementsInterfaces = interfacesToImplement.map(classToImpl => s"${classToImpl.origin.reference.classDefinition}")
val implementsStatement =
if (implementsInterfaces.nonEmpty) implementsInterfaces.mkString("implements ", ",", "")
else ""
val fieldDeclarations = sortedFields.map(_.fieldDeclaration)
s"""
public interface ${toClassReference.classDefinition} $implementsStatement {
${getterAndSetters.mkString("\n")}
}
"""
}
}
|
atomicbits/scramlgen
|
modules/scraml-raml-parser/src/main/scala/io/atomicbits/scraml/ramlparser/lookup/transformers/FallbackTransformer.scala
|
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.ramlparser.lookup.transformers
import io.atomicbits.scraml.ramlparser.lookup.{ CanonicalLookupHelper, CanonicalNameGenerator }
import io.atomicbits.scraml.ramlparser.model.canonicaltypes.{ CanonicalName, StringType, TypeReference }
import io.atomicbits.scraml.ramlparser.model.parsedtypes.{ ParsedString, ParsedType }
/**
* Created by peter on 6/07/17.
*
* This transformer transforms any parsed type to a string type.
*/
object FallbackTransformer {
// format: off
def unapply(parsedTypeContext: ParsedTypeContext)
(implicit canonicalNameGenerator: CanonicalNameGenerator): Option[(TypeReference, CanonicalLookupHelper)] = { // format: on
val parsed: ParsedType = parsedTypeContext.parsedType
val canonicalLookupHelper: CanonicalLookupHelper = parsedTypeContext.canonicalLookupHelper
parsed match {
case _ => Some((StringType, canonicalLookupHelper))
}
}
}
|
atomicbits/scramlgen
|
modules/scraml-generator/src/test/scala/io/atomicbits/scraml/generator/WithEnumGeneratorTest.scala
|
/*
*
* (C) Copyright 2018 Atomic BITS (http://atomicbits.io).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* <NAME>
*
*/
package io.atomicbits.scraml.generator
import io.atomicbits.scraml.generator.codegen.GenerationAggr
import io.atomicbits.scraml.generator.platform.Platform
import io.atomicbits.scraml.generator.platform.scalaplay.ScalaPlay
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatest.matchers.should.Matchers._
class WithEnumGeneratorTest extends AnyFeatureSpec with GivenWhenThen with BeforeAndAfterAll with ScalaFutures {
import io.atomicbits.scraml.generator.platform.Platform._
implicit val platform: ScalaPlay.type = ScalaPlay
Feature("The scraml generator generates DSL classes suited for enums") {
Scenario("test generated Scala DSL") {
Given("a RAML specification")
val apiResourceUrl = this.getClass.getClassLoader.getResource("withenum/EnumApi.raml")
When("we generate the RAMl specification into class representations")
implicit val platform: Platform = ScalaPlay(List("io", "atomicbits"))
val generationAggr: GenerationAggr =
ScramlGenerator
.buildGenerationAggr(
ramlApiPath = apiResourceUrl.toString,
apiClassName = "EnumApi",
platform
)
.generate
Then("we should get valid class representations")
val generatedClasses = generationAggr.sourceDefinitionsProcessed.map(_.classReference.fullyQualifiedName).toSet
val expectedClasses = Set(
"io.atomicbits.EnumApi",
"io.atomicbits.rest.RestResource",
"io.atomicbits.rest.withenum.WithEnumResource",
"io.atomicbits.schema.WithEnum",
"io.atomicbits.schema.WithEnumMethod"
)
generatedClasses -- expectedClasses shouldBe Set.empty
expectedClasses -- generatedClasses shouldBe Set.empty
}
}
}
|