code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*******************************************************************************
* This is part of ltlfo2mon (https://github.com/jckuester/ltlfo2mon).
*
* Copyright (c) 2013 by Jan-Christoph Kuester <kuester@sdf.org>
*
* Ltlfo2mon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Ltlfo2mon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ltlfo2mon. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package ltlfo2mon
import datatype._
object Conf {

  // Index of the current position (world) in the monitored trace;
  // read by the stateful I-operators below.
  var index: Int = 1

  /**
   * First-order structure
   */
  var struct = new Structure()

  /**
   * U-operators
   */
  struct.addUoperator("v")
  struct.addUoperator("w")

  /**
   * Constants and functions
   */
  struct.addConst("pi", 3.14)
  // Arguments arrive untyped; the toString.toInt round-trip reads them back as integers.
  struct.addFunct("add", (args: Vector[Any]) => args(0).toString.toInt + args(1).toString.toInt)
  struct.addFunct("mul", (args: Vector[Any]) => args(0).toString.toInt * args(1).toString.toInt)
  struct.addFunct("sub", (args: Vector[Any]) => args(0).toString.toInt - args(1).toString.toInt)

  /**
   * I-operators
   */
  struct.addIoperator("top", (args: Vector[Any]) => true)
  struct.addIoperator("bot", (args: Vector[Any]) => false)
  struct.addIoperator("leq", (args: Vector[Any]) => args(0).toString.toInt <= args(1).toString.toInt)
  struct.addIoperator("eq", (args: Vector[Any]) => args(0).toString.toInt == args(1).toString.toInt)
  struct.addIoperator("even", (args: Vector[Any]) => args(0).toString.toInt % 2 == 0)
  struct.addIoperator("odd", (args: Vector[Any]) => args(0).toString.toInt % 2 != 0)
  struct.addIoperator("div4", (args: Vector[Any]) => args(0).toString.toInt % 4 == 0)
  struct.addIoperator("regex", (args: Vector[Any]) => args(0).toString.matches(args(1).toString))
  // for RV'13 experiments
  // (`if (cond) true else false` simplified to plain `cond` throughout)
  struct.addIoperator("m", (args: Vector[Any]) => (Conf.index + 77) % (args(0).toString.toInt + 80) == 0)
  struct.addIoperator("n", (args: Vector[Any]) => (Conf.index + 21) % (args(0).toString.toInt + 80) == 0)
  struct.addIoperator("o", (args: Vector[Any]) => (Conf.index + 7) % (args(0).toString.toInt + 80) == 0)
  struct.addIoperator("p", (args: Vector[Any]) => false, isRigid = true) // rigid false
  struct.addIoperator("q", (args: Vector[Any]) => args(0).toString.toInt % 2 == 0) // even
  struct.addIoperator("r", (args: Vector[Any]) => Conf.index % (args(0).toString.toInt + 1) == 0) // r(x): r becomes true max. 0-x worlds later
  struct.addIoperator("s", (args: Vector[Any]) => Conf.index % 20 == 0)
  struct.addIoperator("t", (args: Vector[Any]) => Conf.index % (args(0).toString.toInt + args(1).toString.toInt + 1) == 0)
  struct.addIoperator("u", (args: Vector[Any]) => true, isRigid = true) // rigid true

  /*
   * print options
   */
  var verbose = false
  var verbose2 = false
  val path = "/tmp/"

  /*
   * trace parameter
   */
  var numOfTraces = 1
  var traceLength = 1000
  var eventSize = 5

  /*
   * formulae
   */
  var formulae: Array[String] = Array(
    // Absence: p is false, globally
    //"G A x:w.!p(x)",
    // Absence: p is false, after q
    //"G ((E x:w.q(x)) -> G A y:w.!p(y))"
    // Absence: p is false, between q and r
    //"G A x:w.(s(x) && !r(x) && F r(x)) -> (!p(x) U r(x))"
    //"G A x:w.s(x) -> F (q(x) && F r(x))"
    //"G A x:w.F s(x) && G A x:w.F r(x)"
    // Universality: p is true, between q and r
    //"G A x:w.(s(x) && !r(x) && F r(x)) -> (u(x) U r(x))",
    // Response: o responds to m, between n and r
    //"G A x:w.(n(x) && !r(x) && F r(x)) -> ((m(x) -> (!r(x) U (o(x) && !r(x)))) U r(x))",
    //"G A x:w.q(x) -> ((!u(x) && !r(x)) U (r(x) || ((u(x) && !r(x)) U (r(x) || ((!u(x) && !r(x)) U (r(x) || ((u(x) && !r(x)) U (r(x) || (!u(x) W r(x)) || G u(x)))))))))"
    //"G A x:w.q(x) -> ((!u(x) && !r(x)) U (r(x) || ((u(x) && !r(x)) U (r(x) || ((!u(x) && !r(x)) U (r(x) || ((u(x) && !r(x)) U (r(x) || (!u(x) W r(x)) || G u(x)))))))))",
    //"G A x:w.(q(x) && !p(x)) -> (!p(x) U (E y:w.t(x,y) && !p(x)))"
    /*
    //"G A x:w.!p1(x)" // 1
    G ((E x:w.q1(x)) -> G A y:w.!p1(y)), // 2
    G ((A x:w.q1(x)) -> G A y:w.!p1(y)), // 3
    G A x:w.((q1(x) && (!r1(x)) && F r1(x)) -> ((!p1(x)) U r1(x))), // 4
    G ((A x:w.q1(x) && (!r2(x)))-> (A y:w.!p1(y)) W E z:w.r2(z)), // 5
    G A x:w.((!q2(x)) || F (q2(x) && F p2(x))), // 6
    G A x:w.((q4(x) && (! r4(x)) && F r4(x)) -> (p4(x) U r4(x))), // 7
    G A x:w.(q9(x) U r9(x)), // 8
    G A x:w.((q8(x) && (! r8(x)) && F r8(x)) -> ((p8(x) -> ((! r8(x)) U (s8(x) && (! r8(x))))) U r8(x))), // 9
    G A x:w.((q8(x) && (! r8(x))) -> ((p8(x) -> ((! r8(x)) U (s8(x) && (!r8(x))))) W r8(x))), // 10
    G A x:w.(v(x) U A y:w.(v(y) U r6(x,y))), // 11
    G A x:w.A y:w.A z:w.(v(x,y,z) U r6(x,y,z)), // 12
    G A x:w.A y:w.A z:w.X v(x,y,z), // 13
    G (A x:w.((q1(x) && !r3(x)) -> ((!r3(x)) U (E y:w.p3(x,y) && !r3(x))))), // 14
    G A x:w.(e(x) -> (((!v(x)) && (!r(x))) U (r(x) || ((v(x) && (!r(x))) U
    (r(x) || (((!v(x)) && (!r(x))) U (r(x) || ((v(x) && (!r(x))) U
    (r(x) || ((!v(x)) W r(x)) || G v(x)))))))))), // 15
    G A x:w.A y:w.A z:w.((q4(x,y,z) && (! r4(x,y,z)) && F r4(x,y,z)) -> (p4(x,y,z) U r4(x,y,z))), // takes too long // 16
    G A x:w.((q1(x) && (! r3(x))) -> (! r3(x)) W (p2(x) && (! r3(x)))), // 17
    G A x:w.((q1(x) && (! r3(x))) -> (! r3(x)) U (p2(x) && (! r3(x)))) // 18
    */
  )

  /*
   * traces to monitor
   */
  var traces: Array[String] = Array(
    //"{w(12),w(10),w(9),w(9)},{w(10),w(5),w(6),w(10),w(11),w(4),w(8)},{w(8),w(10),w(14),w(13),w(6)}"
  )
}
package com.rasterfoundry.api.utils
import com.rasterfoundry.notification.intercom.Model._
import com.typesafe.config.ConfigFactory
trait Config {

  /** Root configuration resolved from the application's config files. */
  val config = ConfigFactory.load()

  // One private handle per configuration section.
  private val httpSection       = config.getConfig("http")
  private val auth0Section      = config.getConfig("auth0")
  private val clientSection     = config.getConfig("client")
  private val intercomSection   = config.getConfig("intercom")
  private val rollbarSection    = config.getConfig("rollbar")
  private val s3Section         = config.getConfig("s3")
  private val tileServerSection = config.getConfig("tileServer")
  private val dropboxSection    = config.getConfig("dropbox")
  private val sentinel2Section  = config.getConfig("sentinel2")
  private val groundworkSection = config.getConfig("groundwork")

  // HTTP server binding.
  val httpHost = httpSection.getString("interface")
  val httpPort = httpSection.getInt("port")

  // Auth0 credentials and connection identifiers.
  val auth0Domain = auth0Section.getString("domain")
  val auth0Bearer = auth0Section.getString("bearer")
  val auth0ClientId = auth0Section.getString("clientId")
  val auth0ManagementClientId = auth0Section.getString("managementClientId")
  val auth0ManagementSecret = auth0Section.getString("managementSecret")
  val auth0GroundworkConnectionName =
    auth0Section.getString("groundworkConnectionName")
  val auth0AnonymizedConnectionName =
    auth0Section.getString("anonymizedUserCreateConnectionName")
  val auth0AnonymizedConnectionId =
    auth0Section.getString("anonymizedUserCreateConnectionId")
  val auth0AnonymizedConnectionAltName =
    auth0Section.getString("anonymizedUserCreateConnectionAltName")
  val auth0AnonymizedConnectionAltId =
    auth0Section.getString("anonymizedUserCreateConnectionAltId")

  // Client / Intercom integration.
  val clientEnvironment = clientSection.getString("clientEnvironment")
  val intercomAppId = intercomSection.getString("appId")
  val intercomToken = IntercomToken(intercomSection.getString("token"))
  val intercomAdminId = AdminId(intercomSection.getString("adminId"))
  val groundworkUrlBase = intercomSection.getString("groundworkUrlBase")

  // Error reporting.
  val rollbarClientToken = rollbarSection.getString("clientToken")

  // S3 buckets and roles.
  val region = s3Section.getString("region")
  val dataBucket = s3Section.getString("dataBucket")
  val thumbnailBucket = s3Section.getString("thumbnailBucket")

  // Remaining single-value integrations.
  val tileServerLocation = tileServerSection.getString("location")
  val dropboxClientId = dropboxSection.getString("appKey")
  val scopedUploadRoleArn = s3Section.getString("scopedUploadRoleArn")
  val sentinel2DatasourceId = sentinel2Section.getString("datasourceId")
  val groundworkSampleProject = groundworkSection.getString("sampleProject")
}
| raster-foundry/raster-foundry | app-backend/api/src/main/scala/utils/Config.scala | Scala | apache-2.0 | 2,585 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.notifications.v1m0
import connectors.AmlsNotificationConnector
import models.notifications.ContactType._
import models.notifications.{ContactType, IDType, NotificationDetails, NotificationRow}
import org.joda.time.{DateTime, DateTimeZone}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.MessagesApi
import play.api.inject.bind
import play.api.inject.guice.GuiceInjectorBuilder
import play.api.test.Helpers._
import services.{CustomAttributeProvider, NotificationService}
import uk.gov.hmrc.http.HeaderCarrier
import utils.AmlsSpec
import scala.concurrent.Future
/**
 * Verifies the message text that NotificationService renders for
 * template version v1m0, per ContactType, against a mocked
 * AmlsNotificationConnector.
 */
class MessageDetailsSpec extends AmlsSpec with MockitoSugar {

  // Header carrier required implicitly by the connector calls.
  implicit val hc = HeaderCarrier()

  trait Fixture {
    // (accountType, ref) pair passed through to the service under test.
    val accountTypeId = ("org", "id")

    val amlsNotificationConnector = mock[AmlsNotificationConnector]

    // Guice injector wired with the mocked connector so the real
    // NotificationService renders templates without a backend call.
    val injector = new GuiceInjectorBuilder()
      .overrides(bind[AmlsNotificationConnector].to(amlsNotificationConnector))
      .bindings(bind[MessagesApi].to(messagesApi)).build()

    val service = injector.instanceOf[NotificationService]

    // Baseline notification row; individual tests vary it via copy().
    val testNotifications = NotificationRow(
      status = None,
      contactType = None,
      contactNumber = None,
      variation = false,
      receivedAt = new DateTime(2017, 12, 1, 1, 3, DateTimeZone.UTC),
      false,
      "XJML00000200000",
      "1",
      IDType("132456")
    )

    val dateTime = new DateTime(1479730062573L, DateTimeZone.UTC)

    // NOTE(review): testList is not referenced by any test in this spec —
    // confirm whether it can be removed or belongs in a shared fixture.
    val testList = Seq(
      testNotifications.copy(contactType = Some(ApplicationApproval), receivedAt = new DateTime(1981, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(variation = true, receivedAt = new DateTime(1976, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(RenewalApproval), receivedAt = new DateTime(2016, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(RejectionReasons), receivedAt = new DateTime(2001, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications,
      testNotifications.copy(contactType = Some(RevocationReasons), receivedAt = new DateTime(1998, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(AutoExpiryOfRegistration), receivedAt = new DateTime(2017, 11, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(ReminderToPayForApplication), receivedAt = new DateTime(2012, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(ReminderToPayForVariation), receivedAt = new DateTime(2017, 12, 1, 3, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(ReminderToPayForRenewal), receivedAt = new DateTime(2017, 12, 3, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(ReminderToPayForManualCharges), receivedAt = new DateTime(2007, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(RenewalReminder), receivedAt = new DateTime(1991, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(MindedToReject), receivedAt = new DateTime(1971, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(MindedToRevoke), receivedAt = new DateTime(2017, 10, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(NoLongerMindedToReject), receivedAt = new DateTime(2003, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(NoLongerMindedToRevoke), receivedAt = new DateTime(2002, 12, 1, 1, 3, DateTimeZone.UTC)),
      testNotifications.copy(contactType = Some(Others), receivedAt = new DateTime(2017, 12, 1, 1, 3, DateTimeZone.UTC))
    )
  }

  // Raw message payloads in the "parameterN-value" pipe-separated form
  // that the templates parse for amount, reference, date and status.
  val messageWithAmountRefNumberAndStatus = "parameter1-1234|parameter2-ABC1234|Status-04-Approved"
  val messageWithDateAndRefNumber = "parameter1-31/07/2018|parameter2-ABC1234"
  val messageWithDate = "parameter1-31/07/2018"

  "The Notification Service" must {
    "templateVersion = v1m0" when {
      // Messages whose text is fixed and carries no payload parameters.
      "return static message details" when {
        "contact type is auto-rejected for failure to pay" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ContactType.ApplicationAutorejectionForFailureToPay),
              None,
              None,
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails(
            "thing",
            "thing",
            ContactType.ApplicationAutorejectionForFailureToPay,
            "v1m0",
            accountTypeId
          ))
          result.get.messageText.get mustBe (
            """<p>Your application to be supervised by HM Revenue and Customs (HMRC) under The Money Laundering, Terrorist Financing and Transfer of Funds (Information on the Payer) Regulations 2017 has failed.</p>""" +
              """<p>As you’ve not paid the full fees due, your application has automatically expired.</p>""" +
              """<p>You need to be registered with a <a href="https://www.gov.uk/guidance/money-laundering-regulations-who-needs-to-register">supervisory body</a>""" +
              """ if Money Laundering Regulations apply to your business. If you’re not supervised you may be subject to penalties and criminal charges.</p>""" +
              """<p>If you still need to be registered with HMRC you should submit a new application immediately. You can apply from your account """ +
              """<a href="""" +
              controllers.routes.StatusController.get() +
              """">status page</a>.</p>"""
          )
        }
        "contact type is registration variation approval" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ContactType.RegistrationVariationApproval),
              None,
              None,
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails(
            "thing",
            "thing",
            ContactType.RegistrationVariationApproval,
            "v1m0",
            accountTypeId
          ))
          result.get.messageText.get mustBe (
            """<p>The recent changes made to your details have been approved.</p>""" +
              """<p>You can find details of your registration on your <a href="""" +
              controllers.routes.StatusController.get() +
              """">status page</a>.</p>"""
          )
        }
        "contact type is DeRegistrationEffectiveDateChange" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ContactType.DeRegistrationEffectiveDateChange),
              None,
              None,
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails(
            "thing",
            "thing",
            ContactType.DeRegistrationEffectiveDateChange,
            "v1m0",
            accountTypeId
          ))
          result.get.messageText.get mustBe (
            """<p>The date your anti-money laundering supervision ended has been changed.</p>""" +
              """<p>You can see the new effective date on your <a href="""" +
              controllers.routes.StatusController.get() +
              """">status page</a>.</p>"""
          )
        }
      }
      // Messages whose text is interpolated from the payload parameters.
      "return correct message content" when {
        "contact type is ReminderToPayForVariation" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ReminderToPayForVariation),
              None,
              Some(messageWithAmountRefNumberAndStatus),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ReminderToPayForVariation, "v1m0", accountTypeId))
          result.get.messageText.get mustBe ("<p>You need to pay £1234.00 for the recent changes made to your details.</p><p>Your payment reference is: ABC1234.</p><p>Find details of how to pay on your online account home page.</p><p>It can take time for some payments to clear, so if you’ve already paid you can ignore this message.</p>")
        }
        "contact type is ReminderToPayForApplication" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ReminderToPayForApplication),
              None,
              Some(messageWithAmountRefNumberAndStatus),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ReminderToPayForApplication, "v1m0", accountTypeId))
          result.get.messageText.get mustBe ("<p>You need to pay £1234.00 for your application to register with HM Revenue and Customs.</p><p>Your payment reference is: ABC1234.</p><p>Find details of how to pay on your online account home page.</p><p>It can take time for some payments to clear, so if you’ve already paid you can ignore this message.</p>")
        }
        "contact type is ReminderToPayForRenewal" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ReminderToPayForRenewal),
              None,
              Some(messageWithAmountRefNumberAndStatus),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ReminderToPayForRenewal, "v1m0", accountTypeId))
          result.get.messageText.get mustBe ("<p>You need to pay £1234.00 for your annual renewal.</p><p>Your payment reference is: ABC1234.</p><p>Find details of how to pay on your online account home page.</p><p>It can take time for some payments to clear, so if you’ve already paid you can ignore this message.</p>")
        }
        "contact type is ReminderToPayForManualCharges" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ReminderToPayForManualCharges),
              None,
              Some(messageWithAmountRefNumberAndStatus),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ReminderToPayForManualCharges, "v1m0", accountTypeId))
          result.get.messageText.get mustBe ("<p>You need to pay £1234.00 for the recent charge added to your account.</p><p>Your payment reference is: ABC1234.</p><p>Find details of how to pay on your online account home page.</p><p>It can take time for some payments to clear, so if you’ve already paid you can ignore this message.</p>")
        }
        "contact type is ApplicationApproval" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(ApplicationApproval),
              None,
              Some(messageWithDateAndRefNumber),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ApplicationApproval, "v1m0", accountTypeId))
          result.get.messageText.get mustBe (s"<p>Your application to register has been approved. You’re now registered until 2018-07-31.</p><p>Your anti-money laundering registration number is: ABC1234.</p><p>You can find details of your registration on your <a href=${"\\"" + controllers.routes.StatusController.get().url + "\\""}>status page</a>.</p>")
        }
        "contact type is RenewalApproval" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(RenewalApproval),
              None,
              Some(messageWithDate),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.RenewalApproval, "v1m0", accountTypeId))
          result.get.messageText.get mustBe (s"<p>Your renewal has been approved. You’re now registered for supervision until 2018-07-31.</p><p>You can find details of your registration on your <a href=${"\\"" + controllers.routes.StatusController.get().url + "\\""}>status page</a>.</p>")
        }
        "contact type is AutoExpiryOfRegistration" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(AutoExpiryOfRegistration),
              None,
              Some(messageWithDate),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.AutoExpiryOfRegistration, "v1m0", accountTypeId))
          result.get.messageText.get mustBe (s"<p>Your registration to be supervised by HM Revenue and Customs (HMRC) under The Money Laundering, Terrorist Financing and Transfer of Funds (Information on the Payer) Regulations 2017 expired on 2018-07-31.</p><p>You need to be registered with a <a href=${"\\""}https://www.gov.uk/guidance/money-laundering-regulations-who-needs-to-register${"\\""}>supervisory body</a> if Money Laundering Regulations apply to your business. If you’re not supervised you may be subject to penalties and criminal charges.</p><p>If you still need to be registered with HMRC you should submit a new application immediately. You can apply from your <a href=${"\\"" + controllers.routes.StatusController.get().url + "\\""}>status page</a>.</p>")
        }
        "contact type is RenewalReminder" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(RenewalReminder),
              None,
              Some(messageWithDate),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.RenewalReminder, "v1m0", accountTypeId))
          result.get.messageText.get mustBe (s"<p>You need to renew your registration before 2018-07-31.</p><p>You can renew from your <a href=${"\\"" + controllers.routes.StatusController.get().url + "\\""}>status page</a>.</p><p>If you don’t renew and pay your fees before this date your registration will expire and you won’t be supervised by HM Revenue and Customs.</p>")
        }
        "content message is ETMP markdown" in new Fixture {
          val message = "<P># Test Heading</P><P>* bullet 1</P><P>* bullet 2</P><P>* bullet 3</P>"
          // NOTE(review): the stub wraps MindedToReject but the service is
          // called with MindedToRevoke. The any() matchers make the stub
          // apply regardless, so the test passes — confirm which contact
          // type was intended.
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(Some(NotificationDetails(
              Some(MindedToReject),
              None,
              Some(message),
              true,
              dateTime
            ))))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.MindedToRevoke, "v1m0", accountTypeId))
          result.get.messageText.get mustBe CustomAttributeProvider.commonMark(message)
        }
      }
      // When the connector has no message, the service must yield None.
      "return None" when {
        "getMessageDetails returns None and message is of type with end date only message" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(None))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.RenewalReminder, "v1m0", accountTypeId))
          result mustBe None
        }
        "getMessageDetails returns None and message is of type with end date and ref number message" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(None))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ApplicationApproval, "v1m0", accountTypeId))
          result mustBe None
        }
        "getMessageDetails returns None and message is of type with ref number, amount and status message" in new Fixture {
          when(amlsNotificationConnector.getMessageDetailsByAmlsRegNo(any(), any(), any())(any(), any()))
            .thenReturn(Future.successful(None))
          val result = await(service.getMessageDetails("regNo", "id", ContactType.ReminderToPayForVariation, "v1m0", accountTypeId))
          result mustBe None
        }
      }
    }
  }
}
| hmrc/amls-frontend | test/services/notifications/v1m0/MessageDetailsSpec.scala | Scala | apache-2.0 | 18,042 |
/*
* macroRules.scala
*/
package at.logic.gapt.proofs.lk
import at.logic.gapt.expr._
import at.logic.gapt.expr.hol.{ isPrenex, instantiate, HOLPosition }
import at.logic.gapt.proofs.HOLSequent
import at.logic.gapt.proofs.expansionTrees._
import at.logic.gapt.proofs.lk.base._
import at.logic.gapt.proofs.occurrences._
import at.logic.gapt.utils.ds.trees._
import at.logic.gapt.utils.logging.Logger
import scala.collection.mutable.ListBuffer
/** Mixes in logging under a dedicated logger name for the macro rules. */
trait MacroRuleLogger extends Logger {
  override def loggerName = "MacroRuleLogger"
}
object AndLeftRule {
  /**
   * <pre>Replaces a formulas A, B (marked by term1oc & term2oc) with the conjunction
   * A ∧ B in the antecedent of a sequent.
   *
   * The rule:
   *     (rest of s1)
   *   sL, A, B |- sR
   * ------------------- (AndLeft)
   * sL, A ∧ B |- sR
   * </pre>
   *
   * @param s1 The top proof with (sL, A, B |- sR) as the bottommost sequent.
   * @param term1oc The occurrence of A in the antecedent of s1.
   * @param term2oc The occurrence of B in the antecedent of s2.
   * @return An LK Proof ending with the new inference.
   */
  def apply( s1: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence ) = {
    // Derive A ∧ B twice (once per conjunct) and contract the two copies.
    val p0 = AndLeft1Rule( s1, term1oc, term2oc.formula.asInstanceOf[HOLFormula] )
    val p1 = AndLeft2Rule( p0, term1oc.formula.asInstanceOf[HOLFormula], p0.getDescendantInLowerSequent( term2oc ).get )
    ContractionLeftRule( p1, p1.prin.head, p1.getDescendantInLowerSequent( p0.prin.head ).get )
  }

  /**
   * <pre>Replaces a formulas term1, term2 with the conjunction
   * term1 ∧ term2 in the antecedent of a sequent.
   *
   * The rule:
   *     (rest of s1)
   * sL, term1, term2 |- sR
   * ---------------------- (AndLeft)
   * sL, term1 ∧ term2 |- sR
   * </pre>
   *
   * @param s1 The top proof with (sL, term1, term2 |- sR) as the bottommost sequent.
   * @param term1 The first formula to be replaced in the antecedent of s1.
   * @param term2 The second formula to be replaced in the antecedent of s2.
   * @return An LK Proof ending with the new inference.
   */
  def apply( s1: LKProof, term1: HOLFormula, term2: HOLFormula ): UnaryTree[OccSequent] with UnaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
    // Locate the two (distinct) occurrences in the antecedent, failing
    // eagerly with the same error as before if either is missing.
    val x1 = s1.root.antecedent.find( _.formula == term1 ).getOrElse(
      throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" ) )
    val x2 = s1.root.antecedent.find( x => x.formula == term2 && x != x1 ).getOrElse(
      throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" ) )
    apply( s1, x1, x2 )
  }
}
object OrRightRule {
  /**
   * <pre>Replaces a formulas A, B (marked by term1oc & term2oc) with the disjunction
   * A ∨ B in the succedent of a sequent.
   *
   * The rule:
   *     (rest of s1)
   *   sL|- sR, A, B
   * ------------------- (OrRight)
   * sL |- sR, A ∨ B
   * </pre>
   *
   * @param s1 The top proof with (sL |- sR, A, B) as the bottommost sequent.
   * @param term1oc The occurrence of A in the succedent of s1.
   * @param term2oc The occurrence of B in the succedent of s2.
   * @return An LK Proof ending with the new inference.
   */
  def apply( s1: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence ) = {
    // Derive A ∨ B twice (once per disjunct) and contract the two copies.
    val p0 = OrRight1Rule( s1, term1oc, term2oc.formula )
    val p1 = OrRight2Rule( p0, term1oc.formula, p0.getDescendantInLowerSequent( term2oc ).get )
    ContractionRightRule( p1, p1.prin.head, p1.getDescendantInLowerSequent( p0.prin.head ).get )
  }

  /**
   * <pre>Replaces a formulas term1, term2 with the disjunction
   * term1 ∨ term2 in the succedent of a sequent.
   *
   * The rule:
   *     (rest of s1)
   * sL |- sR, term1, term2
   * ---------------------- (OrRight)
   * sL |- sR, term1 ∨ term2
   * </pre>
   *
   * @param s1 The top proof with (sL |- sR, term1, term2) as the bottommost sequent.
   * @param term1 The first formula to be replaced in the succedent of s1.
   * @param term2 The second formula to be replaced in the succedent of s2.
   * @return An LK Proof ending with the new inference.
   */
  def apply( s1: LKProof, term1: HOLFormula, term2: HOLFormula ): UnaryTree[OccSequent] with UnaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
    // Locate the two (distinct) occurrences in the succedent, failing
    // eagerly with the same error as before if either is missing.
    val x1 = s1.root.succedent.find( _.formula == term1 ).getOrElse(
      throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" ) )
    val x2 = s1.root.succedent.find( x => x.formula == term2 && x != x1 ).getOrElse(
      throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" ) )
    apply( s1, x1, x2 )
  }
}
object TransRule {
  /**
   * <pre>Performs a proof employing transitivity.
   *
   * Takes a proof s2 with end-sequent of the form
   * (x=z), Trans, ... |- ...
   * and return one with end-sequent of the form
   * (x=y), (y=z), Trans, ... |- ...
   * where Trans is defined as Forall xyz.((x=y ∧ y=z) -> x=z)
   * </pre>
   * @param x X
   * @param y Y
   * @param z Z
   * @param s2 The proof which contains the (x=z) which is to be shown.
   * @return A proof wich s2 as a subtree and the formula (x=z) replaced by (x=y) and (y=z).
   */
  def apply( x: FOLTerm, y: FOLTerm, z: FOLTerm, s2: LKProof ): LKProof = {
    val xv = FOLVar( "x" )
    val yv = FOLVar( "y" )
    val zv = FOLVar( "z" )
    //Forall xyz.(x = y ^ y = z -> x = z)
    val Trans = All( xv, All( yv, All( zv, Imp( And( Eq( xv, yv ), Eq( yv, zv ) ), Eq( xv, zv ) ) ) ) )
    // Partial instantiations of Trans: first variable fixed, first two fixed, all three fixed.
    def TransX( x: FOLTerm ) = All( yv, All( zv, Imp( And( Eq( x, yv ), Eq( yv, zv ) ), Eq( x, zv ) ) ) )
    def TransXY( x: FOLTerm, y: FOLTerm ) = All( zv, Imp( And( Eq( x, y ), Eq( y, zv ) ), Eq( x, zv ) ) )
    def TransXYZ( x: FOLTerm, y: FOLTerm, z: FOLTerm ) = Imp( And( Eq( x, y ), Eq( y, z ) ), Eq( x, z ) )
    val xy = Eq( x, y )
    val yz = Eq( y, z )
    val xz = Eq( x, z )
    // Prove (x=y ∧ y=z) from the two axioms, ...
    val ax_xy = Axiom( xy :: Nil, xy :: Nil )
    val ax_yz = Axiom( yz :: Nil, yz :: Nil )
    val s1 = AndRightRule( ax_xy, ax_yz, xy, yz )
    // ... feed it into the implication, ...
    val imp = ImpLeftRule( s1, s2, And( xy, yz ), xz )
    // ... close the quantifiers innermost-first (z, then y, then x), ...
    val allQZ = ForallLeftRule( imp, TransXYZ( x, y, z ), TransXY( x, y ), z )
    val allQYZ = ForallLeftRule( allQZ, TransXY( x, y ), TransX( x ), y )
    val allQXYZ = ForallLeftRule( allQYZ, TransX( x ), Trans, x )
    // ... and contract the two copies of Trans in the antecedent.
    ContractionLeftRule( allQXYZ, Trans )
  }
}
object ExistsRightBlock {
  /**
   * <pre>Applies the ExistsRight-rule n times.
   * This method expects a formula main with
   * a quantifier block, and a proof s1 which has a fully
   * instantiated version of main on the left side of its
   * bottommost sequent.
   *
   * The rule:
   *   (rest of s1)
   * sL |- A[x1\term1,...,xN\termN], sR
   * ---------------------------------- (ExistsRight x n)
   *     sL |- Exists x1,..,xN.A, sR
   * </pre>
   *
   * @param s1 The top proof with (sL |- A[x1\term1,...,xN\termN], sR) as the bottommost sequent.
   * @param main A formula of the form (Exist x1,...,xN.A).
   * @param terms The list of terms with which to instantiate main. The caller of this
   * method has to ensure the correctness of these terms, and, specifically, that
   * A[x1\term1,...,xN\termN] indeed occurs at the bottom of the proof s1.
   */
  def apply( s1: LKProof, main: HOLFormula, terms: Seq[LambdaExpression] ): LKProof = {
    // main instantiated with ever shorter prefixes of terms: from the fully
    // instantiated matrix down to main itself (terms.length + 1 entries).
    // (The original built this with a redundant intermediate .toList.)
    val partiallyInstantiatedMains = ( 0 to terms.length ).reverse.map( n => instantiate( main, terms.take( n ) ) ).toList
    // Starting from the fully instantiated formula at the bottom of s1,
    // reintroduce the quantifiers innermost-first; at each step the head of
    // the list is the auxiliary formula and the next element the new main.
    val series = terms.reverse.foldLeft( ( s1, partiallyInstantiatedMains ) ) { ( acc, ai ) =>
      ( ExistsRightRule( acc._1, acc._2.head, acc._2.tail.head, ai ), acc._2.tail )
    }
    series._1
  }
}
object ForallLeftBlock {
  /**
   * <pre>Applies the ForallLeft-rule n times.
   * This method expects a formula main with
   * a quantifier block, and a proof s1 which has a fully
   * instantiated version of main on the left side of its
   * bottommost sequent.
   *
   * The rule:
   *   (rest of s1)
   * sL, A[x1\term1,...,xN\termN] |- sR
   * ---------------------------------- (ForallLeft x n)
   *     sL, Forall x1,..,xN.A |- sR
   * </pre>
   *
   * @param s1 The top proof with (sL, A[x1\term1,...,xN\termN] |- sR) as the bottommost sequent.
   * @param main A formula of the form (Forall x1,...,xN.A).
   * @param terms The list of terms with which to instantiate main. The caller of this
   * method has to ensure the correctness of these terms, and, specifically, that
   * A[x1\term1,...,xN\termN] indeed occurs at the bottom of the proof s1.
   */
  def apply( s1: LKProof, main: HOLFormula, terms: Seq[LambdaExpression] ): LKProof = {
    // main instantiated with ever shorter prefixes of terms: from the fully
    // instantiated matrix down to main itself (terms.length + 1 entries).
    // (The original built this with a redundant intermediate .toList.)
    val partiallyInstantiatedMains = ( 0 to terms.length ).reverse.map( n => instantiate( main, terms.take( n ) ) ).toList
    // Starting from the fully instantiated formula at the bottom of s1,
    // reintroduce the quantifiers innermost-first; at each step the head of
    // the list is the auxiliary formula and the next element the new main.
    val series = terms.reverse.foldLeft( ( s1, partiallyInstantiatedMains ) ) { ( acc, ai ) =>
      ( ForallLeftRule( acc._1, acc._2.head, acc._2.tail.head, ai ), acc._2.tail )
    }
    series._1
  }
}
object ForallRightBlock {
  /**
   * <pre>Applies the ForallRight-rule n times.
   * This method expects a formula main with
   * a quantifier block, and a proof s1 which has a fully
   * instantiated version of main on the right side of its
   * bottommost sequent.
   *
   * The rule:
   * (rest of s1)
   *  sL |- sR, A[x1\y1,...,xN\yN]
   * ---------------------------------- (ForallRight x n)
   *     sL |- sR, Forall x1,..,xN.A
   *
   * where y1,...,yN are eigenvariables.
   * </pre>
   *
   * @param s1 The top proof with (sL |- sR, A[x1\y1,...,xN\yN]) as the bottommost sequent.
   * @param main A formula of the form (Forall x1,...,xN.A).
   * @param eigenvariables The list of eigenvariables with which to instantiate main. The caller of this
   * method has to ensure the correctness of these terms, and, specifically, that
   * A[x1\y1,...,xN\yN] indeed occurs at the bottom of the proof s1.
   */
  def apply( s1: LKProof, main: HOLFormula, eigenvariables: Seq[Var] ): LKProof = {
    // Partial instantiations of main, ordered from fully instantiated (head)
    // down to the uninstantiated main formula itself (last element).
    val partiallyInstantiatedMains = ( 0 to eigenvariables.length ).reverse.toList.map( n => instantiate( main, eigenvariables.take( n ) ) )
    // Introduce one universal quantifier per eigenvariable, innermost first.
    val series = eigenvariables.reverse.foldLeft( ( s1, partiallyInstantiatedMains ) ) { ( acc, ai ) =>
      ( ForallRightRule( acc._1, acc._2.head, acc._2.tail.head, ai ), acc._2.tail )
    }
    series._1
  }

  /**
   * Matches a maximal block of consecutive ForallRight inferences.
   *
   * @return The topmost subproof of the block, the topmost auxiliary occurrence,
   *         the bottommost main occurrence, and the eigenvariables of the block
   *         in the order expected by [[ForallRightBlock.apply]].
   */
  def unapply( p: LKProof ): Option[( LKProof, FormulaOccurrence, FormulaOccurrence, List[Var] )] = p match {
    // Recursive case: the proof above is itself a ForallRight block whose main
    // formula is the auxiliary formula of the bottommost inference.
    case ForallRightRule( ForallRightBlock( q, aux2, main2, eigenvars ), sequent, aux, main, eigenvar ) if main2 == aux =>
      Some( ( q, aux2, main, eigenvar :: eigenvars ) )
    // Base case: a single ForallRight inference.
    case ForallRightRule( q, sequent, aux, main, eigenvar ) =>
      Some( ( q, aux, main, List( eigenvar ) ) )
    case _ => None
  }
}
/**
* This macro rule unifies [[EquationLeft1Rule]] and [[EquationLeft2Rule]] by automatically choosing the appropriate rule.
*
*/
object EquationLeftRule extends EquationRuleLogger {
  // Platform-dependent line separator, used in multi-line error messages.
  val nLine = sys.props( "line.separator" )

  /**
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param pos A position such that A(pos) = s or A(pos) = t
   * @return A proof ending with either an [[EquationLeft1Rule]] or an [[EquationLeft2Rule]] according to which replacement is sensible.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, pos: HOLPosition ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with TermPositions = {
    val ( eqocc, auxocc ) = getTerms( s1.root, s2.root, term1oc, term2oc )
    eqocc.formula match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( replacesLeftSide( s, t, auxocc.formula, pos ) )
          EquationLeft1Rule( s1, s2, term1oc, term2oc, pos )
        else
          EquationLeft2Rule( s1, s2, term1oc, term2oc, pos )
      case _ =>
        throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationLeft1Rule]] or an [[EquationLeft2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with TermPositions = {
    val ( eqocc, auxocc ) = getTerms( s1.root, s2.root, term1oc, term2oc )
    val aux = auxocc.formula
    val eq = eqocc.formula
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( s == t && aux == main ) {
          debug( "Producing equation rule with trivial equation." )
          EquationLeft1Rule( s1, s2, term1oc, term2oc, main )
        } else if ( s == t && aux != main ) {
          throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + " differ." )
        } else if ( s != t && aux == main ) {
          throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
        } else {
          // Determine the unique replacement position and its direction.
          val ( p, leftToRight ) = findReplacementPosition( eq, s, t, aux, main )
          if ( leftToRight ) EquationLeft1Rule( s1, s2, term1oc, term2oc, p )
          else EquationLeft2Rule( s1, s2, term1oc, term2oc, p )
        }
      case _ => throw new LKRuleCreationException( "Formula " + eq + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A sequent with term1oc in the succedent.
   * @param s2 A sequent with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationLeft1Rule]] or an [[EquationLeft2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): OccSequent = {
    val ( eqocc, auxocc ) = getTerms( s1, s2, term1oc, term2oc )
    val aux = auxocc.formula
    val eq = eqocc.formula
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( s == t && aux == main ) {
          debug( "Producing equation rule with trivial equation." )
          EquationLeft1Rule( s1, s2, term1oc, term2oc, main )
        } else if ( s == t && aux != main ) {
          throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + " differ." )
        } else if ( s != t && aux == main ) {
          throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
        } else {
          // Determine the unique replacement position and its direction.
          val ( p, leftToRight ) = findReplacementPosition( eq, s, t, aux, main )
          if ( leftToRight ) EquationLeft1Rule( s1, s2, term1oc, term2oc, p )
          else EquationLeft2Rule( s1, s2, term1oc, term2oc, p )
        }
      case _ => throw new LKRuleCreationException( "Formula " + eq + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A sequent with term1oc in the succedent.
   * @param s2 A sequent with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param pos A position such that A(pos) = s or A(pos) = t
   * @return A proof ending with either an [[EquationLeft1Rule]] or an [[EquationLeft2Rule]] according to which replacement is sensible.
   */
  def apply( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, pos: HOLPosition ): OccSequent = {
    val ( eqocc, auxocc ) = getTerms( s1, s2, term1oc, term2oc )
    eqocc.formula match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( replacesLeftSide( s, t, auxocc.formula, pos ) )
          EquationLeft1Rule( s1, s2, term1oc, term2oc, pos )
        else
          EquationLeft2Rule( s1, s2, term1oc, term2oc, pos )
      case _ =>
        throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }

  /**
   * This version of the rule operates on formulas instead of occurrences. It will attempt to find appropriate occurrences in the premises.
   *
   * @param s1 A proof ending with term1 in the succedent.
   * @param s2 A proof ending with term2 in the antecedent.
   * @param term1 An equation s = t.
   * @param term2 A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationLeft1Rule]] or an [[EquationLeft2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: LKProof, s2: LKProof, term1: HOLFormula, term2: HOLFormula, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
    // Pick the first matching occurrence on each side, if any.
    ( s1.root.succedent.filter( x => x.formula == term1 ).toList, s2.root.antecedent.filter( x => x.formula == term2 ).toList ) match {
      case ( ( x :: _ ), ( y :: _ ) ) => apply( s1, s2, x, y, main )
      case _ => throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" )
    }
  }

  /**
   * This version creates an axiom for the equation.
   *
   * @param s1 A proof ending with term1oc in the antecedent.
   * @param term1oc A formula A.
   * @param eq An equation s = t, introduced via a new axiom eq |- eq.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   */
  def apply( s1: LKProof, term1oc: FormulaOccurrence, eq: HOLFormula, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
    val leftSubproof = Axiom( eq )
    apply( leftSubproof, s1, leftSubproof.root.succedent( 0 ), term1oc, main )
  }

  /**
   * Checks which side of the equation s = t occurs in aux at position pos.
   *
   * @return true iff the term at pos is s, i.e. iff [[EquationLeft1Rule]] applies.
   */
  private def replacesLeftSide( s: LambdaExpression, t: LambdaExpression, aux: HOLFormula, pos: HOLPosition ): Boolean =
    aux.get( pos ) match {
      case Some( `s` ) => true
      case Some( `t` ) => false
      case Some( x ) =>
        throw new LKRuleCreationException( "Wrong term " + x + " in auxiliary formula " + aux + " at position " + pos + "." )
      case None =>
        throw new LKRuleCreationException( "Position " + pos + " is not well-defined for formula " + aux + "." )
    }

  /**
   * Computes the unique position at which aux and main differ by a replacement of s by t or vice versa.
   *
   * @return A pair (p, leftToRight) where p is the replacement position and leftToRight
   *         is true iff s is replaced by t at p (i.e. [[EquationLeft1Rule]] applies).
   */
  private def findReplacementPosition( eq: HOLFormula, s: LambdaExpression, t: LambdaExpression, aux: HOLFormula, main: HOLFormula ): ( HOLPosition, Boolean ) = {
    val sAux = aux.find( s )
    val sMain = main.find( s )
    val tAux = aux.find( t )
    val tMain = main.find( t )
    if ( sAux.isEmpty && tAux.isEmpty )
      throw new LKRuleCreationException( "Neither " + s + " nor " + t + " found in formula " + aux + "." )
    trace( "Positions of s = " + s + " in aux = " + aux + ": " + sAux + "." )
    trace( "Positions of s = " + s + " in main = " + main + ": " + sMain + "." )
    trace( "Positions of t = " + t + " in aux = " + aux + ": " + tAux + "." )
    trace( "Positions of t = " + t + " in main = " + main + ": " + tMain + "." )
    // A position where t occurs in aux but s occurs in main was rewritten t -> s, and vice versa.
    val tToS = sMain intersect tAux
    val sToT = tMain intersect sAux
    trace( "tToS = " + tToS )
    trace( "sToT = " + sToT )
    if ( sToT.length == 1 && tToS.length == 0 ) {
      val p = sToT.head
      val mainNew = HOLPosition.replace( aux, p, t )
      if ( mainNew == main ) ( p, true )
      else throw new LKRuleCreationException( "Replacement (" + aux + ", " + p + ", " + t + ") should yield " + main + " but is " + mainNew + "." )
    } else if ( tToS.length == 1 && sToT.length == 0 ) {
      val p = tToS.head
      val mainNew = HOLPosition.replace( aux, p, s )
      if ( mainNew == main ) ( p, false )
      else throw new LKRuleCreationException( "Replacement (" + aux + ", " + p + ", " + s + ") should yield " + main + " but is " + mainNew + "." )
    } else throw new LKRuleCreationException( "Formulas " + aux + " and " + main + " don't differ in exactly one position." + nLine + " Eq: " + eq )
  }

  // Locates the equation occurrence in the succedent of s1 and the auxiliary
  // occurrence in the antecedent of s2, failing if either is missing.
  private def getTerms( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence ) = {
    val term1op = s1.succedent.find( _ == term1oc )
    val term2op = s2.antecedent.find( _ == term2oc )
    ( term1op, term2op ) match {
      case ( Some( eqocc ), Some( auxocc ) ) => ( eqocc, auxocc )
      case _ => throw new LKRuleCreationException( "Auxiliary formulas are not contained in the right part of the sequent" )
    }
  }
}
/**
* This macro rule unifies [[EquationRight1Rule]] and [[EquationRight2Rule]] by automatically choosing the appropriate rule.
*
*/
object EquationRightRule extends EquationRuleLogger {
  // Platform-dependent line separator, used in multi-line error messages.
  val nLine = sys.props( "line.separator" )

  /**
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the succedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param pos A position such that A(pos) = s or A(pos) = t
   * @return A proof ending with either an [[EquationRight1Rule]] or an [[EquationRight2Rule]] according to which replacement is sensible.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, pos: HOLPosition ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with TermPositions = {
    val ( eqocc, auxocc ) = getTerms( s1.root, s2.root, term1oc, term2oc )
    eqocc.formula match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( replacesLeftSide( s, t, auxocc.formula, pos ) )
          EquationRight1Rule( s1, s2, term1oc, term2oc, pos )
        else
          EquationRight2Rule( s1, s2, term1oc, term2oc, pos )
      case _ =>
        throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the succedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationRight1Rule]] or an [[EquationRight2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with TermPositions = {
    val ( eqocc, auxocc ) = getTerms( s1.root, s2.root, term1oc, term2oc )
    val aux = auxocc.formula
    val eq = eqocc.formula
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( s == t && aux == main ) {
          debug( "Producing equation rule with trivial equation." )
          EquationRight1Rule( s1, s2, term1oc, term2oc, main )
        } else if ( s == t && aux != main ) {
          throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + " differ." )
        } else if ( s != t && aux == main ) {
          throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
        } else {
          // Determine the unique replacement position and its direction.
          val ( p, leftToRight ) = findReplacementPosition( eq, s, t, aux, main )
          if ( leftToRight ) EquationRight1Rule( s1, s2, term1oc, term2oc, p )
          else EquationRight2Rule( s1, s2, term1oc, term2oc, p )
        }
      case _ => throw new LKRuleCreationException( "Formula " + eq + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A sequent with term1oc in the succedent.
   * @param s2 A sequent with term2oc in the succedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationRight1Rule]] or an [[EquationRight2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): OccSequent = {
    val ( eqocc, auxocc ) = getTerms( s1, s2, term1oc, term2oc )
    val aux = auxocc.formula
    val eq = eqocc.formula
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( s == t && aux == main ) {
          debug( "Producing equation rule with trivial equation." )
          EquationRight1Rule( s1, s2, term1oc, term2oc, main )
        } else if ( s == t && aux != main ) {
          throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + " differ." )
        } else if ( s != t && aux == main ) {
          throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
        } else {
          // Determine the unique replacement position and its direction.
          val ( p, leftToRight ) = findReplacementPosition( eq, s, t, aux, main )
          if ( leftToRight ) EquationRight1Rule( s1, s2, term1oc, term2oc, p )
          else EquationRight2Rule( s1, s2, term1oc, term2oc, p )
        }
      case _ => throw new LKRuleCreationException( "Formula " + eq + " is not an equation." )
    }
  }

  /**
   *
   * @param s1 A sequent with term1oc in the succedent.
   * @param s2 A sequent with term2oc in the succedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param pos A position such that A(pos) = s or A(pos) = t
   * @return A proof ending with either an [[EquationRight1Rule]] or an [[EquationRight2Rule]] according to which replacement is sensible.
   */
  def apply( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, pos: HOLPosition ): OccSequent = {
    val ( eqocc, auxocc ) = getTerms( s1, s2, term1oc, term2oc )
    eqocc.formula match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( replacesLeftSide( s, t, auxocc.formula, pos ) )
          EquationRight1Rule( s1, s2, term1oc, term2oc, pos )
        else
          EquationRight2Rule( s1, s2, term1oc, term2oc, pos )
      case _ =>
        throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }

  /**
   * This version of the rule operates on formulas instead of occurrences. It will attempt to find appropriate occurrences in the premises.
   *
   * @param s1 A proof ending with term1 in the succedent.
   * @param s2 A proof ending with term2 in the succedent.
   * @param term1 An equation s = t.
   * @param term2 A formula A.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   * @return A proof ending with either an [[EquationRight1Rule]] or an [[EquationRight2Rule]] according to which one leads from A to A'.
   */
  def apply( s1: LKProof, s2: LKProof, term1: HOLFormula, term2: HOLFormula, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas =
    // Pick the first matching occurrence on each side, if any.
    ( s1.root.succedent.filter( x => x.formula == term1 ).toList, s2.root.succedent.filter( x => x.formula == term2 ).toList ) match {
      case ( ( x :: _ ), ( y :: _ ) ) => apply( s1, s2, x, y, main )
      case _ => throw new LKRuleCreationException( "Not matching formula occurrences found for application of the rule with the given formula" )
    }

  /**
   * This version creates an axiom for the equation.
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param term1oc A formula A.
   * @param eq An equation s = t, introduced via a new axiom eq |- eq.
   * @param main A formula A' such that A' is obtained by replacing one occurrence of s in A by t or vice versa.
   */
  def apply( s1: LKProof, term1oc: FormulaOccurrence, eq: HOLFormula, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
    val leftSubproof = Axiom( eq )
    apply( leftSubproof, s1, leftSubproof.root.succedent( 0 ), term1oc, main )
  }

  /**
   * Checks which side of the equation s = t occurs in aux at position pos.
   *
   * @return true iff the term at pos is s, i.e. iff [[EquationRight1Rule]] applies.
   */
  private def replacesLeftSide( s: LambdaExpression, t: LambdaExpression, aux: HOLFormula, pos: HOLPosition ): Boolean =
    aux.get( pos ) match {
      case Some( `s` ) => true
      case Some( `t` ) => false
      case Some( x ) =>
        throw new LKRuleCreationException( "Wrong term " + x + " in auxiliary formula " + aux + " at position " + pos + "." )
      case None =>
        throw new LKRuleCreationException( "Position " + pos + " is not well-defined for formula " + aux + "." )
    }

  /**
   * Computes the unique position at which aux and main differ by a replacement of s by t or vice versa.
   *
   * @return A pair (p, leftToRight) where p is the replacement position and leftToRight
   *         is true iff s is replaced by t at p (i.e. [[EquationRight1Rule]] applies).
   */
  private def findReplacementPosition( eq: HOLFormula, s: LambdaExpression, t: LambdaExpression, aux: HOLFormula, main: HOLFormula ): ( HOLPosition, Boolean ) = {
    val sAux = aux.find( s )
    val sMain = main.find( s )
    val tAux = aux.find( t )
    val tMain = main.find( t )
    if ( sAux.isEmpty && tAux.isEmpty )
      throw new LKRuleCreationException( "Neither " + s + " nor " + t + " found in formula " + aux + "." )
    trace( "Positions of s = " + s + " in aux = " + aux + ": " + sAux + "." )
    trace( "Positions of s = " + s + " in main = " + main + ": " + sMain + "." )
    trace( "Positions of t = " + t + " in aux = " + aux + ": " + tAux + "." )
    trace( "Positions of t = " + t + " in main = " + main + ": " + tMain + "." )
    // A position where t occurs in aux but s occurs in main was rewritten t -> s, and vice versa.
    val tToS = sMain intersect tAux
    val sToT = tMain intersect sAux
    trace( "tToS = " + tToS )
    trace( "sToT = " + sToT )
    if ( sToT.length == 1 && tToS.length == 0 ) {
      val p = sToT.head
      val mainNew = HOLPosition.replace( aux, p, t )
      if ( mainNew == main ) ( p, true )
      else throw new LKRuleCreationException( "Replacement (" + aux + ", " + p + ", " + t + ") should yield " + main + " but is " + mainNew + "." )
    } else if ( tToS.length == 1 && sToT.length == 0 ) {
      val p = tToS.head
      val mainNew = HOLPosition.replace( aux, p, s )
      if ( mainNew == main ) ( p, false )
      else throw new LKRuleCreationException( "Replacement (" + aux + ", " + p + ", " + s + ") should yield " + main + " but is " + mainNew + "." )
    } else throw new LKRuleCreationException( "Formulas " + aux + " and " + main + " don't differ in exactly one position." + nLine + " Eq: " + eq )
  }

  // Locates the equation occurrence in the succedent of s1 and the auxiliary
  // occurrence in the succedent of s2, failing if either is missing.
  private def getTerms( s1: OccSequent, s2: OccSequent, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence ) = {
    val term1op = s1.succedent.find( _ == term1oc )
    val term2op = s2.succedent.find( _ == term2oc )
    ( term1op, term2op ) match {
      case ( Some( eqocc ), Some( auxocc ) ) => ( eqocc, auxocc )
      case _ => throw new LKRuleCreationException( "Auxiliary formulas are not contained in the right part of the sequent" )
    }
  }
}
/**
* Macro rule that simulates several term replacements at once.
*
*/
object EquationLeftMacroRule extends EquationRuleLogger {
  /**
   * Allows replacements at several positions in the auxiliary formula.
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param sPos List of positions of terms that should be replaced by s.
   * @param tPos List of positions of terms that should be replaced by t.
   * @return A new proof whose main formula is A with every p in sPos replaced by s and every p in tPos replaced by t.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, sPos: Seq[HOLPosition], tPos: Seq[HOLPosition] ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas = {
    val ( eqocc, auxocc ) = ( s1.root.succedent.find( _ == term1oc ), s2.root.antecedent.find( _ == term2oc ) ) match {
      case ( Some( e ), Some( a ) ) => ( e, a )
      case _ => throw new LKRuleCreationException( "Auxiliary formulas not found." )
    }
    val ( eq, aux ) = ( eqocc.formula, auxocc.formula )
    trace( "EquationLeftMacroRule called with equation " + term1oc + ", aux formula " + term2oc + ", s positions " + sPos + " and t positions " + tPos )
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        // Filter out those positions where no terms need to be replaced:
        // a position to be replaced by s is active only if it currently holds t, and vice versa.
        val ( sPosActive, tPosActive ) = ( sPos filter { aux.get( _ ) match { case Some( `t` ) => true; case _ => false } },
          tPos filter { aux.get( _ ) match { case Some( `s` ) => true; case _ => false } } )
        val n = sPosActive.length + tPosActive.length
        trace( "" + n + " replacements to make." )
        n match {
          case 0 => throw new Exception( "This should never happen." )
          case 1 =>
            // A single replacement is just an ordinary equation rule application.
            EquationLeftRule( s1, s2, term1oc, term2oc, ( sPosActive ++ tPosActive ).head )
          case _ =>
            // Initialize the proof currently being worked on and its auxiliary formula.
            var currentProofR = s2
            var currentAux = term2oc
            // Save newly created equations in a list so we can later contract them.
            val equations = new ListBuffer[FormulaOccurrence]
            // Iterate over the s-positions.
            for ( p <- sPosActive ) aux.get( p ) match {
              case Some( `s` ) => trace( "s found at s-position " + p + ", nothing to do." )
              case Some( `t` ) =>
                // Generate a new instance of s = t :- s = t and save the formula in the antecedent in the equations list.
                val currentProofL = Axiom( List( eq ), List( eq ) )
                equations += currentProofL.root.antecedent.head
                val currentEq = currentProofL.root.succedent.head
                // Create a subproof that replaces the term at p.
                currentProofR = EquationLeftRule( currentProofL, currentProofR, currentEq, currentAux, p )
                // The new auxiliary formula is the principal formula of the previous step.
                currentAux = currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 )
              case _ => throw new LKRuleCreationException( "Position " + p + " in formula " + aux + " does not contain term " + s + " or " + t + "." )
            }
            // Iterate over the t-positions. For comments see the previous loop.
            for ( p <- tPosActive ) aux.get( p ) match {
              case Some( `s` ) =>
                val currentProofL = Axiom( List( eq ), List( eq ) )
                equations += currentProofL.root.antecedent.head
                val currentEq = currentProofL.root.succedent.head
                currentProofR = EquationLeftRule( currentProofL, currentProofR, currentEq, currentAux, p )
                currentAux = currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 )
              case Some( `t` ) => trace( "t found at t-position " + p + ", nothing to do." )
              case _ => throw new LKRuleCreationException( "Position " + p + " in formula " + aux + " does not contain term " + s + " or " + t + "." )
            }
            trace( "" + n + " replacements made." )
            // Find the descendants of the saved equations in the current end sequent.
            val equationDescendants = equations.toList map { currentProofR.getDescendantInLowerSequent } map { _.get }
            // Contract the equations.
            currentProofR = ContractionLeftMacroRule( currentProofR, equationDescendants )
            // Finally, remove the remaining occurrence of s = t with a cut.
            CutRule( s1, currentProofR, eqocc, currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 ) )
        }
      case _ => throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }

  /**
   * Allows replacements at several positions in the auxiliary formula.
   *
   * @param s1 A proof ending with term1oc in the succedent.
   * @param s2 A proof ending with term2oc in the antecedent.
   * @param term1oc An equation s = t.
   * @param term2oc A formula A.
   * @param main The proposed main formula.
   * @return A new proof with principal formula main. Eq rules will be used according to the replacements that need to be made.
   */
  def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas = {
    val ( eqocc, auxocc ) = ( s1.root.succedent.find( _ == term1oc ), s2.root.antecedent.find( _ == term2oc ) ) match {
      case ( Some( e ), Some( a ) ) => ( e, a )
      case _ => throw new LKRuleCreationException( "Auxiliary formulas not found." )
    }
    val ( eq, aux ) = ( eqocc.formula, auxocc.formula )
    trace( "EquationLeftMacroRule called with equation " + term1oc + ", aux formula " + term2oc + " and main formula " + main )
    eq match {
      case Eq( s, t ) =>
        trace( "Eq: " + s + " = " + t + "." )
        if ( s == t && aux == main ) {
          debug( "Producing equation rule with trivial equation." )
          EquationLeft1Rule( s1, s2, term1oc, term2oc, main )
        } else if ( s == t && aux != main ) {
          throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + " differ." )
        } else if ( s != t && aux == main ) {
          throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
        } else {
          // Find all positions of s and t in aux.
          val ( auxS, auxT ) = ( aux.find( s ), aux.find( t ) )
          // Find all positions of s and t in main.
          val ( mainS, mainT ) = ( main.find( s ), main.find( t ) )
          // Find the positions where actual replacements will happen.
          val ( tToS, sToT ) = ( mainS intersect auxT, mainT intersect auxS )
          // Call the previous apply method.
          apply( s1, s2, term1oc, term2oc, tToS, sToT )
        }
      case _ => throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
    }
  }
}
/**
* Macro rule that simulates several term replacements at once.
*
*/
object EquationRightMacroRule extends EquationRuleLogger {
/**
 * Allows replacements at several positions in the auxiliary formula.
 *
 * @param s1 A proof ending with term1oc in the succedent.
 * @param s2 A proof ending with term2oc in the succedent.
 * @param term1oc An equation s = t.
 * @param term2oc A formula A.
 * @param sPos List of positions of terms that should be replaced by s.
 * @param tPos List of positions of terms that should be replaced by t.
 * @return A new proof whose main formula is A with every p in sPos replaced by s and every p in tPos replaced by t.
 */
def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, sPos: Seq[HOLPosition], tPos: Seq[HOLPosition] ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas = {
  // Detailed comments can be found in the corresponding apply method of EquationLeftMacroRule;
  // this method mirrors it with the auxiliary formula taken from the succedent of s2.
  val ( eqocc, auxocc ) = ( s1.root.succedent.find( _ == term1oc ), s2.root.succedent.find( _ == term2oc ) ) match {
    case ( Some( e ), Some( a ) ) => ( e, a )
    case _ => throw new LKRuleCreationException( "Auxiliary formulas not found." )
  }
  val ( eq, aux ) = ( eqocc.formula, auxocc.formula )
  trace( "EquationRightMacroRule called with equation " + term1oc + ", aux formula " + term2oc + ", s positions " + sPos + " and t positions " + tPos )
  eq match {
    case Eq( s, t ) =>
      trace( "Eq: " + s + " = " + t + "." )
      // Keep only the positions where a replacement actually has to happen:
      // a position to be replaced by s is active only if it currently holds t, and vice versa.
      val ( sPosActive, tPosActive ) = ( sPos filter { aux.get( _ ) match { case Some( `t` ) => true; case _ => false } },
        tPos filter { aux.get( _ ) match { case Some( `s` ) => true; case _ => false } } )
      val n = sPosActive.length + tPosActive.length
      trace( "" + n + " replacements to make." )
      n match {
        case 0 => throw new Exception( "This should never happen." )
        case 1 =>
          // A single replacement is an ordinary equation rule application.
          EquationRightRule( s1, s2, term1oc, term2oc, ( sPosActive ++ tPosActive ).head )
        case _ =>
          // Proof currently being extended and its current auxiliary occurrence.
          var currentProofR = s2
          var currentAux = term2oc
          // Newly created equation occurrences, collected for later contraction.
          val equations = new ListBuffer[FormulaOccurrence]
          // Replace the term at each active s-position via a fresh eq-axiom.
          for ( p <- sPosActive ) aux.get( p ) match {
            case Some( `s` ) => trace( "s found at s-position " + p + ", nothing to do." )
            case Some( `t` ) =>
              val currentProofL = Axiom( List( eq ), List( eq ) )
              equations += currentProofL.root.antecedent.head
              val currentEq = currentProofL.root.succedent.head
              currentProofR = EquationRightRule( currentProofL, currentProofR, currentEq, currentAux, p )
              // The new auxiliary formula is the principal formula of the previous step.
              currentAux = currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 )
            case _ => throw new LKRuleCreationException( "Position " + p + " in formula " + aux + " does not contain term " + s + " or " + t + "." )
          }
          // Replace the term at each active t-position; mirrors the previous loop.
          for ( p <- tPosActive ) aux.get( p ) match {
            case Some( `s` ) =>
              val currentProofL = Axiom( List( eq ), List( eq ) )
              equations += currentProofL.root.antecedent.head
              val currentEq = currentProofL.root.succedent.head
              currentProofR = EquationRightRule( currentProofL, currentProofR, currentEq, currentAux, p )
              currentAux = currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 )
            case Some( `t` ) => trace( "t found at t-position " + p + ", nothing to do." )
            case _ => throw new LKRuleCreationException( "Position " + p + " in formula " + aux + " does not contain term " + s + " or " + t + "." )
          }
          trace( "" + n + " replacements made." )
          // Locate the descendants of the saved equations in the current end sequent and contract them.
          val equationDescendants = equations.toList map { currentProofR.getDescendantInLowerSequent } map { _.get }
          currentProofR = ContractionLeftMacroRule( currentProofR, equationDescendants )
          // Finally, remove the remaining occurrence of s = t with a cut against s1.
          CutRule( s1, currentProofR, eqocc, currentProofR.asInstanceOf[PrincipalFormulas].prin( 0 ) )
      }
    case _ => throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
  }
}
/**
* Allows replacements at several positions in the auxiliary formula.
*
* @param s1 A proof ending with term1oc in the succedent.
* @param s2 A proof ending with term2oc in the succedent.
* @param term1oc An equation s = t.
* @param term2oc A formula A.
* @param main The proposed main formula.
* @return A new proof with principal formula main. Eq rules will be used according to the replacements that need to be made.
*/
def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, main: HOLFormula ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas = {
val ( eqocc, auxocc ) = ( s1.root.succedent.find( _ == term1oc ), s2.root.succedent.find( _ == term2oc ) ) match {
case ( Some( e ), Some( a ) ) => ( e, a )
case _ => throw new LKRuleCreationException( "Auxiliary formulas not found." )
}
val ( eq, aux ) = ( eqocc.formula, auxocc.formula )
trace( "EquationRightMacroRule called with equation " + term1oc + ", aux formula " + term2oc + " and main formula " + main )
eq match {
case Eq( s, t ) =>
trace( "Eq: " + s + " = " + t + "." )
if ( s == t && aux == main ) {
debug( "Producing equation rule with trivial equation." )
EquationRight1Rule( s1, s2, term1oc, term2oc, main )
} else if ( s == t && aux != main ) {
throw new LKRuleCreationException( "Eq is trivial, but aux formula " + aux + " and main formula " + main + "differ." )
} else if ( s != t && aux == main ) {
throw new LKRuleCreationException( "Nontrivial equation, but aux and main formula are equal." )
} else {
// Find all positions of s and t in aux.
val ( auxS, auxT ) = ( aux.find( s ), aux.find( t ) )
// Find all positions of s and t in main.
val ( mainS, mainT ) = ( main.find( s ), main.find( t ) )
// Find the positions where actual replacements will happen.
val ( tToS, sToT ) = ( mainS intersect auxT, mainT intersect auxS )
// Call the previous apply method.
apply( s1, s2, term1oc, term2oc, tToS, sToT )
}
case _ => throw new LKRuleCreationException( "Formula occurrence " + eqocc + " is not an equation." )
}
}
}
/**
* This macro rule simulates a series of contractions in the antecedent.
*
*/
object ContractionLeftMacroRule extends MacroRuleLogger {

  /**
   * Contracts a list of occurrences in the antecedent into a single occurrence.
   *
   * @param s1 A proof.
   * @param occs A list of occurrences of a Formula in the antecedent of s1.
   * @return A proof ending with as many contraction rules as necessary to contract occs into a single occurrence.
   */
  def apply( s1: LKProof, occs: Seq[FormulaOccurrence] ): Tree[OccSequent] with LKProof = occs match {
    // Bug fix: the original patterns used :: and Nil, which only match when occs
    // is a List at runtime and throw a MatchError for any other Seq (e.g. Vector).
    // The Seq()/+: extractors work for arbitrary Seqs.
    case Seq() | Seq( _ ) => s1 // Zero or one occurrence: nothing to contract.
    case occ1 +: occ2 +: rest =>
      rest match {
        case Seq() => ContractionLeftRule( s1, occ1, occ2 )
        case _ =>
          val subProof = ContractionLeftRule( s1, occ1, occ2 )
          val occ = subProof.prin( 0 )
          // Trace the remaining occurrences through the new inference before recursing.
          val restNew = rest map { subProof.getDescendantInLowerSequent }
          if ( restNew.forall( _.isDefined ) )
            ContractionLeftMacroRule( subProof, occ +: restNew.map( _.get ) )
          else
            throw new LKRuleCreationException( "Formula not found in sequent " + s1.root )
      }
  }

  /**
   * Contracts one formula in the antecedent down to n occurrences. Use with care!
   *
   * @param s1 A proof.
   * @param form A formula.
   * @param n Maximum number of occurrences of form in the antecedent of the end sequent. Defaults to 1, i.e. all occurrences are contracted.
   * @return A proof in which at most n occurrences of form remain in the antecedent.
   */
  def apply( s1: LKProof, form: HOLFormula, n: Int = 1 ): Tree[OccSequent] with LKProof = {
    if ( n < 1 ) throw new IllegalArgumentException( "n must be >= 1." )
    // Keep the first n - 1 occurrences untouched; contract the remainder into one.
    val list = s1.root.antecedent.filter( _.formula == form ).drop( n - 1 )

    apply( s1, list )
  }
}
/**
* This macro rule simulates a series of contractions in the succedent.
*
*/
object ContractionRightMacroRule extends MacroRuleLogger {

  /**
   * Contracts a list of occurrences in the succedent into a single occurrence.
   *
   * @param s1 A proof.
   * @param occs A list of occurrences of a Formula in the succedent of s1.
   * @return A proof ending with as many contraction rules as necessary to contract occs into a single occurrence.
   */
  def apply( s1: LKProof, occs: Seq[FormulaOccurrence] ): Tree[OccSequent] with LKProof = occs match {
    // Bug fix: the original patterns used :: and Nil, which only match when occs
    // is a List at runtime and throw a MatchError for any other Seq (e.g. Vector).
    // The Seq()/+: extractors work for arbitrary Seqs.
    case Seq() | Seq( _ ) => s1 // Zero or one occurrence: nothing to contract.
    case occ1 +: occ2 +: rest =>
      rest match {
        case Seq() => ContractionRightRule( s1, occ1, occ2 )
        case _ =>
          val subProof = ContractionRightRule( s1, occ1, occ2 )
          val occ = subProof.prin( 0 )
          // Trace the remaining occurrences through the new inference before recursing.
          // (Lambda style harmonized with ContractionLeftMacroRule.)
          val restNew = rest map { subProof.getDescendantInLowerSequent }
          if ( restNew.forall( _.isDefined ) )
            ContractionRightMacroRule( subProof, occ +: restNew.map( _.get ) )
          else
            throw new LKRuleCreationException( "Formula not found in sequent " + s1.root )
      }
  }

  /**
   * Contracts one formula in the succedent down to n occurrences. Use with care!
   *
   * @param s1 A proof.
   * @param form A formula.
   * @param n Maximum number of occurrences of form in the succedent of the end sequent. Defaults to 1, i.e. all occurrences are contracted.
   * @return A proof in which at most n occurrences of form remain in the succedent.
   */
  def apply( s1: LKProof, form: HOLFormula, n: Int = 1 ): Tree[OccSequent] with LKProof = {
    if ( n < 1 ) throw new IllegalArgumentException( "n must be >= 1." )
    // Keep the first n - 1 occurrences untouched; contract the remainder into one.
    val list = s1.root.succedent.filter( _.formula == form ).drop( n - 1 )

    apply( s1, list )
  }
}
/**
* This macro rule simulates a series of contractions in both cedents.
*
*/
object ContractionMacroRule extends MacroRuleLogger {

  /**
   * Contracts the current proof down to a given FSequent.
   *
   * @param s1 An LKProof.
   * @param targetSequent The target sequent.
   * @param strict If true, the root of s1 must 1.) contain every formula at least as often as targetSequent
   * and 2.) contain no formula that isn't contained at least once in targetSequent.
   * @return s1 with its end sequent contracted down to targetSequent.
   */
  def apply( s1: LKProof, targetSequent: HOLSequent, strict: Boolean = true ): Tree[OccSequent] with LKProof = {
    trace( "ContractionMacroRule called with subproof " + s1 + ", target sequent " + targetSequent + ", strict = " + strict )
    val currentSequent = s1.root.toHOLSequent
    val targetAnt = targetSequent.antecedent
    val targetSuc = targetSequent.succedent

    // The target is reachable by contractions alone iff it is a sub-multiset of
    // the current sequent and every current formula still occurs at least once.
    val assertion = ( ( targetSequent isSubMultisetOf currentSequent )
      && ( currentSequent isSubsetOf targetSequent ) )

    trace( "targetSequent diff currentSequent: " + targetSequent.diff( currentSequent ) )
    trace( "currentSequent.distinct diff targetSequent.distinct: " + currentSequent.distinct.diff( targetSequent.distinct ) )
    trace( "If called with strict this would " + { if ( assertion ) "succeed." else "fail." } )

    // Idiom fix: use short-circuiting && rather than the bitwise & of the original.
    if ( strict && !assertion ) {
      throw new LKRuleCreationException( "Sequent " + targetSequent + " cannot be reached from " + currentSequent + " by contractions." )
    }

    // Contract each distinct formula down to its multiplicity in the target,
    // first in the antecedent, then in the succedent.
    val subProof = targetAnt.distinct.foldLeft( s1 )( ( acc, x ) => { trace( "Contracting formula " + x + " in antecedent." ); ContractionLeftMacroRule( acc, x, targetAnt.count( _ == x ) ) } )
    targetSuc.distinct.foldLeft( subProof )( ( acc, x ) => { trace( "Contracting formula " + x + " in succedent." ); ContractionRightMacroRule( acc, x, targetSuc.count( _ == x ) ) } )
  }

  /**
   * Performs all possible contractions. Use with care!
   *
   * @param s1 A proof.
   * @return A proof with all duplicate formulas in the end sequent contracted.
   */
  def apply( s1: LKProof ): Tree[OccSequent] with LKProof = {
    // The distinct end sequent is exactly the result of contracting every duplicate.
    val targetSequent = s1.root.toHOLSequent.distinct
    apply( s1, targetSequent )
  }
}
/**
* This macro rule simulates a series of weakenings in the antecedent.
*
*/
object WeakeningLeftMacroRule extends MacroRuleLogger {

  /**
   * Weakens each formula of a list into the antecedent, one after the other.
   *
   * @param s1 A Proof.
   * @param list A list of Formulas.
   * @return A new proof whose antecedent contains new occurrences of the formulas in list.
   */
  def apply( s1: LKProof, list: Seq[HOLFormula] ): Tree[OccSequent] with LKProof = list match {
    case Seq()          => s1
    case first +: tail  => apply( WeakeningLeftRule( s1, first ), tail )
  }

  /**
   * Weakens a proof until a formula occurs at least a given number of times in the antecedent.
   *
   * @param s1 An LKProof.
   * @param form A Formula.
   * @param n A natural number.
   * @return s1 extended with weakenings such that form occurs at least n times in the antecedent of the end sequent.
   */
  def apply( s1: LKProof, form: HOLFormula, n: Int ): Tree[OccSequent] with LKProof = {
    // If form already occurs n or more times, no weakenings are added
    // (Seq.fill with a non-positive count yields an empty sequence).
    val missing = n - s1.root.antecedent.count( _.formula == form )
    apply( s1, Seq.fill( missing )( form ) )
  }
}
/**
* This macro rule simulates a series of weakenings in the succedent.
*
*/
object WeakeningRightMacroRule extends MacroRuleLogger {

  /**
   * Weakens each formula of a list into the succedent, one after the other.
   *
   * @param s1 A Proof.
   * @param list A list of Formulas.
   * @return A new proof whose succedent contains new occurrences of the formulas in list.
   */
  def apply( s1: LKProof, list: Seq[HOLFormula] ): Tree[OccSequent] with LKProof = list match {
    case Seq()          => s1
    case first +: tail  => apply( WeakeningRightRule( s1, first ), tail )
  }

  /**
   * Weakens a proof until a formula occurs at least a given number of times in the succedent.
   *
   * @param s1 An LKProof.
   * @param form A Formula.
   * @param n A natural number.
   * @return s1 extended with weakenings such that form occurs at least n times in the succedent of the end sequent.
   */
  def apply( s1: LKProof, form: HOLFormula, n: Int ): Tree[OccSequent] with LKProof = {
    // If form already occurs n or more times, no weakenings are added
    // (Seq.fill with a non-positive count yields an empty sequence).
    val missing = n - s1.root.succedent.count( _.formula == form )
    apply( s1, Seq.fill( missing )( form ) )
  }
}
/**
* This macro rule simulates a series of weakenings in both cedents.
*
*/
object WeakeningMacroRule extends MacroRuleLogger {

  /**
   * Weakens lists of formulas into both cedents.
   *
   * @param s1 A proof.
   * @param antList A list of formulas.
   * @param sucList A list of formulas.
   * @return A new proof whose antecedent and succedent contain new occurrences of the formulas in antList and sucList, respectively.
   */
  def apply( s1: LKProof, antList: Seq[HOLFormula], sucList: Seq[HOLFormula] ): Tree[OccSequent] with LKProof =
    WeakeningRightMacroRule( WeakeningLeftMacroRule( s1, antList ), sucList )

  /**
   * Weakens a proof until its end sequent is a given target sequent.
   *
   * @param s1 A proof.
   * @param targetSequent A sequent of formulas.
   * @param strict If true, will require that targetSequent contains the root of s1.
   * @return A proof whose end sequent is targetSequent.
   */
  def apply( s1: LKProof, targetSequent: HOLSequent, strict: Boolean = true ): Tree[OccSequent] with LKProof = {
    val currentSequent = s1.root.toHOLSequent

    // Idiom/perf fix: short-circuiting && — the original non-short-circuiting &
    // evaluated the sub-multiset check even when strict was false.
    if ( strict && !( currentSequent isSubMultisetOf targetSequent ) )
      throw new LKRuleCreationException( "Sequent " + targetSequent + " cannot be reached from " + currentSequent + " by weakenings." )

    // Weaken in exactly the formulas missing from the current end sequent.
    val ( antDiff, sucDiff ) = ( targetSequent diff currentSequent ).toTuple

    WeakeningMacroRule( s1, antDiff, sucDiff )
  }
}
/**
* This macro rule simulates multiple weakenings and contractions in both cedents.
*
*/
object WeakeningContractionMacroRule extends MacroRuleLogger {

  /**
   * Adjusts the multiplicity of formulas in both cedents by weakening and contracting.
   *
   * @param s1 An LKProof.
   * @param antList List of pairs (f,n) of type (Formula, Int) that express “f should occur n times in the antecedent”.
   * @param sucList List of pairs (f,n) of type (Formula, Int) that express “f should occur n times in the succedent”.
   * @param strict If true: requires that for (f,n) in antList or sucList, if f occurs in the root of s1, then n > 0.
   * @return A proof whose end sequent has the requested multiplicities.
   */
  def apply( s1: LKProof, antList: Seq[( HOLFormula, Int )], sucList: Seq[( HOLFormula, Int )], strict: Boolean ): Tree[OccSequent] with LKProof = {
    // Counts are taken against the original end sequent of s1.
    val currentAnt = s1.root.antecedent map { _.formula }
    val currentSuc = s1.root.succedent map { _.formula }

    val subProof = antList.foldLeft( s1 ) {
      case ( acc, ( f, n ) ) =>
        val nCurrent = currentAnt.count( _ == f )
        if ( strict && n == 0 && nCurrent != 0 )
          throw new LKRuleCreationException( "Cannot erase formula occurrences." )

        if ( n > nCurrent ) WeakeningLeftMacroRule( acc, f, n - nCurrent )
        else if ( n < nCurrent ) ContractionLeftMacroRule( acc, f, n )
        else acc
    }

    sucList.foldLeft( subProof ) {
      case ( acc, ( f, n ) ) =>
        val nCurrent = currentSuc.count( _ == f )
        if ( strict && n == 0 && nCurrent != 0 )
          throw new LKRuleCreationException( "Cannot erase formula occurrences." )

        if ( n > nCurrent ) WeakeningRightMacroRule( acc, f, n - nCurrent )
        else if ( n < nCurrent ) ContractionRightMacroRule( acc, f, n )
        else acc
    }
  }

  /**
   * Transforms the end sequent of a proof into a target sequent by weakening and contraction.
   *
   * @param s1 An LKProof.
   * @param targetSequent The proposed end sequent.
   * @param strict If true, will require that the root of s1 contains no formula that doesn't appear at least once in targetSequent.
   * @return s1 with its end sequent modified to targetSequent by means of weakening and contraction.
   */
  def apply( s1: LKProof, targetSequent: HOLSequent, strict: Boolean = true ): Tree[OccSequent] with LKProof = {
    val currentSequent = s1.root.toHOLSequent
    if ( strict && !( currentSequent isSubsetOf targetSequent ) )
      throw new LKRuleCreationException( "Sequent " + targetSequent + " cannot be reached from " + currentSequent + " by weakenings and contractions." )

    // Pair each distinct formula with its multiplicity in the given cedent.
    def multiplicities( cedent: Seq[HOLFormula] ): Seq[( HOLFormula, Int )] =
      cedent.distinct map ( f => ( f, cedent.count( _ == f ) ) )

    apply( s1, multiplicities( targetSequent.antecedent ), multiplicities( targetSequent.succedent ), strict )
  }
}
/**
* Computes a proof of F from a proof of some instances of F
*
*/
object proofFromInstances {

  /**
   * @param s1 An LKProof containing the instances in es in its end sequent.
   * @param es An ExpansionSequent in which all shallow formulas are prenex and which contains no strong or Skolem quantifiers.
   * @return A proof starting with s1 and ending with the deep sequent of es.
   */
  def apply( s1: LKProof, es: ExpansionSequent ): LKProof =
    // Folds the single-tree overload over every expansion tree of the sequent.
    ( es.antecedent ++ es.succedent ).foldLeft( s1 )( apply )

  /**
   * @param s1 An LKProof containing the instances in et in its end sequent
   * @param et An ExpansionTree whose shallow formula is prenex and which contains no strong or Skolem quantifiers.
   * @return A proof starting with s1 and ending with the deep formula of et.
   */
  def apply( s1: LKProof, et: ExpansionTree ): LKProof = apply( s1, compressQuantifiers( et ) )

  /**
   * @param s1 An LKProof containing the instances in mes in its end sequent.
   * @param mes A MultiExpansionSequent in which all shallow formulas are prenex and which contains no strong or Skolem quantifiers.
   * @return A proof starting with s1 and ending with the deep sequent of mes.
   */
  // The DummyImplicit parameter only serves to give this overload a distinct
  // signature (standard Scala technique for avoiding erasure/overload clashes).
  def apply( s1: LKProof, mes: MultiExpansionSequent )( implicit dummyImplicit: DummyImplicit ): LKProof = ( mes.antecedent ++ mes.succedent ).foldLeft( s1 )( apply )

  /**
   * @param s1 An LKProof containing the instances in et in its end sequent
   * @param met A MultiExpansionTree whose shallow formula is prenex and which contains no strong or Skolem quantifiers.
   * @return A proof starting with s1 and ending with the deep formula of met.
   */
  def apply( s1: LKProof, met: MultiExpansionTree ): LKProof = {
    require( isPrenex( met.toShallow ), "Shallow formula of " + met + " is not prenex" )

    met match {
      case METWeakQuantifier( f @ All( _, _ ), instances ) =>
        // Introduce each instance by a block of ForallLeft inferences, then
        // contract the resulting copies of f in the antecedent.
        val tmp = instances.foldLeft( s1 ) {
          ( acc, i ) => ForallLeftBlock( acc, f, i._2 )
        }

        ContractionLeftMacroRule( tmp, f )
      case METWeakQuantifier( f @ Ex( _, _ ), instances ) =>
        // Dual case: ExistsRight blocks followed by contractions in the succedent.
        val tmp = instances.foldLeft( s1 ) {
          ( acc, i ) => ExistsRightBlock( acc, f, i._2 )
        }

        ContractionRightMacroRule( tmp, f )
      case METSkolemQuantifier( _, _, _ ) | METStrongQuantifier( _, _, _ ) =>
        throw new UnsupportedOperationException( "This case is not handled at this time." )
      // Quantifier-free trees require no inferences.
      case _ => s1
    }
  }
}
/**
* Maybe there is a better place for this?
*
*/
object applyRecursive {
/**
* Recursively applies a function f to a proof.
*
* In the case of an axiom p, the result is just f(p).
*
* In the case of a unary proof p with subproof u, this means that it recursively applies f to u, giving u', and then computes f(p(u')).
* Binary proofs work analogously.
*
* Caveat: It might mess up the ancestor relation on formula occurrences, so be careful.
*
* @param f A function of type LKProof => LKProof
* @param proof An LKProof
* @return
*/
def apply( f: LKProof => LKProof )( proof: LKProof ): LKProof = proof match {
  // Base case: axioms are passed to f directly.
  case Axiom( _ ) => f( proof )

  // Unary rules: recursively transform the subproof, rebuild the same rule on
  // top of the transformed subproof, then apply f to the rebuilt proof.
  case WeakeningLeftRule( up, _, p1 ) =>
    f( WeakeningLeftRule( applyRecursive( f )( up ), p1.formula ) )
  case WeakeningRightRule( up, r, p1 ) =>
    f( WeakeningRightRule( applyRecursive( f )( up ), p1.formula ) )
  case ContractionLeftRule( up, r, a1, _, _ ) =>
    val subProof = applyRecursive( f )( up )
    f( ContractionLeftRule( subProof, a1.formula ) )
  case ContractionRightRule( up, r, a1, _, _ ) =>
    val subProof = applyRecursive( f )( up )
    f( ContractionRightRule( subProof, a1.formula ) )
  case AndLeft1Rule( up, _, a, p ) =>
    val subProof = applyRecursive( f )( up )
    f( AndLeft1Rule( subProof, a.formula, p.formula ) )
  case AndLeft2Rule( up, _, a, p ) =>
    // Note the swapped argument order relative to AndLeft1Rule.
    val subProof = applyRecursive( f )( up )
    f( AndLeft2Rule( subProof, p.formula, a.formula ) )
  case OrRight1Rule( up, r, a, p ) =>
    val subProof = applyRecursive( f )( up )
    f( OrRight1Rule( subProof, a.formula, p.formula ) )
  case OrRight2Rule( up, r, a, p ) =>
    // Note the swapped argument order relative to OrRight1Rule.
    val subProof = applyRecursive( f )( up )
    f( OrRight2Rule( subProof, p.formula, a.formula ) )
  case ImpRightRule( up, _, a1, a2, _ ) =>
    val subProof = applyRecursive( f )( up )
    f( ImpRightRule( subProof, a1.formula, a2.formula ) )
  case NegLeftRule( up, _, a, _ ) =>
    val subProof = applyRecursive( f )( up )
    f( NegLeftRule( subProof, a.formula ) )
  case NegRightRule( up, _, a, _ ) =>
    val subProof = applyRecursive( f )( up )
    f( NegRightRule( subProof, a.formula ) )
  case ForallLeftRule( up, _, a, p, t ) =>
    val subProof = applyRecursive( f )( up )
    f( ForallLeftRule( subProof, a.formula, p.formula, t ) )
  case ExistsRightRule( up, _, a, p, t ) =>
    val subProof = applyRecursive( f )( up )
    f( ExistsRightRule( subProof, a.formula, p.formula, t ) )
  case ForallRightRule( up, _, a, p, v ) =>
    val subProof = applyRecursive( f )( up )
    f( ForallRightRule( subProof, a.formula, p.formula, v ) )
  case ExistsLeftRule( up, r, a, p, v ) =>
    val subProof = applyRecursive( f )( up )
    f( ExistsLeftRule( subProof, a.formula, p.formula, v ) )
  case DefinitionLeftRule( up, _, a, p ) =>
    val subProof = applyRecursive( f )( up )
    f( DefinitionLeftRule( subProof, a.formula, p.formula ) )
  case DefinitionRightRule( up, _, a, p ) =>
    val subProof = applyRecursive( f )( up )
    f( DefinitionRightRule( subProof, a.formula, p.formula ) )

  // Binary rules: transform both subproofs, rebuild, then apply f.
  case CutRule( up1, up2, _, a1, a2 ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    f( CutRule( subProof1, subProof2, a1.formula ) )
  case AndRightRule( up1, up2, _, a1, a2, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    f( AndRightRule( subProof1, subProof2, a1.formula, a2.formula ) )
  case OrLeftRule( up1, up2, r, a1, a2, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    f( OrLeftRule( subProof1, subProof2, a1.formula, a2.formula ) )
  case ImpLeftRule( up1, up2, r, a1, a2, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    f( ImpLeftRule( subProof1, subProof2, a1.formula, a2.formula ) )

  // TODO: change equation rules
  // The equation rules look up the descendants of the auxiliary occurrences in
  // the transformed subproofs because these rules need occurrences (not just
  // formulas) for reconstruction. NOTE(review): only pos(0) is re-applied — any
  // further positions appear to be dropped; confirm whether that is intended.
  case EquationLeft1Rule( up1, up2, _, a1, a2, pos, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    val ( a1New, a2New ) = ( subProof1.root.succedent.find( _ =^= a1 ), subProof2.root.antecedent.find( _ =^= a2 ) )
    if ( a1New.isEmpty || a2New.isEmpty )
      throw new LKRuleCreationException( "Couldn't find descendants of " + a1 + " and " + a2 + "." )
    f( EquationLeft1Rule( subProof1, subProof2, a1New.get, a2New.get, pos( 0 ) ) )
  case EquationLeft2Rule( up1, up2, _, a1, a2, pos, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    val ( a1New, a2New ) = ( subProof1.root.succedent.find( _ =^= a1 ), subProof2.root.antecedent.find( _ =^= a2 ) )
    if ( a1New.isEmpty || a2New.isEmpty )
      throw new LKRuleCreationException( "Couldn't find descendants of " + a1 + " and " + a2 + "." )
    f( EquationLeft2Rule( subProof1, subProof2, a1New.get, a2New.get, pos( 0 ) ) )
  case EquationRight1Rule( up1, up2, _, a1, a2, pos, _ ) =>
    // For right rules both auxiliary occurrences live in the succedent.
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    val ( a1New, a2New ) = ( subProof1.root.succedent.find( _ =^= a1 ), subProof2.root.succedent.find( _ =^= a2 ) )
    if ( a1New.isEmpty || a2New.isEmpty )
      throw new LKRuleCreationException( "Couldn't find descendants of " + a1 + " and " + a2 + "." )
    f( EquationRight1Rule( subProof1, subProof2, a1New.get, a2New.get, pos( 0 ) ) )
  case EquationRight2Rule( up1, up2, _, a1, a2, pos, _ ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    val ( a1New, a2New ) = ( subProof1.root.succedent.find( _ =^= a1 ), subProof2.root.succedent.find( _ =^= a2 ) )
    if ( a1New.isEmpty || a2New.isEmpty )
      throw new LKRuleCreationException( "Couldn't find descendants of " + a1 + " and " + a2 + "." )
    f( EquationRight2Rule( subProof1, subProof2, a1New.get, a2New.get, pos( 0 ) ) )
  case InductionRule( up1, up2, _, a1, a2, a3, _, term ) =>
    val ( subProof1, subProof2 ) = ( apply( f )( up1 ), apply( f )( up2 ) )
    f( InductionRule( subProof1, subProof2, a1.formula.asInstanceOf[FOLFormula], a2.formula.asInstanceOf[FOLFormula], a3.formula.asInstanceOf[FOLFormula], term ) )
}
} | loewenheim/gapt | src/main/scala/at/logic/gapt/proofs/lk/macroRules.scala | Scala | gpl-3.0 | 69,363 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
import java.io._
import javax.inject.Inject
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, Materializer }
import javax.inject.Singleton
import play.api.http._
import play.api.i18n.I18nComponents
import play.api.inject.{ DefaultApplicationLifecycle, Injector, NewInstanceInjector, SimpleInjector }
import play.api.libs.Files._
import play.api.libs.concurrent.ActorSystemProvider
import play.api.libs.crypto._
import play.api.mvc._
import play.api.mvc.request.{ DefaultRequestFactory, RequestFactory }
import play.api.routing.Router
import play.core.j.JavaHelpers
import play.core.{ SourceMapper, WebCommands }
import play.utils._
import scala.annotation.implicitNotFound
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
/**
* A Play application.
*
* Application creation is handled by the framework engine.
*
* If you need to create an ad-hoc application,
* for example in case of unit testing, you can easily achieve this using:
* {{{
* val application = new DefaultApplication(new File("."), this.getClass.getClassloader, None, Play.Mode.Dev)
* }}}
*
* This will create an application using the current classloader.
*
*/
@implicitNotFound(msg = "You do not have an implicit Application in scope. If you want to bring the current running Application into context, please use dependency injection.")
trait Application {

  /**
   * The absolute path hosting this application, mainly used by the `getFile(path)` helper method
   */
  def path: File

  /**
   * The application's classloader
   */
  def classloader: ClassLoader

  /**
   * `Dev`, `Prod` or `Test`
   */
  def mode: Mode.Mode = environment.mode

  /**
   * The application's environment
   */
  def environment: Environment

  // Convenience mode predicates, derived from `mode`.
  private[play] def isDev = (mode == Mode.Dev)
  private[play] def isTest = (mode == Mode.Test)
  private[play] def isProd = (mode == Mode.Prod)

  /**
   * The application's configuration.
   */
  def configuration: Configuration

  // Computed once, lazily, from the configuration and environment.
  private[play] lazy val httpConfiguration = HttpConfiguration.fromConfiguration(configuration, environment)

  /**
   * The default ActorSystem used by the application.
   */
  def actorSystem: ActorSystem

  /**
   * The default Materializer used by the application.
   */
  implicit def materializer: Materializer

  /**
   * The factory used to create requests for this application.
   */
  def requestFactory: RequestFactory

  /**
   * The HTTP request handler
   */
  def requestHandler: HttpRequestHandler

  /**
   * The HTTP error handler
   */
  def errorHandler: HttpErrorHandler

  /**
   * Return the application as a Java application.
   *
   * @see [[play.Application]]
   */
  def asJava: play.Application = {
    new play.DefaultApplication(this, configuration.underlying, injector.asJava)
  }

  /**
   * Retrieves a file relative to the application root path.
   *
   * Note that it is up to you to manage the files in the application root path in production. By default, there will
   * be nothing available in the application root path.
   *
   * For example, to retrieve some deployment specific data file:
   * {{{
   * val myDataFile = application.getFile("data/data.xml")
   * }}}
   *
   * @param relativePath relative path of the file to fetch
   * @return a file instance; it is not guaranteed that the file exists
   */
  @deprecated("Use Environment#getFile instead", "2.6.0")
  def getFile(relativePath: String): File = new File(path, relativePath)

  /**
   * Retrieves a file relative to the application root path.
   * This method returns an Option[File], using None if the file was not found.
   *
   * Note that it is up to you to manage the files in the application root path in production. By default, there will
   * be nothing available in the application root path.
   *
   * For example, to retrieve some deployment specific data file:
   * {{{
   * val myDataFile = application.getExistingFile("data/data.xml")
   * }}}
   *
   * @param relativePath the relative path of the file to fetch
   * @return an existing file
   */
  @deprecated("Use Environment#getExistingFile instead", "2.6.0")
  def getExistingFile(relativePath: String): Option[File] = Some(getFile(relativePath)).filter(_.exists)

  /**
   * Scans the application classloader to retrieve a resource.
   *
   * The conf directory is included on the classpath, so this may be used to look up resources, relative to the conf
   * directory.
   *
   * For example, to retrieve the conf/logback.xml configuration file:
   * {{{
   * val maybeConf = application.resource("logback.xml")
   * }}}
   *
   * @param name the absolute name of the resource (from the classpath root)
   * @return the resource URL, if found
   */
  @deprecated("Use Environment#resource instead", "2.6.0")
  def resource(name: String): Option[java.net.URL] = {
    // Classloader resource names are not prefixed with '/', so strip it if present.
    val n = name.stripPrefix("/")
    Option(classloader.getResource(n))
  }

  /**
   * Scans the application classloader to retrieve a resource’s contents as a stream.
   *
   * The conf directory is included on the classpath, so this may be used to look up resources, relative to the conf
   * directory.
   *
   * For example, to retrieve the conf/logback.xml configuration file:
   * {{{
   * val maybeConf = application.resourceAsStream("logback.xml")
   * }}}
   *
   * @param name the absolute name of the resource (from the classpath root)
   * @return a stream, if found
   */
  @deprecated("Use Environment#resourceAsStream instead", "2.6.0")
  def resourceAsStream(name: String): Option[InputStream] = {
    // Classloader resource names are not prefixed with '/', so strip it if present.
    val n = name.stripPrefix("/")
    Option(classloader.getResourceAsStream(n))
  }

  /**
   * Stop the application. The returned future will be redeemed when all stop hooks have been run.
   */
  def stop(): Future[_]

  /**
   * Get the injector for this application.
   *
   * @return The injector. Defaults to a reflection-based new-instance injector.
   */
  def injector: Injector = NewInstanceInjector

  /**
   * Returns true if the global application is enabled for this app. If set to false, this changes the behavior of
   * Play.start, Play.current, and Play.maybeApplication to disallow access to the global application instance,
   * also affecting the deprecated Play APIs that use these.
   */
  lazy val globalApplicationEnabled: Boolean = {
    // Enabled unless explicitly turned off in configuration.
    configuration.getOptional[Boolean](Play.GlobalAppConfigKey).getOrElse(true)
  }
}
object Application {

  /**
   * Builds a function that looks up `app.injector.instanceOf[T]` and caches
   * the result per Application via an [[play.utils.InlineCache]].
   *
   * Repeated calls with the same Application are fast (the common production
   * case); calls with different Applications — e.g. in unit tests — still work
   * correctly, just more slowly. Because results are cached, this must only be
   * used for singleton values.
   *
   * No synchronization is performed, so the injector may be invoked more than
   * once for the same instance under concurrent first calls. The cache holds
   * WeakReferences to both the Application and the cached instance, so it does
   * not leak memory; unlike WeakHashMap it uses no ReferenceQueue, so entries
   * are cleaned up even if a ReferenceQueue is never activated.
   */
  def instanceCache[T: ClassTag]: Application => T = {
    val lookup: Application => T = app => app.injector.instanceOf[T]
    new InlineCache(lookup)
  }
}
/** Wrapper class holding an optional [[play.core.SourceMapper]]. */
class OptionalSourceMapper(val sourceMapper: Option[SourceMapper])
/**
 * Default implementation of [[Application]]: every component is supplied
 * through the injected constructor and exposed via the overridden members.
 */
@Singleton
class DefaultApplication @Inject() (
    override val environment: Environment,
    applicationLifecycle: DefaultApplicationLifecycle,
    override val injector: Injector,
    override val configuration: Configuration,
    override val requestFactory: RequestFactory,
    override val requestHandler: HttpRequestHandler,
    override val errorHandler: HttpErrorHandler,
    override val actorSystem: ActorSystem,
    override val materializer: Materializer) extends Application {

  // Path and classloader are delegated to the injected environment.
  override def path: File = environment.rootPath

  override def classloader: ClassLoader = environment.classLoader

  // Stopping the application delegates to the lifecycle, which runs the stop hooks.
  override def stop(): Future[_] = applicationLifecycle.stop()
}
/**
* Helper to provide the Play built in components.
*/
trait BuiltInComponents extends I18nComponents {

  // Abstract members that a concrete components cake must provide.
  def environment: Environment
  def sourceMapper: Option[SourceMapper] // passed to the default HTTP error handler below
  def webCommands: WebCommands
  def configuration: Configuration
  def applicationLifecycle: DefaultApplicationLifecycle
  def router: Router

  // Injector exposing the components wired up here; handed to the application below.
  lazy val injector: Injector = new SimpleInjector(NewInstanceInjector) + router + cookieSigner + csrfTokenSigner + httpConfiguration + tempFileCreator + fileMimeTypes

  // Body parsing: parsers are built from the parser config, error handler,
  // materializer and temporary-file creator defined further down.
  lazy val playBodyParsers: PlayBodyParsers = PlayBodyParsers(httpConfiguration.parser, httpErrorHandler, materializer, tempFileCreator)
  lazy val defaultBodyParser: BodyParser[AnyContent] = playBodyParsers.default
  lazy val defaultActionBuilder: DefaultActionBuilder = DefaultActionBuilder(defaultBodyParser)

  // HTTP configuration and request construction.
  lazy val httpConfiguration: HttpConfiguration = HttpConfiguration.fromConfiguration(configuration, environment)
  lazy val requestFactory: RequestFactory = new DefaultRequestFactory(httpConfiguration)
  lazy val httpErrorHandler: HttpErrorHandler = new DefaultHttpErrorHandler(environment, configuration, sourceMapper,
    Some(router))

  /**
   * List of filters, typically provided by mixing in play.filters.HttpFiltersComponents
   * or play.api.NoHttpFiltersComponents.
   *
   * In most cases you will want to mixin HttpFiltersComponents and append your own filters:
   *
   * {{{
   * class MyComponents(context: ApplicationLoader.Context)
   *   extends BuiltInComponentsFromContext(context)
   *   with play.filters.HttpFiltersComponents {
   *
   *   lazy val loggingFilter = new LoggingFilter()
   *   override def httpFilters = {
   *     super.httpFilters :+ loggingFilter
   *   }
   * }
   * }}}
   *
   * If you want to filter elements out of the list, you can do the following:
   *
   * {{{
   * class MyComponents(context: ApplicationLoader.Context)
   *   extends BuiltInComponentsFromContext(context)
   *   with play.filters.HttpFiltersComponents {
   *   override def httpFilters = {
   *     super.httpFilters.filterNot(_.getClass == classOf[CSRFFilter])
   *   }
   * }
   * }}}
   */
  def httpFilters: Seq[EssentialFilter]

  // Request handler applying the filters around the router.
  lazy val httpRequestHandler: HttpRequestHandler = new DefaultHttpRequestHandler(router, httpErrorHandler, httpConfiguration, httpFilters: _*)

  // The assembled application.
  lazy val application: Application = new DefaultApplication(environment, applicationLifecycle, injector,
    configuration, requestFactory, httpRequestHandler, httpErrorHandler, actorSystem, materializer)

  // Actor system, materializer and the default execution context derived from it.
  lazy val actorSystem: ActorSystem = new ActorSystemProvider(environment, configuration, applicationLifecycle).get
  implicit lazy val materializer: Materializer = ActorMaterializer()(actorSystem)
  implicit lazy val executionContext: ExecutionContext = actorSystem.dispatcher

  // Cryptographic signers built from the configured secret.
  lazy val cookieSigner: CookieSigner = new CookieSignerProvider(httpConfiguration.secret).get
  lazy val csrfTokenSigner: CSRFTokenSigner = new CSRFTokenSignerProvider(cookieSigner).get

  // Temporary file handling: a reaper configured from the application config,
  // and a creator registered with the application lifecycle.
  lazy val tempFileReaper: TemporaryFileReaper = new DefaultTemporaryFileReaper(actorSystem, TemporaryFileReaperConfiguration.fromConfiguration(configuration))
  lazy val tempFileCreator: TemporaryFileCreator = new DefaultTemporaryFileCreator(applicationLifecycle, tempFileReaper)

  lazy val fileMimeTypes: FileMimeTypes = new DefaultFileMimeTypesProvider(httpConfiguration.fileMimeTypes).get

  // Components bundle for the Java API.
  lazy val javaContextComponents = JavaHelpers.createContextComponents(messagesApi, langs, fileMimeTypes, httpConfiguration)
}
/**
* A component to mix in when no default filters should be mixed in to BuiltInComponents.
*/
/** Mix in to run the application with an empty HTTP filter chain. */
trait NoHttpFiltersComponents {
  val httpFilters: Seq[EssentialFilter] = Seq.empty
}
| aradchykov/playframework | framework/src/play/src/main/scala/play/api/Application.scala | Scala | apache-2.0 | 11,930 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.component.helloworld.repository.entity
import play.api.libs.json.{ Format, Json }
import play.api.mvc.{ Result, Results }
import scala.language.implicitConversions
import scalaz._
import Scalaz._
// The implicit resolution rules are
// 1. First look in current scope
// - Implicits defined in current scope
// - Explicit imports
// - wildcard imports
// - _(*) Same scope in other files (*)_
//
// We haven't imported anything in the HelloWorldController..
//
// 2. Now look at associated types in
// - Companion objects of a type
// - Implicit scope of an argument's type (2.9.1)
// - Implicit scope of type arguments (2.8.0)
// - Outer objects for nested types
//
// If we returned an HelloWorld type in the controller, the companion object
// (which is object HelloWorld) would be responsible to convert HelloWorld to a result
// that can be done by means of one of the implicit methods eg. toResult
//
// If we return an Option[HelloWorld], the resolution would first look at the companion of Option... no luck there
// next we will look in the 'Implicit scope of an argument type' of Option[T] if we can convert Option[HelloWorld] to
// 'Result', the argument type is 'HelloWorld' and if we look at the companion object of HelloWorld we have a hit.
// because we find a way to convert Option[HelloWorld] to a Result.
//
// Companion: carries the JSON codec and (via GenericResult) the implicit Result conversions.
object HelloWorld extends GenericResult {
// Macro-derived JSON reads/writes for the case class.
implicit val format: Format[HelloWorld] = Json.format[HelloWorld]
}
// Message payload, serialized to/from JSON via the companion's Format.
final case class HelloWorld(msg: String)
// Implicit conversions from domain results to Play HTTP Results.
// NOTE(review): implicit conversions are generally discouraged, but here they are the
// deliberate design this file's header comment documents.
trait GenericResult extends Results {
/** Any value with a JSON Format becomes a 200 OK with a JSON body. */
implicit def fromA[A: Format](a: A): Result =
Ok(Json.toJson(a))
/** Some(a) is rendered as JSON; None becomes 404 Not Found. */
implicit def fromOption[A: Format](option: Option[A]): Result =
option.map(a => fromA(a)).getOrElse(NotFound)
// Converts via Option; the fromOption conversion above then applies implicitly.
implicit def fromMaybe[A: Format](maybe: Maybe[A]): Result =
maybe.toOption
/** Right(a) is rendered as JSON; Left(msg) becomes 404 with msg as the body. */
implicit def fromDisjunction[A: Format](disjunction: Disjunction[String, A]): Result =
disjunction.map(a => fromA(a)).valueOr(msg => NotFound(msg))
// Converts via Disjunction; the fromDisjunction conversion then applies implicitly.
implicit def fromValidation[A: Format](validation: Validation[String, A]): Result =
validation.disjunction
// Collapses all failure messages into one comma-separated string before converting.
implicit def fromValidationNel[A: Format](validation: ValidationNel[String, A]): Result =
validation.leftMap(_.toList.mkString(",")).disjunction
}
| dnvriend/akka-http-test | app/com/github/dnvriend/component/helloworld/repository/entity/HelloWorld.scala | Scala | apache-2.0 | 2,838 |
package japgolly.scalajs.react.extra
// A value computed from `src`, whose reusability is judged by `src` alone.
class ReusableVal2[A, S](a: () => A, val src: S)(implicit val reusability: Reusability[S]) {
// Computed at most once, on first access.
lazy val value: A = a()
}
object ReusableVal2 {
// Two wrappers are reusable iff their `src` values are, per the captured Reusability.
implicit def reusability[A, S]: Reusability[ReusableVal2[A, S]] =
Reusability.internal((_: ReusableVal2[A, S]).src)(_.reusability)
// Implicitly unwraps to the (lazily computed) value.
implicit def autoValue[A, B](r: ReusableVal2[A, B]): A =
r.value
// `a` is by-name: evaluation is deferred until `.value` is first read.
@inline def apply[A, S: Reusability](a: => A, src: S): ReusableVal2[A, S] =
new ReusableVal2(() => a, src)
// Builds the value by applying `f` to `a`, using `a` itself as the reusability source.
def function[A: Reusability, B](a: A)(f: A => B): ReusableVal2[B, A] =
apply(f(a), a)
} | beni55/scalajs-react | extra/src/main/scala/japgolly/scalajs/react/extra/ReusableVal2.scala | Scala | apache-2.0 | 599 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import java.util.Locale
import scala.util.control.NonFatal
import org.json4s._
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.expressions.{Cast, Expression}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.Utils
/**
* The base type of all Spark SQL data types.
*
* @since 1.3.0
*/
@Stable
abstract class DataType extends AbstractDataType {
/**
 * Enables matching against DataType for expressions:
 * {{{
 *   case Cast(child @ BinaryType(), StringType) =>
 *     ...
 * }}}
 */
private[sql] def unapply(e: Expression): Boolean = e.dataType == this
/**
 * The default size of a value of this data type, used internally for size estimation.
 */
def defaultSize: Int
/** Name of the type used in JSON serialization. */
def typeName: String = {
// Derived from the class name, e.g. "IntegerType" -> "integer".
this.getClass.getSimpleName
.stripSuffix("$").stripSuffix("Type").stripSuffix("UDT")
.toLowerCase(Locale.ROOT)
}
// JSON AST value used by `json`/`prettyJson`; defaults to just the type name.
private[sql] def jsonValue: JValue = typeName
/** The compact JSON representation of this data type. */
def json: String = compact(render(jsonValue))
/** The pretty (i.e. indented) JSON representation of this data type. */
def prettyJson: String = pretty(render(jsonValue))
/** Readable string representation for the type. */
def simpleString: String = typeName
/** String representation for the type saved in external catalogs. */
def catalogString: String = simpleString
/** Readable string representation for the type with truncation */
private[sql] def simpleString(maxNumberFields: Int): String = simpleString
def sql: String = simpleString.toUpperCase(Locale.ROOT)
/**
 * Check if `this` and `other` are the same data type when ignoring nullability
 * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
 */
private[spark] def sameType(other: DataType): Boolean =
// Field-name case sensitivity follows the active SQLConf setting.
if (SQLConf.get.caseSensitiveAnalysis) {
DataType.equalsIgnoreNullability(this, other)
} else {
DataType.equalsIgnoreCaseAndNullability(this, other)
}
/**
 * Returns the same data type but set all nullability fields are true
 * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
 */
private[spark] def asNullable: DataType
/**
 * Returns true if any `DataType` of this DataType tree satisfies the given function `f`.
 */
private[spark] def existsRecursively(f: (DataType) => Boolean): Boolean = f(this)
override private[sql] def defaultConcreteType: DataType = this
override private[sql] def acceptsType(other: DataType): Boolean = sameType(other)
}
/**
* @since 1.3.0
*/
@Stable
object DataType {
  // Matches fixed-precision decimal type strings such as "decimal(10, 2)".
  // NOTE: triple-quoted strings perform NO escape processing, so the regex engine
  // must receive single backslashes. The previous doubled backslashes
  // ("""decimal\\(...""") made the pattern require a literal backslash and it
  // could therefore never match a real decimal type string.
  private val FIXED_DECIMAL = """decimal\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r

  /**
   * Parses a DDL-formatted string into a [[DataType]]: first as a single data
   * type and, failing that, as a table schema (comma-separated field list).
   */
  def fromDDL(ddl: String): DataType = {
    try {
      CatalystSqlParser.parseDataType(ddl)
    } catch {
      case NonFatal(_) => CatalystSqlParser.parseTableSchema(ddl)
    }
  }

  /** Parses the JSON representation produced by [[DataType#json]]. */
  def fromJson(json: String): DataType = parseDataType(parse(json))

  // Lookup from type name (e.g. "integer") to its singleton type object, for
  // every non-parameterized type; decimal is handled separately because it
  // carries precision and scale.
  private val nonDecimalNameToType = {
    Seq(NullType, DateType, TimestampType, BinaryType, IntegerType, BooleanType, LongType,
      DoubleType, FloatType, ShortType, ByteType, StringType, CalendarIntervalType)
      .map(t => t.typeName -> t).toMap
  }

  /** Given the string representation of a type, return its DataType */
  private def nameToType(name: String): DataType = {
    name match {
      case "decimal" => DecimalType.USER_DEFAULT
      case FIXED_DECIMAL(precision, scale) => DecimalType(precision.toInt, scale.toInt)
      case other => nonDecimalNameToType.getOrElse(
        other,
        throw new IllegalArgumentException(
          s"Failed to convert the JSON string '$name' to a data type."))
    }
  }

  // Extractor presenting a JSON object's fields sorted alphabetically by key,
  // so the patterns in parseDataType/parseStructField can rely on a fixed order.
  private object JSortedObject {
    def unapplySeq(value: JValue): Option[List[(String, JValue)]] = value match {
      case JObject(seq) => Some(seq.toList.sortBy(_._1))
      case _ => None
    }
  }

  // NOTE: Map fields must be sorted in alphabetical order to keep consistent with the Python side.
  private[sql] def parseDataType(json: JValue): DataType = json match {
    case JString(name) =>
      nameToType(name)
    case JSortedObject(
      ("containsNull", JBool(n)),
      ("elementType", t: JValue),
      ("type", JString("array"))) =>
      ArrayType(parseDataType(t), n)
    case JSortedObject(
      ("keyType", k: JValue),
      ("type", JString("map")),
      ("valueContainsNull", JBool(n)),
      ("valueType", v: JValue)) =>
      MapType(parseDataType(k), parseDataType(v), n)
    case JSortedObject(
      ("fields", JArray(fields)),
      ("type", JString("struct"))) =>
      StructType(fields.map(parseStructField))
    // Scala/Java UDT
    case JSortedObject(
      ("class", JString(udtClass)),
      ("pyClass", _),
      ("sqlType", _),
      ("type", JString("udt"))) =>
      Utils.classForName(udtClass).getConstructor().newInstance().asInstanceOf[UserDefinedType[_]]
    // Python UDT
    case JSortedObject(
      ("pyClass", JString(pyClass)),
      ("serializedClass", JString(serialized)),
      ("sqlType", v: JValue),
      ("type", JString("udt"))) =>
      new PythonUserDefinedType(parseDataType(v), pyClass, serialized)
    case other =>
      throw new IllegalArgumentException(
        s"Failed to convert the JSON string '${compact(render(other))}' to a data type.")
  }

  // Parses one struct field; tolerates a missing 'metadata' entry.
  private def parseStructField(json: JValue): StructField = json match {
    case JSortedObject(
      ("metadata", metadata: JObject),
      ("name", JString(name)),
      ("nullable", JBool(nullable)),
      ("type", dataType: JValue)) =>
      StructField(name, parseDataType(dataType), nullable, Metadata.fromJObject(metadata))
    // Support reading schema when 'metadata' is missing.
    case JSortedObject(
      ("name", JString(name)),
      ("nullable", JBool(nullable)),
      ("type", dataType: JValue)) =>
      StructField(name, parseDataType(dataType), nullable)
    case other =>
      throw new IllegalArgumentException(
        s"Failed to convert the JSON string '${compact(render(other))}' to a field.")
  }

  // Appends a human-readable description of complex types to `builder`;
  // atomic types contribute nothing here.
  protected[types] def buildFormattedString(
      dataType: DataType,
      prefix: String,
      builder: StringBuilder): Unit = {
    dataType match {
      case array: ArrayType =>
        array.buildFormattedString(prefix, builder)
      case struct: StructType =>
        struct.buildFormattedString(prefix, builder)
      case map: MapType =>
        map.buildFormattedString(prefix, builder)
      case _ =>
    }
  }

  /**
   * Compares two types, ignoring nullability of ArrayType, MapType, StructType.
   */
  private[types] def equalsIgnoreNullability(left: DataType, right: DataType): Boolean = {
    (left, right) match {
      case (ArrayType(leftElementType, _), ArrayType(rightElementType, _)) =>
        equalsIgnoreNullability(leftElementType, rightElementType)
      case (MapType(leftKeyType, leftValueType, _), MapType(rightKeyType, rightValueType, _)) =>
        equalsIgnoreNullability(leftKeyType, rightKeyType) &&
          equalsIgnoreNullability(leftValueType, rightValueType)
      case (StructType(leftFields), StructType(rightFields)) =>
        leftFields.length == rightFields.length &&
          leftFields.zip(rightFields).forall { case (l, r) =>
            l.name == r.name && equalsIgnoreNullability(l.dataType, r.dataType)
          }
      case (l, r) => l == r
    }
  }

  /**
   * Compares two types, ignoring compatible nullability of ArrayType, MapType, StructType.
   *
   * Compatible nullability is defined as follows:
   *   - If `from` and `to` are ArrayTypes, `from` has a compatible nullability with `to`
   *   if and only if `to.containsNull` is true, or both of `from.containsNull` and
   *   `to.containsNull` are false.
   *   - If `from` and `to` are MapTypes, `from` has a compatible nullability with `to`
   *   if and only if `to.valueContainsNull` is true, or both of `from.valueContainsNull` and
   *   `to.valueContainsNull` are false.
   *   - If `from` and `to` are StructTypes, `from` has a compatible nullability with `to`
   *   if and only if for all every pair of fields, `to.nullable` is true, or both
   *   of `fromField.nullable` and `toField.nullable` are false.
   */
  private[sql] def equalsIgnoreCompatibleNullability(from: DataType, to: DataType): Boolean = {
    (from, to) match {
      case (ArrayType(fromElement, fn), ArrayType(toElement, tn)) =>
        (tn || !fn) && equalsIgnoreCompatibleNullability(fromElement, toElement)
      case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
        (tn || !fn) &&
          equalsIgnoreCompatibleNullability(fromKey, toKey) &&
          equalsIgnoreCompatibleNullability(fromValue, toValue)
      case (StructType(fromFields), StructType(toFields)) =>
        fromFields.length == toFields.length &&
          fromFields.zip(toFields).forall { case (fromField, toField) =>
            fromField.name == toField.name &&
              (toField.nullable || !fromField.nullable) &&
              equalsIgnoreCompatibleNullability(fromField.dataType, toField.dataType)
          }
      case (fromDataType, toDataType) => fromDataType == toDataType
    }
  }

  /**
   * Compares two types, ignoring nullability of ArrayType, MapType, StructType, and ignoring case
   * sensitivity of field names in StructType.
   */
  private[sql] def equalsIgnoreCaseAndNullability(from: DataType, to: DataType): Boolean = {
    (from, to) match {
      case (ArrayType(fromElement, _), ArrayType(toElement, _)) =>
        equalsIgnoreCaseAndNullability(fromElement, toElement)
      case (MapType(fromKey, fromValue, _), MapType(toKey, toValue, _)) =>
        equalsIgnoreCaseAndNullability(fromKey, toKey) &&
          equalsIgnoreCaseAndNullability(fromValue, toValue)
      case (StructType(fromFields), StructType(toFields)) =>
        fromFields.length == toFields.length &&
          fromFields.zip(toFields).forall { case (l, r) =>
            l.name.equalsIgnoreCase(r.name) &&
              equalsIgnoreCaseAndNullability(l.dataType, r.dataType)
          }
      case (fromDataType, toDataType) => fromDataType == toDataType
    }
  }

  /**
   * Returns true if the two data types share the same "shape", i.e. the types
   * are the same, but the field names don't need to be the same.
   *
   * @param ignoreNullability whether to ignore nullability when comparing the types
   */
  def equalsStructurally(
      from: DataType,
      to: DataType,
      ignoreNullability: Boolean = false): Boolean = {
    (from, to) match {
      case (left: ArrayType, right: ArrayType) =>
        // Fixed: `ignoreNullability` is now threaded through the recursive calls;
        // previously it was dropped, so nested nullability was never ignored.
        equalsStructurally(left.elementType, right.elementType, ignoreNullability) &&
          (ignoreNullability || left.containsNull == right.containsNull)
      case (left: MapType, right: MapType) =>
        equalsStructurally(left.keyType, right.keyType, ignoreNullability) &&
          equalsStructurally(left.valueType, right.valueType, ignoreNullability) &&
          (ignoreNullability || left.valueContainsNull == right.valueContainsNull)
      case (StructType(fromFields), StructType(toFields)) =>
        fromFields.length == toFields.length &&
          fromFields.zip(toFields)
            .forall { case (l, r) =>
              equalsStructurally(l.dataType, r.dataType, ignoreNullability) &&
                (ignoreNullability || l.nullable == r.nullable)
            }
      case (fromDataType, toDataType) => fromDataType == toDataType
    }
  }

  // Matches auto-generated column names such as "col1", "col23".
  // Fixed doubled backslashes (see FIXED_DECIMAL above): """col\\d+""" required a
  // literal backslash and never matched generated names.
  private val SparkGeneratedName = """col\d+""".r
  private def isSparkGeneratedName(name: String): Boolean = name match {
    case SparkGeneratedName(_*) => true
    case _ => false
  }

  /**
   * Returns true if the write data type can be read using the read data type.
   *
   * The write type is compatible with the read type if:
   * - Both types are arrays, the array element types are compatible, and element nullability is
   *   compatible (read allows nulls or write does not contain nulls).
   * - Both types are maps and the map key and value types are compatible, and value nullability
   *   is compatible  (read allows nulls or write does not contain nulls).
   * - Both types are structs and each field in the read struct is present in the write struct and
   *   compatible (including nullability), or is nullable if the write struct does not contain the
   *   field. Write-side structs are not compatible if they contain fields that are not present in
   *   the read-side struct.
   * - Both types are atomic and the write type can be safely cast to the read type.
   *
   * Extra fields in write-side structs are not allowed to avoid accidentally writing data that
   * the read schema will not read, and to ensure map key equality is not changed when data is read.
   *
   * @param write a write-side data type to validate against the read type
   * @param read a read-side data type
   * @return true if data written with the write type can be read using the read type
   */
  def canWrite(
      write: DataType,
      read: DataType,
      resolver: Resolver,
      context: String,
      addError: String => Unit = (_: String) => {}): Boolean = {
    (write, read) match {
      case (wArr: ArrayType, rArr: ArrayType) =>
        // run compatibility check first to produce all error messages
        val typesCompatible =
          canWrite(wArr.elementType, rArr.elementType, resolver, context + ".element", addError)
        if (wArr.containsNull && !rArr.containsNull) {
          addError(s"Cannot write nullable elements to array of non-nulls: '$context'")
          false
        } else {
          typesCompatible
        }
      case (wMap: MapType, rMap: MapType) =>
        // map keys cannot include data fields not in the read schema without changing equality when
        // read. map keys can be missing fields as long as they are nullable in the read schema.
        // run compatibility check first to produce all error messages
        val keyCompatible =
          canWrite(wMap.keyType, rMap.keyType, resolver, context + ".key", addError)
        val valueCompatible =
          canWrite(wMap.valueType, rMap.valueType, resolver, context + ".value", addError)
        val typesCompatible = keyCompatible && valueCompatible
        if (wMap.valueContainsNull && !rMap.valueContainsNull) {
          addError(s"Cannot write nullable values to map of non-nulls: '$context'")
          false
        } else {
          typesCompatible
        }
      case (StructType(writeFields), StructType(readFields)) =>
        var fieldCompatible = true
        // zip pairs fields positionally; size mismatches are reported below.
        readFields.zip(writeFields).foreach {
          case (rField, wField) =>
            val namesMatch = resolver(wField.name, rField.name) || isSparkGeneratedName(wField.name)
            val fieldContext = s"$context.${rField.name}"
            val typesCompatible =
              canWrite(wField.dataType, rField.dataType, resolver, fieldContext, addError)
            if (!namesMatch) {
              addError(s"Struct '$context' field name does not match (may be out of order): " +
                s"expected '${rField.name}', found '${wField.name}'")
              fieldCompatible = false
            } else if (!rField.nullable && wField.nullable) {
              addError(s"Cannot write nullable values to non-null field: '$fieldContext'")
              fieldCompatible = false
            } else if (!typesCompatible) {
              // errors are added in the recursive call to canWrite above
              fieldCompatible = false
            }
        }
        if (readFields.size > writeFields.size) {
          val missingFieldsStr = readFields.takeRight(readFields.size - writeFields.size)
            .map(f => s"'${f.name}'").mkString(", ")
          if (missingFieldsStr.nonEmpty) {
            addError(s"Struct '$context' missing fields: $missingFieldsStr")
            fieldCompatible = false
          }
        } else if (writeFields.size > readFields.size) {
          val extraFieldsStr = writeFields.takeRight(writeFields.size - readFields.size)
            .map(f => s"'${f.name}'").mkString(", ")
          addError(s"Cannot write extra fields to struct '$context': $extraFieldsStr")
          fieldCompatible = false
        }
        fieldCompatible
      case (w: AtomicType, r: AtomicType) =>
        if (!Cast.canSafeCast(w, r)) {
          addError(s"Cannot safely cast '$context': $w to $r")
          false
        } else {
          true
        }
      case (w, r) if w.sameType(r) && !w.isInstanceOf[NullType] =>
        true
      case (w, r) =>
        addError(s"Cannot write '$context': $w is incompatible with $r")
        false
    }
  }
}
| WindCanDie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala | Scala | apache-2.0 | 17,648 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.io.ByteArrayInputStream
import org.apache.arrow.memory.{BufferAllocator, RootAllocator}
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.arrow.io.SimpleFeatureArrowFileReader
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.filter.Filter
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ArrowConversionProcessTest extends TestWithFeatureType {
// NOTE(review): scala.collection.JavaConversions (implicit conversions) is
// deprecated in favour of JavaConverters; migrating would be a code change,
// so it is only flagged here.
import scala.collection.JavaConversions._
// Examples share one feature store and one allocator, so run them in order.
sequential
override val spec = "name:String:index=join,team:String,dtg:Date,*geom:Point:srid=4326"
// Arrow buffer allocator shared by all examples; released in the final `step`.
implicit val allocator: BufferAllocator = new RootAllocator(Long.MaxValue)
val process = new ArrowConversionProcess
// Ten points at (40, 50..59), alternating between two names, one second apart.
val features = (0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"0$i", s"name${i % 2}", s"team$i", s"2017-02-20T00:00:0$i.000Z", s"POINT(40 ${50 + i})")
}
addFeatures(features)
"ArrowConversionProcess" should {
"encode an empty feature collection" in {
val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()) must beEmpty
}
}
"encode an empty accumulo feature collection" in {
// bbox does not intersect any of the test points, so the query returns nothing
val bytes = process.execute(fs.getFeatures(ECQL.toFilter("bbox(geom,20,20,30,30)")), null, null, null, null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()) must beEmpty
}
}
"encode an accumulo feature collection in distributed fashion" in {
val bytes = process.execute(fs.getFeatures(Filter.INCLUDE), null, null, null, null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq must
containTheSameElementsAs(features)
}
}
"encode an accumulo feature collection in distributed fashion with cached dictionary values" in {
val filter = ECQL.toFilter("name = 'name0'")
val bytes = process.execute(fs.getFeatures(filter), null, null, Seq("name"), null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq must
containTheSameElementsAs(features.filter(filter.evaluate))
// verify all cached values were used for the dictionary
reader.dictionaries.map { case (k, v) => (k, v.iterator.toSeq) } mustEqual Map("name" -> Seq("name0", "name1"))
}
}
"encode an accumulo feature collection in distributed fashion with calculated dictionary values" in {
val filter = ECQL.toFilter("name = 'name0'")
val bytes = process.execute(fs.getFeatures(filter), null, null, Seq("name"), false, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq must
containTheSameElementsAs(features.filter(filter.evaluate))
// verify only exact values were used for the dictionary
reader.dictionaries.map { case (k, v) => (k, v.iterator.toSeq) } mustEqual Map("name" -> Seq("name0"))
}
}
"sort and encode an accumulo feature collection in distributed fashion" in {
val bytes = process.execute(fs.getFeatures(Filter.INCLUDE), null, null, null, null, "dtg", null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toList mustEqual features
}
}
"reverse sort and encode an accumulo feature collection in distributed fashion" in {
val bytes = process.execute(fs.getFeatures(Filter.INCLUDE), null, null, null, null, "dtg", Boolean.box(true), null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toList mustEqual features.reverse
}
}
}
// Release the shared allocator after all examples have run.
step {
allocator.close()
}
}
| ccri/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/process/transform/ArrowConversionProcessTest.scala | Scala | apache-2.0 | 5,595 |
/*
* Copyright 2019 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.common.rich
/** Zero-allocation enrichment for `Char` (value class). */
final class RichChar(val ch: Char) extends AnyVal {
  /** True when this char lies in the 7-bit ASCII range (code points 0-127). */
  def isASCIIChar: Boolean = ch.toInt < 128
}
package polynomial
import core.NegativeInfinity
import integer.IntegersModN
import org.scalatest.{FunSuite, Matchers}
import polynomial.Predef.X
class PolynomialTest extends FunSuite with Matchers {
// All tests use polynomial coefficients drawn from the integers modulo 4.
implicit val intsMod4 = IntegersModN(4)
val zero = intsMod4.classOf(0)
val one = intsMod4.classOf(1)
val two = intsMod4.classOf(2)
val three = intsMod4.classOf(3)
test("Leading zero coefficients are ignored") {
Polynomial(one, two, zero, three) should be (Polynomial(zero, zero, one, two, zero, three))
}
test("The leading coefficient is zero only for the polynomial of all zero coefficients") {
val zeros = Polynomial(zero, zero)
val poly1 = Polynomial(one, two, three)
val poly2 = Polynomial(zero, two, three, three)
zeros.leadingCoefficient should be (zero)
poly1.leadingCoefficient should be (one)
poly2.leadingCoefficient should be (two)
}
test("By convention, the polynomial with only zero coefficients has degree negative infinity") {
Polynomial(zero).degree should be (NegativeInfinity)
Polynomial(zero, zero).degree should be (NegativeInfinity)
}
test("The degree function works as expected, but you have to use the extended integers (since you can get negative infinity)") {
Polynomial(two, two, zero, one).degree.toInt should be (3)
Polynomial(zero, two, two, zero, one).degree.toInt should be (3)
Polynomial(three, two, one).degree.toInt should be (2)
Polynomial(two, one).degree.toInt should be (1)
Polynomial(three).degree.toInt should be (0)
}
test("The string representation follows mathematical conventions") {
// "[c]_4" denotes the residue class of c modulo 4.
Polynomial(zero).toString should be ("[0]_4")
Polynomial(one).toString should be ("[1]_4")
Polynomial(one, zero).toString should be ("[1]_4X")
Polynomial(two, zero, one).toString should be ("[2]_4X^2 + [1]_4")
Polynomial(three, two, one, two).toString should be ("[3]_4X^3 + [2]_4X^2 + [1]_4X + [2]_4")
}
test("Polynomials with the same formal parameter are equal when you would expect") {
Polynomial(zero) == Polynomial(zero, zero) should be (true)
Polynomial(zero, one, two) == Polynomial(one, two) should be (true)
Polynomial(one, two) != Polynomial(two, one) should be (true)
}
test("Polynomials with different formal parameters are not equal") {
Polynomial(X, one, two) == Polynomial(polynomial.Predef.Y, one, two) should be (false)
}
test("Hashcode respects equality") {
Polynomial(one, two, zero, three).hashCode == Polynomial(zero, zero, one, two, zero, three).hashCode should be (true)
}
test("Polynomials can be used in sets") {
val set = Set(Polynomial(zero, one, two), Polynomial(two, one) ,Polynomial(one, two))
set.size should be (2)
set.contains(Polynomial(zero, two, one)) should be (true)
set.contains(Polynomial(three, one)) should be (false)
}
}
| dkettlestrings/thunder | src/test/scala/polynomial/PolynomialTest.scala | Scala | gpl-3.0 | 2,875 |
package org.eoin
import org.eoin.Chapter10.exercise9.{Part, Stub, WC}
object Chapter10 {
/**
 * A monoid on `A`: an associative binary operation `op` with an identity
 * element `zero`. The laws are not enforced by the type system.
 */
trait Monoid[A] {
def op(a1: A, a2: A): A
def zero: A
}
object exercise1 {
  /** Strings under concatenation; identity is the empty string. */
  val stringMonoid = new Monoid[String] {
    def op(x: String, y: String) = x + y
    def zero = ""
  }

  /** Lists under concatenation; identity is the empty list. */
  def listMonoid[A] = new Monoid[List[A]] {
    def op(xs: List[A], ys: List[A]) = xs ++ ys
    def zero = Nil
  }

  /** Integers under addition; identity is 0. */
  val intAddition: Monoid[Int] = new Monoid[Int] {
    def op(x: Int, y: Int): Int = x + y
    def zero: Int = 0
  }

  /** Integers under multiplication; identity is 1. */
  val intMultiplication: Monoid[Int] = new Monoid[Int] {
    def op(x: Int, y: Int): Int = x * y
    def zero: Int = 1
  }

  /** Booleans under inclusive or; identity is false. */
  val booleanOr: Monoid[Boolean] = new Monoid[Boolean] {
    def op(x: Boolean, y: Boolean): Boolean = x | y
    def zero: Boolean = false
  }

  /** Booleans under and; identity is true. */
  val booleanAnd: Monoid[Boolean] = new Monoid[Boolean] {
    def op(x: Boolean, y: Boolean): Boolean = x & y
    def zero: Boolean = true
  }
}
object exercise2 {
  /** Options combined by keeping the first defined value; identity is None. */
  def optionMonoid[A]: Monoid[Option[A]] = new Monoid[Option[A]] {
    def op(first: Option[A], second: Option[A]): Option[A] = first.orElse(second)
    def zero: Option[A] = None
  }
}
object exercise3 {
  /** Endofunctions under left-to-right composition; identity is the identity function. */
  def EndoMonoid[A]: Monoid[A => A] = new Monoid[A => A] {
    def op(f: A => A, g: A => A): A => A = x => g(f(x))
    def zero: A => A = x => x
  }
}
object exercise5 {
/**
 * Joins trimmed strings with the separator `s`.
 * NOTE(review): `zero` ("") is not a true identity here — op(zero, " a ")
 * yields "a", not " a " — so the monoid laws only hold on already-trimmed
 * strings. Verify that is the intended domain.
 */
def wordsMonoid(s: String): Monoid[String]= new Monoid[String] {
override def op(a1: String, a2: String): String = {
val s1 = s"${a1.trim}${s}${a2.trim}"
s1.trim
}
override def zero: String = ""
}
}
object exercise6 {
  /** Folds the list with the monoid's operation, starting from its identity. */
  def concatenate[A](as: List[A], m: Monoid[A]): A =
    as.foldLeft(m.zero)(m.op)
}
object exercise7 {
  /** Maps each element into the monoid and combines the results left to right. */
  def foldMap[A,B](as: List[A], m: Monoid[B])(f: A => B): B =
    as.foldLeft(m.zero)((acc, a) => m.op(acc, f(a)))
}
object exercise9 {
  /** Partial result of a parallel word count. */
  sealed trait WC
  /** A run of characters containing no completed word boundary. */
  case class Stub(chars: String) extends WC
  /** `words` complete words, plus partial words dangling on either side. */
  case class Part(lStub: String, words: Int, rStub: String) extends WC

  /**
   * Monoid for counting words across arbitrarily split chunks.
   *
   * Fixes two defects in the previous version:
   *  - `w + lsCombo count {...}` parsed as `(w + lsCombo).count(...)`, i.e.
   *    Int-to-String concatenation followed by counting separators in the
   *    digits of `w` as well as the stub;
   *  - separators were double counted, as the old inline comment admitted.
   *
   * Here a word is completed only where two stubs meet: merging two Parts adds
   * exactly one word iff the glued middle stub is non-empty. Stubs never
   * contain boundaries, so they simply concatenate.
   */
  def wcMonoid: Monoid[WC] = new Monoid[WC] {
    override def op(a1: WC, a2: WC): WC = (a1, a2) match {
      case (Stub(c1), Stub(c2)) => Stub(c1 + c2)
      case (Stub(c), Part(ls, w, rs)) => Part(c + ls, w, rs)
      case (Part(ls, w, rs), Stub(c)) => Part(ls, w, rs + c)
      case (Part(ls1, w1, rs1), Part(ls2, w2, rs2)) =>
        val middle = rs1 + ls2
        Part(ls1, w1 + (if (middle.isEmpty) 0 else 1) + w2, rs2)
    }
    override def zero: WC = Stub("")
  }
}
object exercise10 {
def countWords (s:String):Int = {
val wcMonoid = exercise9.wcMonoid
def toWC(c: Char): WC = {
if (c.isWhitespace) Part("", 0, "")
else Stub(c.toString)
}
def unstub(s: String) = s.length min 1
exercise7.foldMap(s.toList, wcMonoid)(toWC) match {
case Stub(s) => unstub(s)
case Part(l, w, r) => unstub(l) + w + unstub(r)
}
}
}
object exercise11 {
//@tailrec
def foldMapV[A,B](v: IndexedSeq[A], m: Monoid[B])(f: A => B): B = {
v.length match {
case 0 => m.zero
case 1 => f(v(0))
case n =>
val (left,right) = v.splitAt( n/2 )
m.op( foldMapV(left, m)(f), foldMapV(right, m)(f) )
}
}
}
object exercise12 {
type IntBool = (Int,Boolean)
val compareMonoid = new Monoid[IntBool] {
override def op(a1: IntBool, a2: IntBool): IntBool = {
val (a1Int,a1Bool) = a1
val (a2Int,a2Bool) = a2
val unorderingDetected = a1Bool || a2Bool || (a2Int < a1Int)
(Math.abs(a2Int-a1Int), unorderingDetected)
}
override def zero = (0,false)
}
def isOrdered (is: IndexedSeq[Int]) : Boolean = {
val f = (i:Int) => (i,false)
val t = exercise11.foldMapV(is,compareMonoid) ( f)
! t._2
}
}
object foldable {
import scala.language.higherKinds
trait Foldable[F[_]] {
def foldRight[A, B](as: F[A])(z: B)(f: (A, B) => B): B
def foldLeft[A, B](as: F[A])(z: B)(f: (B, A) => B): B
def foldMap[A, B](as: F[A])(z: B)(f: A => B)(mb: Monoid[B]): B
def concatenate[A](as: F[A])(m: Monoid[A]): A = foldLeft(as)(m.zero)(m.op)
}
object foldableList extends Foldable[List] {
override def foldRight[A, B](as: List[A])(z: B)(f: (A, B) => B): B = as.foldRight(z)(f)
override def foldLeft[A, B](as: List[A])(z: B)(f: (B, A) => B): B = as.foldLeft(z)(f)
override def foldMap[A, B](as: List[A])(z: B)(f: (A) => B)(mb: Monoid[B]): B = exercise11.foldMapV(as.toIndexedSeq,mb)(f)
}
object foldableIndexedSeq extends Foldable[IndexedSeq] {
override def foldRight[A, B](as: IndexedSeq[A])(z: B)(f: (A, B) => B): B = as.foldRight(z)(f)
override def foldLeft[A, B](as: IndexedSeq[A])(z: B)(f: (B, A) => B): B = as.foldLeft(z)(f)
override def foldMap[A, B](as: IndexedSeq[A])(z: B)(f: (A) => B)(mb: Monoid[B]): B = exercise11.foldMapV(as,mb)(f)
}
sealed trait Tree[+A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]
case class Leaf[A](value: A) extends Tree[A]
object foldableTree extends Foldable[Tree] {
override def foldRight[A, B](as: Tree[A])(z: B)(f: (A, B) => B): B = ???
override def foldLeft[A, B](as: Tree[A])(z: B)(f: (B, A) => B): B = ???
override def foldMap[A, B](as: Tree[A])(z: B)(f: (A) => B)(mb: Monoid[B]): B = ???
}
object foldableOption extends Foldable[Option] {
override def foldRight[A, B](as: Option[A])(z: B)(f: (A, B) => B): B = ???
override def foldLeft[A, B](as: Option[A])(z: B)(f: (B, A) => B): B = ???
override def foldMap[A, B](as: Option[A])(z: B)(f: (A) => B)(mb: Monoid[B]): B = ???
}
}
} | eoinparker/FunctionalProgrammingRedBook | src/main/scala/org/eoin/chapter10.scala | Scala | mit | 6,535 |
package com.github.dakatsuka.akka.http.oauth2.client.strategy
import akka.NotUsed
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model._
import akka.stream.scaladsl.Source
import com.github.dakatsuka.akka.http.oauth2.client.{ ConfigLike, GrantType }
/** OAuth2 "client_credentials" grant: the client exchanges its id/secret
  * directly for an access token, with no user-facing authorization step. */
class ClientCredentialsStrategy extends Strategy(GrantType.ClientCredentials) {

  /** No authorize URL exists for this grant type. */
  override def getAuthorizeUrl(config: ConfigLike, params: Map[String, String] = Map.empty): Option[Uri] = None

  /** Builds a single-element source holding the token request for the
    * configured token endpoint. Caller-supplied `params` are sent alongside
    * (and can be overridden by) the mandatory grant parameters. */
  override def getAccessTokenSource(config: ConfigLike, params: Map[String, String] = Map.empty): Source[HttpRequest, NotUsed] = {
    val tokenUri = Uri(config.site.toASCIIString).withPath(Uri.Path(config.tokenUrl))
    val grantParams = Map(
      "grant_type" -> grant.value,
      "client_id" -> config.clientId,
      "client_secret" -> config.clientSecret
    )
    val entity = FormData(params ++ grantParams).toEntity(HttpCharsets.`UTF-8`)
    val request = HttpRequest(config.tokenMethod, tokenUri, List(RawHeader("Accept", "*/*")), entity)
    Source.single(request)
  }
}
| dakatsuka/akka-http-oauth2-client | src/main/scala/com/github/dakatsuka/akka/http/oauth2/client/strategy/ClientCredentialsStrategy.scala | Scala | apache-2.0 | 1,121 |
package scala.collection.mutable
// Compiler test fixture (dotty tests/pos/i8920): appears to exercise overload
// resolution in the presence of default arguments — confirm against issue #8920.
class Qu[A] protected (array: Array[AnyRef], start: Int, end: Int):
  // Public auxiliary constructor delegating to the protected primary one.
  def this(initialSize: Int = ArrayDeque.DefaultInitialSize) =
    this(ArrayDeque.alloc(initialSize), start = 0, end = 0)
object Qu:
  // Overloads mirroring the constructor shapes: one fully explicit, one defaulted.
  def f[A](array: Array[AnyRef], start: Int, end: Int) = 1
  def f[A](initialSize: Int = 1) = 2
| lampepfl/dotty | tests/pos/i8920/Qu_1.scala | Scala | apache-2.0 | 333 |
package pl.project13.scala.akka.raft.protocol
/**
* States used by the Raft FSM.
*
* Use by importing the protocol package:
* {{{import akka.raft.protocol._}}}
*/
trait RaftStates {

  /** Base type of all Raft FSM states; sealed so state matches are exhaustive. */
  sealed trait RaftState

  /** In this phase the member awaits to get its [[pl.project13.scala.akka.raft.ClusterConfiguration]] */
  case object Init extends RaftState

  /** A Follower can take writes from a Leader; If doesn't get any heartbeat, may decide to become a Candidate */
  case object Follower extends RaftState

  /** A Candidate tries to become a Leader, by issuing [[pl.project13.scala.akka.raft.protocol.RaftProtocol.RequestVote]] */
  case object Candidate extends RaftState

  /** The Leader is responsible for taking writes, and commiting entries, as well as keeping the heartbeat to all members */
  case object Leader extends RaftState
}
| colin-scott/akka-raft | src/main/scala/pl/project13/scala/akka/raft/protocol/RaftStates.scala | Scala | apache-2.0 | 859 |
/*
* #%L
* GatlingCql
* %%
* Copyright (C) 2014 Mikhail Stepura
* %%
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* #L%
*/
package io.github.gatling.cql
import io.gatling.core.config.{ Credentials, Protocol }
import com.datastax.driver.core.Cluster
import com.datastax.driver.core.Session
// Gatling protocol wrapper holding the shared Cassandra Session; carries no other settings.
case class CqlProtocol(session: Session) extends Protocol
/**
* Copyright 2011 James Lindstorff
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dk.industria.solr.processors;
import java.util.regex.{Matcher, Pattern, PatternSyntaxException}
/** A single pattern-replace rule used by the PatternReplaceProcessor.
  *
  * @param id          non-empty identifier used to reference the rule
  * @param pattern     compiled regular expression defining what to replace
  * @param replacement text substituted for every match
  */
class PatternReplaceRule(id: String, pattern: Pattern, replacement: String) {
  require(id != null)
  require(id.length > 0)
  require(pattern != null)
  require(replacement != null)

  /** Identifier of this rule. */
  def getId(): String = id

  /** Replaces every match of the rule's pattern in `value` with the replacement.
    *
    * @param value input text; must not be null
    * @return the input with all matches substituted
    */
  def replace(value: String): String = {
    require(value != null)
    pattern.matcher(value).replaceAll(replacement)
  }

  /** Human-readable description of the rule. */
  override def toString(): String = s"Id: [$id] Pattern: [$pattern] Replace: [$replacement]"
}
object PatternReplaceRule {
  /** Creates a rule from a raw pattern string, compiling it and treating a
    * null replacement as the empty string.
    *
    * @param id Id used to identify the rule in field mappings.
    * @param pattern String containing the regular expression to replace.
    * @param replacement Replacement value; null is treated as "".
    * @return the constructed PatternReplaceRule
    * @throws IllegalArgumentException if id is null/empty, pattern is null, or
    *                                  pattern is not a valid regular expression
    */
  @throws(classOf[IllegalArgumentException])
  def getInstance(id: String, pattern: String, replacement: String): PatternReplaceRule = {
    require(id != null)
    require(id.length > 0)
    require(pattern != null)
    val compiled =
      try Pattern.compile(pattern)
      catch {
        case e: PatternSyntaxException =>
          val msg = "Failed to compile pattern [" + pattern + "] for rule id [" + id + "] : " + e.getMessage()
          throw new IllegalArgumentException(msg, e)
      }
    new PatternReplaceRule(id, compiled, Option(replacement).getOrElse(""))
  }
}
| industria/solrprocessors | src/main/scala/dk/industria/solr/processors/PatternReplaceRule.scala | Scala | apache-2.0 | 3,056 |
/*
* Copyright 2010 Guardian News and Media
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gu.conf.impl
import org.scalatest.FunSuite
import org.scalatest.Matchers
import java.io.File
/** Tests for PropertiesLoader covering classpath URLs, file URLs, missing
  * resources and unknown protocols. */
class PropertiesLoaderTest extends FunSuite with Matchers {

  val loader = new PropertiesLoader
  // "file:" URL of the working directory, used to address test resources on disk.
  val basedir = "file:" + new File(".").getAbsolutePath

  test("should load classpath based properties") {
    val properties = loader getPropertiesFrom "classpath:conf/test.properties"
    properties.getProperty("property") should be("theft")
  }

  test("should load file based properties") {
    val properties = loader getPropertiesFrom (basedir + "/src/test/resources/conf/test.properties")
    properties.getProperty("property") should be("theft")
  }

  // Missing resources are tolerated: the loader returns an empty Properties.
  test("should ignore missing classpath based properties when not available") {
    val properties = loader getPropertiesFrom "classpath:conf/does-not-exist.properties"
    properties.size() should be(0)
  }

  test("should ignore missing file based properties when not available") {
    val properties = loader getPropertiesFrom (basedir + "/src/test/resources/conf/does-not-exist.properties")
    properties.size should be(0)
  }

  test("should ignore unknown protocols") {
    val properties = loader getPropertiesFrom "unknown:protocol"
    properties.size should be(0)
  }
} | guardian/guardian-configuration | src/test/scala/com.gu.conf/impl/PropertiesLoaderTest.scala | Scala | apache-2.0 | 1,882 |
package at.bioinform.webapp.directive
import akka.http.scaladsl.marshalling.GenericMarshallers._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import at.bioinform.webapp.Env
import at.bioinform.webapp.db.TableDefinitions
import cats.data.Reader
import scala.concurrent.ExecutionContext.Implicits.global
object Sequences extends TableDefinitions {

  /** Routes under /sequence, built from the application [[Env]] via a Reader. */
  val routes: Reader[Env, Route] = Reader { env =>
    val sequenceRepository = env.repositories.sequenceRepository
    pathPrefix("sequence" ) {
      get {
        complete {
          // Placeholder response — TODO(review): replace with a real listing.
          Marshal("Hello world!").to[HttpResponse]
        }
      } ~
      post {
        complete {
          // NOTE(review): create(...) is called with hard-coded values and its
          // result is rendered via toString — confirm this is intentional.
          val id = sequenceRepository.create(("first", "path"))
          Marshal(id.toString()).to[HttpResponse]
        }
      } ~
      path(IntNumber) { id =>
        get {
          complete {
            // Echoes the requested id; no repository lookup is performed yet.
            Marshal(s"$id").to[HttpResponse]
          }
        }
      }
    }
  }
}
| peri4n/bIO | subprojects/webapp/src/main/scala/at/bioinform/webapp/directive/Sequences.scala | Scala | apache-2.0 | 1,049 |
package io.youi.path
import scala.collection.mutable.ListBuffer
/** An immutable sequence of path drawing actions that can itself be drawn. */
case class Path(actions: List[PathAction]) extends PathBuilder with PathAction {
  /** Axis-aligned bounds of the actions' end points (see Path.boundingBox). */
  lazy val boundingBox: BoundingBox = Path.boundingBox(actions)
  // Cached Path2D, scaled by the UI pixel ratio at creation time.
  lazy val path2d: Val[Path2D] = Val(createPath2d(ui.ratio))

  /** Draws every action in order onto the given context. */
  override def draw(context: Context, x: Double, y: Double, scaleX: Double, scaleY: Double): Unit = {
    actions.foreach(_.draw(context, x, y, scaleX, scaleY))
  }

  /** Returns a new Path with every coordinate transformed by `mx`/`my`.
    * NOTE(review): RoundedRectangle is handled by Path.boundingBox but not
    * here — it would hit the throwing catch-all. */
  def modify(mx: Double => Double, my: Double => Double): Path = {
    val updated = actions.map {
      case CurveTo(x1, y1, x2, y2, x, y) => CurveTo(mx(x1), my(y1), mx(x2), my(y2), mx(x), my(y))
      case LineTo(x, y) => LineTo(mx(x), my(y))
      case MoveTo(x, y) => MoveTo(mx(x), my(y))
      case QuadraticCurveTo(x1, y1, x, y) => QuadraticCurveTo(mx(x1), my(y1), mx(x), my(y))
      case Rectangle(x, y, width, height, begin, close) => Rectangle(mx(x), my(y), mx(width), my(height), begin, close)
      case BeginPath => BeginPath
      case ClosePath => ClosePath
      case action => throw new RuntimeException(s"Unsupported PathAction: $action.")
    }
    Path(updated)
  }

  // Builds the platform Path2D with all coordinates multiplied by `ratio`
  // (by-name so the current value is read at build time).
  private def createPath2d(ratio: => Double): Path2D = {
    val p = new Path2D
    def s(v: Double): Double = v * ratio
    actions.foreach {
      case CurveTo(x1, y1, x2, y2, x, y) => p.bezierCurveTo(s(x1), s(y1), s(x2), s(y2), s(x), s(y))
      case LineTo(x, y) => p.lineTo(s(x), s(y))
      case MoveTo(x, y) => p.moveTo(s(x), s(y))
      case QuadraticCurveTo(x1, y1, x, y) => p.quadraticCurveTo(s(x1), s(y1), s(x), s(y))
      case BeginPath => // No begin in path2d
      case ClosePath => p.closePath()
      case action => throw new RuntimeException(s"Unsupported PathAction: $action.")
    }
    p
  }

  /** Multiplies all x coordinates by `x` and y coordinates by `y`. */
  def scale(x: Double = 1.0, y: Double = 1.0): Path = modify(_ * x, _ * y)
  /** Translates all coordinates by the given offsets. */
  def shift(adjustX: Double, adjustY: Double): Path = modify(_ + adjustX, _ + adjustY)
  /** Applies Path.fix to every coordinate. */
  def fix(): Path = modify(Path.fix, Path.fix)
  /** Appends one action, returning a new Path. */
  override def withAction(action: PathAction): Path = Path(actions ::: List(action))
  /** Copy without any BeginPath actions. */
  def withoutOpen(): Path = Path(actions.filterNot(_ == BeginPath))
  /** Copy without any ClosePath actions. */
  def withoutClose(): Path = Path(actions.filterNot(_ == ClosePath))
  override def toString: String = s"Path(${actions.mkString(", ")})"
}
object Path extends PathBuilder {
  /** The empty path (no actions). */
  lazy val empty: Path = Path(Nil)

  // SVG path command letters recognised by the string parser below.
  private lazy val actionCharacters = Set('M', 'L', 'C', 'Q', 'Z', 'H', 'V')
  // One regex per command; coordinates are optionally space-separated
  // (commas are normalised to spaces before matching).
  private lazy val MoveRegex = """M[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)""".r
  private lazy val LineRegex = """L[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)""".r
  private lazy val HorizontalLineRegex = """H[ ]?([-]?[0-9.]+)""".r
  private lazy val VerticalLineRegex = """V[ ]?([-]?[0-9.]+)""".r
  private lazy val CurveRegex = """C[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)[ ]?([-]?[0-9.]+)""".r
  // NOTE(review): this pattern uses `[- ]?` inside the groups, unlike the
  // others which separate the space and the sign — confirm it is intentional.
  private lazy val QuadraticRegex = """Q([- ]?[0-9.]+)([- ]?[0-9.]+)([- ]?[0-9.]+)([- ]?[0-9.]+)""".r

  /** Wraps the actions, ensuring the list starts with BeginPath and ends with
    * ClosePath. NOTE(review): calling with no actions throws on `list.head`. */
  def apply(actions: PathAction*): Path = {
    var list = actions.toList
    if (list.head != BeginPath) {
      list = BeginPath :: list
    }
    if (list.last != ClosePath) {
      list = list ::: List(ClosePath)
    }
    Path(list)
  }

  /** Concatenates the actions of all given paths into one. */
  def merge(paths: Path*): Path = Path(paths.flatMap(_.actions).toList)

  /** Parses an SVG-style path string (absolute commands M/L/H/V/C/Q/Z only)
    * into a Path. Commas are treated as spaces and input is upper-cased, so
    * relative (lower-case) commands are parsed as absolute ones. */
  def apply(pathString: String): Path = {
    val b = new StringBuilder
    val actions = ListBuffer.empty[PathAction]
    // Current pen position, needed to expand H/V into full LineTo actions.
    var currentX = 0.0
    var currentY = 0.0
    // Converts one accumulated command string into a PathAction.
    def s2a(s: String): PathAction = s match {
      case MoveRegex(x, y) => {
        currentX = x.toDouble
        currentY = y.toDouble
        MoveTo(currentX, currentY)
      }
      case LineRegex(x, y) => {
        currentX = x.toDouble
        currentY = y.toDouble
        LineTo(currentX, currentY)
      }
      case HorizontalLineRegex(x) => {
        currentX = x.toDouble
        LineTo(currentX, currentY)
      }
      case VerticalLineRegex(y) => {
        currentY = y.toDouble
        LineTo(currentX, currentY)
      }
      case CurveRegex(x1, y1, x2, y2, x, y) => {
        currentX = x.toDouble
        currentY = y.toDouble
        CurveTo(x1.toDouble, y1.toDouble, x2.toDouble, y2.toDouble, currentX, currentY)
      }
      case QuadraticRegex(x1, y1, x, y) => {
        currentX = x.toDouble
        currentY = y.toDouble
        QuadraticCurveTo(x1.toDouble, y1.toDouble, currentX, currentY)
      }
      case "Z" => ClosePath
      case _ => throw new RuntimeException(s"Unknown action: [$s]")
    }
    // Split the string at every command letter, flushing the previous command.
    pathString.replaceAll("[,]", " ").toUpperCase.foreach { c =>
      if (actionCharacters.contains(c)) {
        if (b.nonEmpty) {
          actions += s2a(b.toString().trim)
        }
        b.clear()
      }
      b.append(c)
    }
    // Flush the final command left in the buffer.
    if (b.nonEmpty) {
      actions += s2a(b.toString().trim)
    }
    Path(actions.toList)
  }
def boundingBox(pathActions: List[PathAction]): BoundingBox = {
var minX = Double.MaxValue
var minY = Double.MaxValue
var maxX = Double.MinValue
var maxY = Double.MinValue
var cx = 0.0
var cy = 0.0
def adjustTo(newX: Double, newY: Double, oldX: Double = cx, oldY: Double = cy, updateCoordinates: Boolean = true): Unit = {
minX = math.min(oldX, math.min(minX, newX))
minY = math.min(oldY, math.min(minY, newY))
maxX = math.max(oldX, math.max(maxX, newX))
maxY = math.max(oldY, math.max(maxY, newY))
cx = newX
cy = newY
}
pathActions.foreach {
case BeginPath => // Nothing
case ClosePath => // Nothing
case CurveTo(_, _, _, _, x, y) => adjustTo(x, y)
case LineTo(x, y) => adjustTo(x, y)
case MoveTo(x, y) => {
cx = x
cy = y
}
case QuadraticCurveTo(_, _, x, y) => adjustTo(x, y)
case Rectangle(x, y, width, height, _, _) => adjustTo(x + width, y + height, updateCoordinates = false)
case RoundedRectangle(x, y, width, height, _) => adjustTo(x + width, y + height, updateCoordinates = false)
}
BoundingBox(minX, minY, maxX, maxY)
}
  /** Starts a path from a single action. */
  override def withAction(action: PathAction): Path = Path(List(action))
  /** Coordinate normalisation hook used by Path.fix(); currently the identity. */
  def fix(value: Double): Double = value
} | outr/youi | ui/js/src/main/scala/io/youi/path/Path.scala | Scala | mit | 6,079 |
package akka.rtcweb.protocol.scodec
import akka.rtcweb.CodecSpec
import org.specs2.mutable.Specification
import scodec.Attempt.Successful
import scodec.{ Attempt, DecodeResult }
import scodec.bits.BitVector.{ empty => emptyVector }
import scodec.bits.BitVector._
import scodec.bits._
import scodec.codecs._
import shapeless.HNil
import scala.concurrent.duration._
/** Specs for the SCodecContrib combinators: bit/byte block alignment,
  * duration codecs, multi-variable-size codecs and size-bounded codecs. */
class SCodecContribSpec extends Specification with CodecSpec {
  import SCodecContrib._
  "blockalignBits" should {
    "decode an uint8 followed by a two bit padding" in {
      val res = blockalignBits(uint8, 9).decode(uint8.encode(1).require ++ low(1))
      res shouldEqual Successful(DecodeResult(1, emptyVector))
    }
    "encode an uint8 followed by a two bit padding" in {
      val res = blockalignBits(uint8 :: uint8, 9).encode(1 :: 1 :: HNil)
      res shouldEqual Successful(low(7) ++ high(1) ++ low(7) ++ high(1) ++ low(2))
    }
  }
  "blockalignBytes" should {
    "encode" in {
      val res = blockalignBytes(uint8 :: uint8 :: uint8, 2).encode(255 :: 255 :: 255 :: HNil)
      res shouldEqual Successful(high(24) ++ low(8))
    }
    "decode" in {
      blockalignBytes(uint8, 2).decode(uint8.encode(1).require ++ high(9)) shouldEqual
        Successful(DecodeResult(1, high(1)))
    }
  }
  "duration" should {
    "encode 42 Minutes" in {
      duration(uint8, concurrent.duration.MINUTES).encode(42 minutes) shouldEqual
        Successful(uint8.encode(42).require)
    }
    "decode 42 Minutes" in {
      duration(uint8, concurrent.duration.MINUTES).decode(uint8.encode(42).require) shouldEqual
        Successful(DecodeResult(42 minutes, BitVector.empty))
    }
  }
  "multiVariableSize" should {
    "decode" in {
      multiVariableSizes(uint8 :: uint8 :: HNil, ascii :: ascii :: HNil).decode(hex"0102414142".bits) shouldEqual
        Successful(DecodeResult("A" :: "AB" :: HNil, BitVector.empty))
    }
    // Encoding direction is not implemented/verified yet.
    "encode" in {
      //multiVariableSizes(uint8 :: uint8 :: HNil, ascii :: ascii :: HNil).encode("A" :: "AB" :: HNil) shouldEqual
      //  Successful(hex"0102414142".bits)
      todo
    }
  }
  "sizeBounded" should {
    "not be creatable for codecs with known bigger upper size bounds" in {
      boundedSize(1, scodec.codecs.bits(100)) should throwA[IllegalArgumentException]
    }
    "sizeBound a string" in {
      roundtrip(boundedSizeBytes(4, utf8), "1234")
    }
    // Over-long input is truncated to the bound; the remainder is left over.
    "do not decode too long things" in {
      boundedSizeBytes(2, utf8).decode(utf8.encode("abc").require) should beEqualTo(Attempt.successful(DecodeResult("ab", hex"0x63".bits)))
    }
    "do decode shorter things" in {
      boundedSizeBytes(4, utf8).decode(utf8.encode("abc").require) should beEqualTo(Attempt.successful(DecodeResult("abc", BitVector.empty)))
    }
  }
}
| danielwegener/akka-rtcweb | src/test/scala/akka/rtcweb/protocol/scodec/SCodecContribSpec.scala | Scala | apache-2.0 | 2,765 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import play.api.data.Form
import play.api.data.Forms.single
/** Yes/no form asking whether the registrant is part of an organisation. */
object PartOfOrganisationForm extends RequiredBooleanForm {

  // Message key displayed when no option is selected.
  override val errorMsg = "validation.partOfOrganisation.missing"

  // Name of the single boolean field in the posted form data.
  val yesNo: String = "value"

  val form: Form[Boolean] = Form(
    single(yesNo -> requiredBoolean)
  )
}
| hmrc/vat-registration-frontend | app/forms/PartOfOrganisationForm.scala | Scala | apache-2.0 | 917 |
package scommons.client.showcase.table
import scommons.react._
import scommons.react.test.TestSpec
import scommons.react.test.util.ShallowRendererUtils
/** Shallow-render test for TablePanelDemo: verifies the headings, descriptive
  * text and the two embedded table examples. */
class TablePanelDemoSpec extends TestSpec with ShallowRendererUtils {

  it should "render component" in {
    //given
    val component = <(TablePanelDemo())()()

    //when
    val result = shallowRender(component)

    //then
    assertNativeComponent(result,
      <.>()(
        <.h2()("TablePanel"),
        <.p()("Demonstrates table functionality"),

        <.h3()("Simple TablePanel"),
        <(SimpleTablePanel()).empty,

        <.h3()("TablePanel with custom cell renderer"),
        <(CustomTablePanel()).empty
      )
    )
  }
}
| viktor-podzigun/scommons | showcase/src/test/scala/scommons/client/showcase/table/TablePanelDemoSpec.scala | Scala | apache-2.0 | 707 |
/*
Copyright (C) 2012-2013 the original author or authors.
See the LICENSE.txt file distributed with this work for additional
information regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalaopts
/** Strategy for validating configured options and parsing command line arguments. */
trait ParserStrategy {

  /** Validates the configured option map before any parsing happens.
    *
    * @param options the configured command line options (presumably keyed by
    *                option name — confirm against CommandLineOptionMap)
    * @return true when the configuration is acceptable to this strategy
    */
  def validateOptions(options: CommandLineOptionMap): Boolean

  /**
   * Takes a stream of arguments and produces a stream that represents the arguments in a
   * standard format.
   * @param application_arguments the raw arguments passed to the application
   * @param options the configured command line options to parse against
   * @return the parse results for the recognised options
   */
  def processOptions(application_arguments: Stream[String], options: CommandLineOptionMap): CommandLineOptionResults
}
| davidhoyt/scalaopts | src/main/scala/scalaopts/ParserStrategy.scala | Scala | apache-2.0 | 1,146 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.validation
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xDirectorsBoxRetriever
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
/** Decides whether the directors' report section is enabled for a filing. */
trait DirectorsReportEnabledCalculator {

  def calculateDirectorsReportEnabled(boxRetriever: Frs10xDirectorsBoxRetriever with FilingAttributesBoxValueRetriever): Boolean = {
    // Read every flag up front, preserving the original evaluation order.
    val coHo = boxRetriever.companiesHouseFiling().value
    val hmrc = boxRetriever.hmrcFiling().value
    val micro = boxRetriever.microEntityFiling().value
    val coHoAnswer = boxRetriever.ac8021().orFalse
    val hmrcAnswer = boxRetriever.ac8023().orFalse

    if (coHo && !hmrc) {
      // Companies House only: the CoHo question (AC8021) alone decides.
      coHoAnswer
    } else {
      // Otherwise: enabled unless this is a micro-entity filing without a
      // positive answer to the HMRC question (AC8023).
      !micro || hmrcAnswer
    }
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/validation/DirectorsReportEnabledCalculator.scala | Scala | apache-2.0 | 1,532 |
object Main {
def test[T] = ?//test // error
} | lampepfl/dotty | tests/neg/i10268.scala | Scala | apache-2.0 | 48 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import java.io._
object GenerateInput {
  /** Writes `n-output-files` files named `input.<i>` into `output-dir`, each
    * with `n-vectors-per-file` lines of space-separated random doubles in
    * [0, 100). Line length is uniform in
    * [avg-vec-length - vec-length-range, avg-vec-length + vec-length-range).
    *
    * Fixes over the previous version: a zero vec-length-range no longer
    * crashes (Random.nextInt(0) throws), and the writer is closed even if
    * writing fails.
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 5) {
      println("usage: GenerateInput output-dir n-output-files " +
          "n-vectors-per-file avg-vec-length vec-length-range")
      return
    }

    val outputDir = args(0)
    val nOutputFiles = args(1).toInt
    val pointsPerFile = args(2).toInt
    val avgLength = args(3).toInt
    val lengthRange = args(4).toInt
    assert(lengthRange < avgLength)

    val r = new scala.util.Random
    val range = 100.0

    for (f <- 0 until nOutputFiles) {
      val writer = new PrintWriter(new File(outputDir + "/input." + f))
      try {
        for (p <- 0 until pointsPerFile) {
          // Uniform in [avgLength - lengthRange, avgLength + lengthRange);
          // nextInt requires a positive bound, so a zero range is special-cased.
          val length =
            if (lengthRange == 0) avgLength
            else avgLength + r.nextInt(lengthRange * 2) - lengthRange
          assert(length > 0)
          for (i <- 0 until length) {
            writer.write(r.nextDouble * range + " ")
          }
          writer.write("\n")
        }
      } finally {
        writer.close()
      }
    }
  }
}
| agrippa/spark-swat | functional-tests/two-dense-vector-input/src/main/scala/dense-vector-input/GenerateInput.scala | Scala | bsd-3-clause | 2,654 |
/*
* BasicRendering.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite
import java.awt.geom.{AffineTransform, Area, Ellipse2D}
/** Paint support for grapheme obj views. */
trait GraphemeRendering extends BasicRendering {
  // Shared geometry/transform instances exposed to painting code
  // (presumably reused as scratch objects to avoid per-paint allocation —
  // TODO(review): confirm with the implementations).
  def ellipse1  : Ellipse2D
  def transform1: AffineTransform
  def area1     : Area
  def area2     : Area

  // Current state of the grapheme move tool.
  def ttMoveState : GraphemeTool.Move
} | Sciss/Mellite | core/src/main/scala/de/sciss/mellite/GraphemeRendering.scala | Scala | agpl-3.0 | 617 |
package com.twitter.inject.thrift.modules
import com.twitter.inject.TwitterModule
import com.twitter.inject.thrift.AndThenService
import com.twitter.inject.thrift.internal.DefaultAndThenServiceImpl
/** Guice module binding [[AndThenService]] to its default implementation. */
object AndThenServiceModule extends TwitterModule {

  override def configure: Unit = {
    // Bind the interface to the default implementation as a singleton.
    bindSingleton[AndThenService].to[DefaultAndThenServiceImpl]
  }
} | syamantm/finatra | inject/inject-thrift-client/src/main/scala/com/twitter/inject/thrift/modules/AndThenServiceModule.scala | Scala | apache-2.0 | 357 |
package com.github.chengpohi.indexer
import akka.actor.Actor
import com.github.chengpohi.model.IndexPage
import org.slf4j.LoggerFactory
/**
 * Actor that receives [[IndexPage]] messages and hands them to the HTML page
 * indexer. Created by xiachen on 1/17/15.
 */
class PageIndexerService extends Actor {
  // NOTE(review): LOG is defined but unused in this class.
  lazy val LOG = LoggerFactory.getLogger(getClass.getName)
  val htmlPageIndexer = new impl.HtmlPageIndexer

  // Fire-and-forget: no reply is sent; asyncIndex is presumably asynchronous
  // (judging by its name) — confirm in HtmlPageIndexer.
  def receive: Receive = {
    case page: IndexPage => htmlPageIndexer.asyncIndex(page)
  }
}
| chengpohi/secer | indexer/src/main/scala/com/github/chengpohi/indexer/PageIndexerService.scala | Scala | apache-2.0 | 425 |
package org.jetbrains.plugins.scala.refactoring.introduceParameter
import java.io.File
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.fileEditor.{FileEditorManager, OpenFileDescriptor}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.PsiDocumentManager
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.extensions.executeWriteActionCommand
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunctionDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.changeInfo.ScalaChangeInfo
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.{ScalaChangeSignatureProcessor, ScalaMethodDescriptor, ScalaParameterInfo}
import org.jetbrains.plugins.scala.lang.refactoring.introduceParameter.ScalaIntroduceParameterHandler
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil.{afterExpressionChoosing, trimSpacesAndComments}
import scala.annotation.nowarn
/**
* @author Alexander Podkhalyuzin
*/
/**
 * Base class for "introduce parameter" refactoring tests.
 *
 * Each test loads a fixture file named after the test from [[folderPath]],
 * selects the expression between the `/*start*/` and `/*end*/` markers,
 * runs the introduce-parameter refactoring on it, and compares the resulting
 * file text against the expected output stored in the fixture's last comment.
 *
 * Settings can be overridden per-fixture with `//all = `, `//name = `,
 * `//default = ` and `//constructor = ` comment lines.
 */
@nowarn("msg=ScalaLightPlatformCodeInsightTestCaseAdapter")
abstract class IntroduceParameterTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  // Directory containing the .scala fixture files for these tests.
  protected def folderPath = baseRootPath + "introduceParameter/"

  // Markers delimiting the expression to extract.
  private val startMarker = "/*start*/"
  private val endMarker = "/*end*/"
  // Optional per-fixture setting overrides (comment prefixes).
  private val allMarker = "//all = "
  private val nameMarker = "//name = "
  private val defaultMarker = "//default = "
  private val constructorMarker = "//constructor = "

  protected def doTest(): Unit = {
    import _root_.org.junit.Assert._
    implicit val project: Project = getProjectAdapter

    // Load the fixture file for the current test and configure the editor with it.
    val filePath = folderPath + getTestName(false) + ".scala"
    val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
    assert(file != null, "file " + filePath + " not found")
    val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
    configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
    val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]

    // Resolve the selection range from the markers; startOffset points just past /*start*/.
    val startOffset = fileText.indexOf(startMarker) + startMarker.length
    assert(startOffset != -1 + startMarker.length,
      "Not specified start marker in test case. Use /*start*/ in scala file for this.")
    val endOffset = fileText.indexOf(endMarker)
    assert(endOffset != -1, "Not specified end marker in test case. Use /*end*/ in scala file for this.")

    val fileEditorManager = FileEditorManager.getInstance(project)
    implicit val editor: Editor = fileEditorManager
      .openTextEditor(new OpenFileDescriptor(project, getVFileAdapter, startOffset), false)

    var res: String = null
    // The expected output lives in the last comment of the fixture file.
    val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)

    //getting settings
    // Reads a per-fixture setting: the comment element found at the marker's offset,
    // minus the marker prefix; falls back to `default` when the marker is absent.
    def getSetting(marker: String, default: String): String = {
      val offset = fileText.indexOf(marker)
      if (offset == -1) default
      else {
        val comment = scalaFile.findElementAt(offset)
        comment.getText.substring(marker.length)
      }
    }

    val replaceAllOccurrences = getSetting(allMarker, "true").toBoolean
    val paramName = getSetting(nameMarker, "param")
    val isDefaultParam = getSetting(defaultMarker, "false").toBoolean
    val toPrimaryConstructor = getSetting(constructorMarker, "false").toBoolean

    //start to inline
    try {
      executeWriteActionCommand("Test") {
        editor.getSelectionModel.setSelection(startOffset, endOffset)
        afterExpressionChoosing(scalaFile, "Introduce Variable") {
          trimSpacesAndComments(editor, scalaFile)
          PsiDocumentManager.getInstance(project).commitAllDocuments()
          val handler = new ScalaIntroduceParameterHandler()
          // Bail out of the whole command if no expression could be selected.
          val (exprWithTypes, elems) = handler.selectedElementsInFile(scalaFile).getOrElse(return)
          // Target is either the primary constructor of the enclosing class or the
          // enclosing function definition, together with the method's return type.
          val (methodLike: ScMethodLike, returnType) =
            if (toPrimaryConstructor)
              (PsiTreeUtil.getContextOfType(elems.head, true, classOf[ScClass]).constructor.get, Any)
            else {
              val fun = PsiTreeUtil.getContextOfType(elems.head, true, classOf[ScFunctionDefinition])
              (fun, fun.returnType.getOrAny)
            }
          val collectedData = handler.collectData(exprWithTypes, elems, methodLike, editor)
          assert(collectedData.isDefined, "Could not collect data for introduce parameter")
          // Apply the per-fixture overrides to the collected refactoring data.
          val data = collectedData.get.copy(paramName = paramName, replaceAll = replaceAllOccurrences)

          // Run the refactoring through the change-signature machinery.
          val paramInfo = new ScalaParameterInfo(data.paramName, -1, data.tp, project, false, false, data.defaultArg, isIntroducedParameter = true)
          val descriptor: ScalaMethodDescriptor = handler.createMethodDescriptor(data.methodToSearchFor, paramInfo)
          val changeInfo = ScalaChangeInfo(descriptor.getVisibility, data.methodToSearchFor, descriptor.getName, returnType,
            descriptor.parameters, isDefaultParam)

          changeInfo.introducedParameterData = Some(data)
          new ScalaChangeSignatureProcessor(changeInfo).run()
        }
      }
      // Everything before the trailing expected-output comment is the actual result.
      res = scalaFile.getText.substring(0, lastPsi.getTextOffset).trim
    }
    catch {
      // NOTE(review): concatenating e.getStackTrace (an Array) yields its default
      // toString, not a readable trace — consider e.getStackTrace.mkString("\n").
      case e: Exception => assert(assertion = false, message = e.getMessage + "\\n" + e.getStackTrace)
    }

    // Extract the expected output from the trailing line/block/doc comment.
    val text = lastPsi.getText
    val output = lastPsi.getNode.getElementType match {
      case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
      case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
        text.substring(2, text.length - 2).trim
      case _ =>
        assertTrue("Test result must be in last comment statement.", false)
        ""
    }
    assertEquals(output, res.trim)
  }
}
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.repl
import com.spotify.scio.{registerSysProps, SysProp}
/**
 * System properties recognized by the Scio REPL, registered via
 * the `registerSysProps` annotation.
 */
@registerSysProps
object ScioReplSysProps {
  // NOTE(review): description is empty — consider documenting what "key" controls.
  val Key: SysProp = SysProp("key", "")

  // Mirrors the standard scala.repl.maxprintstring property of the Scala REPL.
  val MaxPrintString: SysProp =
    SysProp("scala.repl.maxprintstring", "Max characters to display in REPL before truncation")
}
| spotify/scio | scio-repl/src/main/scala/com/spotify/scio/repl/ScioReplSysProps.scala | Scala | apache-2.0 | 899 |
/**
* Copyright 2012-2013 StackMob
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stackmob.scaliak.tests.util
import org.mockito.ArgumentMatcher
/**
 * Mockito argument matcher that accepts every argument while remembering the
 * last value it saw, so a test can extract and inspect the captured argument
 * after the mocked call.
 */
class MockitoArgumentExtractor[T] extends ArgumentMatcher[T] {
  /** The most recently captured argument; None until `matches` is first invoked with a non-null value. */
  var argument: Option[T] = None

  /**
   * Always reports a match; capturing the (possibly null) argument is the
   * side effect this matcher exists for.
   */
  def matches(arg: AnyRef): Boolean = {
    argument = Option(arg).map(_.asInstanceOf[T])
    true
  }
}
} | stackmob/scaliak | src/test/scala/com/stackmob/scaliak/tests/util/MockitoArgumentExtractor.scala | Scala | apache-2.0 | 876 |
package org.apache.spark.sql.cassandra
import org.scalatest.{FlatSpec, Matchers}
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.types.{IntType, TimeUUIDType}
/**
 * Tests for [[BasicCassandraPredicatePushDown]]: given a Cassandra table
 * schema and a set of filters, checks which predicates are pushed down to
 * Cassandra and which are preserved for post-filtering in Spark.
 */
class PredicatePushDownSpec extends FlatSpec with Matchers {

  // We don't want this test to rely on any Spark code,
  // so we're using our own Filters
  trait Filter
  case class EqFilter(columnName: String) extends Filter
  case class InFilter(columnName: String) extends Filter
  case class LtFilter(columnName: String) extends Filter
  case class GtFilter(columnName: String) extends Filter
  case object UnsupportedFilter extends Filter

  // Adapter teaching the push-down logic how to classify the test Filters.
  implicit object FilterOps extends PredicateOps[Filter] {

    override def columnName(p: Filter) = p match {
      case EqFilter(name) => name
      case InFilter(name) => name
      case LtFilter(name) => name
      case GtFilter(name) => name
      case UnsupportedFilter => throw new IllegalArgumentException("Unsupported predicate")
    }

    override def isRangePredicate(p: Filter) = p.isInstanceOf[LtFilter] || p.isInstanceOf[GtFilter]

    override def isSingleColumnPredicate(p: Filter) = p != UnsupportedFilter

    override def isEqualToPredicate(p: Filter) = p.isInstanceOf[EqFilter]

    override def isInPredicate(p: Filter) = p.isInstanceOf[InFilter]
  }

  // Schema under test: composite partition key (pk1, pk2), clustering columns
  // c1..c3, regular columns (i1/i2 are indexed, r1/r2 are not), and a TimeUUID column.
  val pk1 = ColumnDef("pk1", PartitionKeyColumn, IntType)
  val pk2 = ColumnDef("pk2", PartitionKeyColumn, IntType)
  val c1 = ColumnDef("c1", ClusteringColumn(0), IntType)
  val c2 = ColumnDef("c2", ClusteringColumn(1), IntType)
  val c3 = ColumnDef("c3", ClusteringColumn(2), IntType)
  val i1 = ColumnDef("i1", RegularColumn, IntType)
  val i2 = ColumnDef("i2", RegularColumn, IntType)
  val r1 = ColumnDef("r1", RegularColumn, IntType)
  val r2 = ColumnDef("r2", RegularColumn, IntType)
  val t1 = ColumnDef("t1", RegularColumn, TimeUUIDType)
  val timeUUIDc1 = ColumnDef("c1", ClusteringColumn(0), TimeUUIDType)

  val table = TableDef(
    keyspaceName = "test",
    tableName = "test",
    partitionKey = Seq(pk1, pk2),
    clusteringColumns = Seq(c1, c2, c3),
    regularColumns = Seq(i1, i2, r1, r2),
    indexes = Seq(
      IndexDef("DummyIndex", "i1", "IndexOne", Map.empty),
      IndexDef("DummyIndex", "i2", "IndexTwo", Map.empty))
  )

  // Variant of the table whose first clustering column is a TimeUUID, which
  // restricts which predicate shapes are allowed.
  val timeUUIDTable = TableDef(
    keyspaceName = "test",
    tableName = "uuidtab",
    partitionKey = Seq(pk1, pk2),
    clusteringColumns = Seq(timeUUIDc1),
    regularColumns = Seq(i1, i2, r1, r2, t1)
  )

  "BasicCassandraPredicatePushDown" should "push down all equality predicates restricting partition key columns" in {
    val f1 = EqFilter("pk1")
    val f2 = EqFilter("pk2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain allOf(f1, f2)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should " break if the user tries to use a TimeUUID on a fully unhandled predicate" in {
    val f1 = GtFilter("t1")
    // NOTE(review): the intercepted exception is not inspected further;
    // the test only asserts that construction throws.
    val ex = intercept[IllegalArgumentException] {
      val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), timeUUIDTable)
    }
  }

  it should " work if the user tries to use a TimeUUID on a fully handled predicate" in {
    val f1 = GtFilter("c1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), timeUUIDTable)
    ppd.predicatesToPushDown should contain (f1)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should " work if the user tries to use a TimeUUID column in a eq predicate" in {
    val f1 = EqFilter("c1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), timeUUIDTable)
    ppd.predicatesToPushDown should contain (f1)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "not push down a partition key predicate for a part of the partition key" in {
    val f1 = EqFilter("pk1")
    val ppd1 = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd1.predicatesToPushDown shouldBe empty
    ppd1.predicatesToPreserve should contain(f1)

    val f2 = EqFilter("pk2")
    val ppd2 = new BasicCassandraPredicatePushDown(Set[Filter](f2), table)
    ppd2.predicatesToPushDown shouldBe empty
    ppd2.predicatesToPreserve should contain(f2)
  }

  it should "not push down a range partition key predicate" in {
    val f1 = EqFilter("pk1")
    val f2 = LtFilter("pk2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain allOf(f1, f2)
  }

  it should "push down an IN partition key predicate on the last partition key column" in {
    val f1 = EqFilter("pk1")
    val f2 = InFilter("pk2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain allOf(f1, f2)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "not push down an IN partition key predicate on the non-last partition key column" in {
    val f1 = InFilter("pk1")
    val f2 = EqFilter("pk2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain allOf(f1, f2)
  }

  it should "push down the first clustering column predicate" in {
    val f1 = EqFilter("c1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd.predicatesToPushDown should contain only f1
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "push down the first and the second clustering column predicate" in {
    val f1 = EqFilter("c1")
    val f2 = LtFilter("c2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain only(f1, f2)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "push down restrictions on only the initial clustering columns" in {
    val f1 = EqFilter("c1")
    val f2 = EqFilter("c3")
    val ppd1 = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd1.predicatesToPushDown should contain only f1
    ppd1.predicatesToPreserve should contain only f2

    val ppd2 = new BasicCassandraPredicatePushDown(Set[Filter](f2), table)
    ppd2.predicatesToPushDown shouldBe empty
    ppd2.predicatesToPreserve should contain only f2
  }

  it should "push down only one range predicate restricting the first clustering column, " +
    "if there are more range predicates on different clustering columns" in {
    val f1 = LtFilter("c1")
    val f2 = LtFilter("c2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain only f1
    ppd.predicatesToPreserve should contain only f2
  }

  it should "push down multiple range predicates for the same clustering column" in {
    val f1 = LtFilter("c1")
    val f2 = GtFilter("c1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain allOf (f1, f2)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "push down clustering column predicates when the last clustering column is restricted by IN" in {
    val f1 = EqFilter("c1")
    val f2 = EqFilter("c2")
    val f3 = InFilter("c3")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2, f3), table)
    ppd.predicatesToPushDown should contain only(f1, f2, f3)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "stop pushing down clustering column predicates on the first range predicate" in {
    val f1 = EqFilter("c1")
    val f2 = LtFilter("c2")
    val f3 = EqFilter("c3")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2, f3), table)
    ppd.predicatesToPushDown should contain only(f1, f2)
    ppd.predicatesToPreserve should contain only f3
  }

  it should "not push down IN restriction on non-last column" in {
    val f1 = EqFilter("c1")
    val f2 = InFilter("c2")
    val f3 = EqFilter("c3")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2, f3), table)
    ppd.predicatesToPushDown should contain only f1
    ppd.predicatesToPreserve should contain only (f2, f3)
  }

  it should "not push down any clustering column predicates, if the first clustering column is missing" in {
    val f1 = EqFilter("c2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain only f1
  }

  it should "push down equality predicates on regular indexed columns" in {
    val f1 = EqFilter("i1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd.predicatesToPushDown should contain only f1
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "not push down range predicates on regular indexed columns" in {
    val f1 = LtFilter("i1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain only f1
  }

  it should "not push down IN predicates on regular indexed columns" in {
    val f1 = InFilter("i1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain only f1
  }

  it should "push down predicates on regular non-indexed and indexed columns" in {
    val f1 = EqFilter("r1")
    val f2 = EqFilter("r2")
    val f3 = EqFilter("i1")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2, f3), table)
    ppd.predicatesToPushDown should contain allOf(f1, f2, f3)
    ppd.predicatesToPreserve shouldBe empty
  }

  it should "not push down predicates on regular non-indexed columns if indexed ones are not included" in {
    val f1 = EqFilter("r1")
    val f2 = EqFilter("r2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown shouldBe empty
    ppd.predicatesToPreserve should contain allOf(f1, f2)
  }

  it should "prefer to push down equality predicates over range predicates" in {
    val f1 = EqFilter("c1")
    val f2 = EqFilter("c2")
    val f3 = LtFilter("c2")
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2, f3), table)
    ppd.predicatesToPushDown should contain only(f1, f2)
    ppd.predicatesToPreserve should contain only f3
  }

  it should "not push down unsupported predicates" in {
    val f1 = EqFilter("i1")
    val f2 = UnsupportedFilter
    val ppd = new BasicCassandraPredicatePushDown(Set[Filter](f1, f2), table)
    ppd.predicatesToPushDown should contain only f1
    ppd.predicatesToPreserve should contain only f2
  }
}
| christobill/spark-cassandra-connector | spark-cassandra-connector/src/test/scala/org/apache/spark/sql/cassandra/PredicatePushDownSpec.scala | Scala | apache-2.0 | 10,643 |
package logcluster.alg
import java.io.File
import java.io.PrintStream
import java.io.FileOutputStream
import logcluster.util.createDirOrCheckEmpty
import java.io.IOException
import scala.collection.mutable
import com.typesafe.scalalogging.StrictLogging
import org.joda.time.DateTime
import logcluster.persistence.ErrorPersister._
/**
 * Reporter that records cluster membership by persisting each log entry
 * under its cluster id.
 */
case class CassReporter(application: String) extends Reporter with StrictLogging {
  /** Persist `entry` as a member of the cluster identified by `clusterId`. */
  override def addToCluster(clusterId: String, entry: LogEntry): Unit =
    persist(entry.owner, clusterId, entry.original)
}
| despegar/logcluster | src/main/scala/logcluster/alg/CassReporter.scala | Scala | bsd-2-clause | 540 |
package org.jetbrains.plugins.hocon.ref
import com.intellij.openapi.util.TextRange
import com.intellij.psi.{ElementManipulators, PsiElement, PsiReference}
import org.jetbrains.plugins.hocon.psi.HKey
/**
 * A soft [[PsiReference]] from a HOCON key to itself. Resolution always
 * yields the key element, and renames are routed through the element
 * manipulator registered for [[HKey]].
 *
 * @author ghik
 */
class HKeySelfReference(key: HKey) extends PsiReference {

  def getElement: PsiElement = key

  /** Self-reference: always resolves to the key itself. */
  def resolve(): PsiElement = key

  def getCanonicalText: String = key.stringValue

  def getRangeInElement: TextRange = ElementManipulators.getValueTextRange(key)

  def isReferenceTo(element: PsiElement): Boolean =
    element == resolve()

  /** Delegates the rename to the key's registered element manipulator. */
  def handleElementRename(newElementName: String): PsiElement = {
    val manipulator = ElementManipulators.getManipulator(key)
    manipulator.handleContentChange(key, newElementName)
  }

  /** Rebinding to another element is not supported. */
  def bindToElement(element: PsiElement): PsiElement = null

  def isSoft = true

  override def getVariants: Array[AnyRef] = Array.empty
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/hocon/ref/HKeySelfReference.scala | Scala | apache-2.0 | 855 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel
package scala
package dsl
import org.apache.camel.ContextTestSupport
import org.apache.camel.component.mock.MockEndpoint
import builder.{RouteBuilder,RouteBuilderSupport}
import _root_.scala.collection.mutable.ArrayBuffer
/**
 * Base class for Camel Scala DSL tests: combines Camel's ContextTestSupport
 * with the Scala RouteBuilder and adds convenience wrappers around endpoint
 * URIs and mock endpoints.
 */
abstract class ScalaTestSupport extends ContextTestSupport with RouteBuilderSupport with Preamble {

  // Enrich String URIs and MockEndpoints with test helpers.
  implicit def stringToUri(uri:String) = new RichTestUri(uri, this)
  implicit def mockWrapper(endpoint: MockEndpoint) = new RichMockEndpoint(endpoint)

  // Mock endpoints registered via mock(); reset in setUp and asserted in test().
  val endpoints = new ArrayBuffer[MockEndpoint]()

  // Assert that the mock endpoint behind `uri` received what it expected.
  def assert(uri: String) = getMockEndpoint(uri).assertIsSatisfied

  protected[scala] def getTemplate() = template

  // Look up a mock endpoint and register it for automatic reset/assertion.
  protected[scala] def mock(uri: String) = {
    val mock = getMockEndpoint(uri)
    endpoints += mock
    mock
  }

  // Build an exchange carrying `message` as its in-body.
  def in(message: Any) : Exchange = createExchangeWithBody(message)

  // Subclasses supply the route under test.
  val builder : RouteBuilder

  override protected def createRouteBuilder = builder

  override def setUp = {
    super.setUp
    // Clear any expectations/state left over from a previous test.
    endpoints.foreach(_.reset())
  }

  // Run `block`, then verify every registered mock endpoint's expectations.
  def test(block : => Unit) = {
    block
    endpoints.foreach(_.assertIsSatisfied)
  }

  // Pre-populate the JNDI context with the bindings from `jndi`, if provided.
  override def createJndiContext = {
    jndi match {
      case Some(map) => {
        val context = super.createJndiContext
        map.foreach({case (key, value) => context.bind(key, value) })
        context
      }
      case None => super.createJndiContext
    }
  }

  // Override to contribute JNDI bindings; None means no extra bindings.
  def jndi : Option[Map[String, Any]] = None
}
| chicagozer/rheosoft | components/camel-scala/src/test/scala/org/apache/camel/scala/dsl/ScalaTestSupport.scala | Scala | apache-2.0 | 2,246 |
// scalac: -Xfatal-warnings
//
object Test { // neg-test fixture: line positions matter for the expected checkfile
  0 match {
    case _ => 0 == "" // comparing unrelated types Int and String: deliberately triggers the warning that -Xfatal-warnings turns into the expected error
  }
}
| scala/scala | test/files/neg/t7756b.scala | Scala | apache-2.0 | 85 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.data
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.tools.data.AccumuloManagePartitionsCommand._
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.tools.data.ManagePartitionsCommand
import org.locationtech.geomesa.tools.data.ManagePartitionsCommand._
import org.locationtech.geomesa.tools.{OptionalForceParam, RequiredTypeNameParam}
/**
 * Accumulo-specific entry point for the partition-management CLI: wires each
 * generic sub-command to its Accumulo implementation.
 */
class AccumuloManagePartitionsCommand extends ManagePartitionsCommand {
  override protected def list: AccumuloListPartitionsCommand = new AccumuloListPartitionsCommand
  override protected def add: AccumuloAddPartitionsCommand = new AccumuloAddPartitionsCommand
  override protected def adopt: AccumuloAdoptPartitionCommand = new AccumuloAdoptPartitionCommand
  override protected def delete: AccumuloDeletePartitionsCommand = new AccumuloDeletePartitionsCommand
  override protected def generate: AccumuloNamePartitionsCommand = new AccumuloNamePartitionsCommand
}
object AccumuloManagePartitionsCommand {

  // Sub-commands: each pairs the generic command trait with the Accumulo data
  // store and its JCommander parameter class.
  class AccumuloListPartitionsCommand extends AccumuloDataStoreCommand with ListPartitionsCommand[AccumuloDataStore] {
    override val params: AccumuloListPartitionsParams = new AccumuloListPartitionsParams
  }

  class AccumuloAddPartitionsCommand extends AccumuloDataStoreCommand with AddPartitionsCommand[AccumuloDataStore] {
    override val params: AccumuloAddPartitionsParams = new AccumuloAddPartitionsParams
  }

  class AccumuloAdoptPartitionCommand extends AccumuloDataStoreCommand with AdoptPartitionCommand[AccumuloDataStore] {
    override val params: AccumuloAdoptPartitionParams = new AccumuloAdoptPartitionParams
  }

  class AccumuloDeletePartitionsCommand extends AccumuloDataStoreCommand with DeletePartitionsCommand[AccumuloDataStore] {
    override val params: AccumuloDeletePartitionsParams = new AccumuloDeletePartitionsParams
  }

  class AccumuloNamePartitionsCommand extends AccumuloDataStoreCommand with NamePartitionsCommand[AccumuloDataStore] {
    override val params: AccumuloNamePartitionsParams = new AccumuloNamePartitionsParams
  }

  // JCommander parameter definitions for each sub-command.
  @Parameters(commandDescription = "List the current partitions for a GeoMesa schema")
  class AccumuloListPartitionsParams extends AccumuloDataStoreParams with RequiredTypeNameParam

  @Parameters(commandDescription = "Configure new partitions for a GeoMesa schema")
  class AccumuloAddPartitionsParams extends AccumuloDataStoreParams with PartitionsParam

  @Parameters(commandDescription = "Adopt existing tables as a new partition for a GeoMesa schema")
  class AccumuloAdoptPartitionParams extends AccumuloDataStoreParams with AdoptPartitionParam

  @Parameters(commandDescription = "Delete existing partitions for a GeoMesa schema")
  class AccumuloDeletePartitionsParams extends AccumuloDataStoreParams with PartitionsParam with OptionalForceParam

  @Parameters(commandDescription = "Generate partition names from input values")
  class AccumuloNamePartitionsParams extends AccumuloDataStoreParams with NamePartitionsParam
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/data/AccumuloManagePartitionsCommand.scala | Scala | apache-2.0 | 3,640 |
package fos
/**
 * Common infrastructure shared by the type-inference implementations:
 * the typing environment, environment lookup, and conversion from syntactic
 * type trees to proper types.
 */
abstract class TypeInferencers {
  import Type._

  /** Typing environment: an association list from variable names to type schemes. */
  type Env = List[(String, TypeScheme)]

  case class TypeError(msg: String) extends Exception(msg)

  /**
   * Lookup variable `name` in the given environment.
   * Returns null when the variable is unbound (legacy contract kept for callers).
   */
  def lookup(env: Env, name: String): TypeScheme = env match {
    case Nil               => null
    case (`name`, tp) :: _ => tp
    case _ :: rest         => lookup(rest, name)
  }

  /** Turn a syntactic type (given explicitly) into a proper type. */
  def toType(s: TypeTree): Type = s match {
    case BoolType        => TypeBool
    case NatType         => TypeNat
    case FunType(t1, t2) => TypeFun(toType(t1), toType(t2))
  }

  /** Compute the type of term `t`; implemented by concrete inferencers. */
  def typeOf(t: Term): Type
}
| sana/WorkAtEPFL | HindleyMillerTypeInference/src/fos/TypeInferencers.scala | Scala | gpl-3.0 | 661 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import org.apache.spark.MapOutputStatistics
import org.apache.spark.sql.catalyst.plans.logical.{HintInfo, Join, LogicalPlan, NO_BROADCAST_HASH}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
/**
 * This optimization rule detects a join child that has a high ratio of empty partitions and
 * adds a no-broadcast-hash-join hint to avoid it being broadcast.
 */
case class DemoteBroadcastHashJoin(conf: SQLConf) extends Rule[LogicalPlan] {

  // A child should be demoted when its materialized query stage reports that
  // the fraction of non-empty shuffle partitions is below the configured
  // threshold (broadcasting such skewed/sparse sides is rarely beneficial).
  private def shouldDemote(plan: LogicalPlan): Boolean = plan match {
    case LogicalQueryStage(_, stage: QueryStageExec)
      if stage.resultOption.isDefined &&
        stage.resultOption.get.isInstanceOf[MapOutputStatistics] =>
      val mapOutputStatistics = stage.resultOption.get.asInstanceOf[MapOutputStatistics]
      val partitionCnt = mapOutputStatistics.bytesByPartitionId.length
      val nonZeroCnt = mapOutputStatistics.bytesByPartitionId.count(_ > 0)
      partitionCnt > 0 && nonZeroCnt > 0 &&
        (nonZeroCnt * 1.0 / partitionCnt) < conf.nonEmptyPartitionRatioForBroadcastJoin
    // Stages without map output statistics (or non-stage plans) are never demoted.
    case _ => false
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDown {
    case j @ Join(left, right, _, _, hint) =>
      var newHint = hint
      // Only add NO_BROADCAST_HASH when the user has not already pinned a
      // strategy on that side and the side qualifies for demotion.
      if (!hint.leftHint.exists(_.strategy.isDefined) && shouldDemote(left)) {
        newHint = newHint.copy(leftHint =
          Some(hint.leftHint.getOrElse(HintInfo()).copy(strategy = Some(NO_BROADCAST_HASH))))
      }
      if (!hint.rightHint.exists(_.strategy.isDefined) && shouldDemote(right)) {
        newHint = newHint.copy(rightHint =
          Some(hint.rightHint.getOrElse(HintInfo()).copy(strategy = Some(NO_BROADCAST_HASH))))
      }
      // Reference equality (ne) keeps the original Join node untouched when
      // nothing changed, avoiding needless tree copies during transformDown.
      if (newHint.ne(hint)) {
        j.copy(hint = newHint)
      } else {
        j
      }
  }
}
| caneGuy/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/DemoteBroadcastHashJoin.scala | Scala | apache-2.0 | 2,648 |
package scaladex.dom
import scala.scalajs.js
import org.scalajs.dom.html.Element
@js.native // https://developer.mozilla.org/en-US/docs/Web/API/IntersectionObserverEntry
trait IntersectionObserverEntry extends js.Object {
  // Fraction of the target element that is visible within the root, in [0, 1].
  def intersectionRatio: Double = js.native
  // The observed element this entry describes.
  def target: Element = js.native
}
| scalacenter/scaladex | modules/webclient/src/main/scala/scaladex/dom/IntersectionObserverEntry.scala | Scala | bsd-3-clause | 305 |
package com.advancedspark.streaming.rating.ml.incremental.model
import edu.berkeley.cs.amplab.spark.indexedrdd.IndexedRDD
import edu.berkeley.cs.amplab.spark.indexedrdd.IndexedRDD._
/**
 * Latent matrix factorization model extended with the number of training
 * examples observed so far, for use in streaming/incremental updates.
 * All factor/bias fields are forwarded to [[LatentMatrixFactorizationModel]].
 */
case class StreamingLatentMatrixFactorizationModel(
    override val rank: Int,
    override val userFactors: IndexedRDD[Long, LatentFactor], // bias and the user row
    override val itemFactors: IndexedRDD[Long, LatentFactor], // bias and the item row
    override val globalBias: Float,
    observedExamples: Long, // running count of ratings seen so far
    override val minRating: Float,
    override val maxRating: Float)
  extends LatentMatrixFactorizationModel(rank, userFactors, itemFactors,
    globalBias, minRating, maxRating)
| fluxcapacitor/source.ml | apachespark.ml/demos/streaming/src/main/scala/com/advancedspark/streaming/rating/ml/incremental/model/StreamingLatentMatrixFactorizationModel.scala | Scala | apache-2.0 | 683 |
//package io.skysail.core.app
//
//import io.skysail.api.text.TranslationRenderService
//import io.skysail.core.Constants
//import org.slf4j.LoggerFactory
//
//case class ScalaTranslationRenderServiceHolder(val service: TranslationRenderService, props: Map[String, String]) {
//
// val log = LoggerFactory.getLogger(this.getClass())
//
// def getServiceRanking(): Int = {
// val serviceRanking = props.get(org.osgi.framework.Constants.SERVICE_RANKING)
// if (serviceRanking.isDefined) serviceRankingAsInt(serviceRanking) else 0
// }
//
// def serviceRankingAsInt(sr: Option[String]): Int = {
// try {
// return Integer.valueOf(sr.get);
// } catch {
// case e: Throwable => log.error(e.getMessage(), e)
// }
// 0
// }
//} | evandor/skysail-core | skysail.core/src/io/skysail/core/app/TranslationRenderServiceHolder.scala | Scala | apache-2.0 | 753 |
/*
* ******************************************************************************
* Copyright 2012-2013 SpotRight
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ******************************************************************************
*/
package com.spotright.polidoro
package model
import java.nio.ByteBuffer
import com.netflix.astyanax.serializers.AbstractSerializer
import com.netflix.astyanax.Keyspace
/**
 * Batchable Operations
 *
 * Operations can be acted on by classes and objects that mix in [[com.spotright.polidoro.session.BatchOps]].
 */
abstract sealed class Operation {
  // Key and column-name types of the row this operation targets.
  type Ktype
  type Ntype

  // Path to the affected row; keyspace/cfname/rowkey are derived from it.
  val rpath: RowPathish[Ktype]
  lazy val keyspace: Keyspace = rpath.keyspace
  lazy val cfname: String = rpath.cfname
  lazy val rowkey: Ktype = rpath.rowkey

  // Serializers for the key and column name.
  val keySD: AbstractSerializer[Ktype]
  val nameSD: AbstractSerializer[Ntype]
}

/** Insert (write) a single column value. */
case class Insert[K: Manifest, N: Manifest, V: Manifest](col: Column[K,N,V]) extends Operation {
  type Ktype = K
  type Ntype = N

  val rpath: RowPathish[Ktype] = col

  val keySD = SerDes[K]
  val nameSD = SerDes[N]
  val valSD = SerDes[V]
}

/** Increment a counter column by the amount carried in `col`. */
case class IncrCounter[K: Manifest, N: Manifest](col: Column[K,N,Long]) extends Operation {
  type Ktype = K
  type Ntype = N

  val rpath: RowPathish[Ktype] = col

  val keySD = SerDes[K]
  val nameSD = SerDes[N]
}

/** Delete a single column identified by `colpath`. */
case class Delete[K: Manifest, N: Manifest](colpath: ColumnPathish[K,N]) extends Operation {
  type Ktype = K
  type Ntype = N

  val rpath: RowPathish[Ktype] = colpath

  val keySD = SerDes[K]
  val nameSD = SerDes[N]
}

object Delete {
  // Deleting a whole row defaults the (unused) name type to ByteBuffer.
  def apply[K: Manifest](rowpath: RowPath[K]): RowDelete[K, ByteBuffer] = RowDelete[K, ByteBuffer](rowpath)
}

/** Delete an entire row identified by `rowpath`. */
case class RowDelete[K: Manifest, N: Manifest](rowpath: RowPathish[K]) extends Operation {
  type Ktype = K
  type Ntype = N

  val rpath: RowPathish[Ktype] = rowpath

  val keySD = SerDes[K]
  val nameSD = SerDes[N]

  /**
   * Change the name type for the RowDelete Operation.
   */
  def withNtype[T: Manifest]: RowDelete[K, T] = RowDelete[K, T](rowpath)
}
| SpotRight/Polidoro | src/main/scala/com/spotright/polidoro/model/Operation.scala | Scala | apache-2.0 | 2,585 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import java.util
import java.util.concurrent.locks.ReentrantReadWriteLock
import com.typesafe.scalalogging.Logger
import kafka.api.KAFKA_2_0_IV1
import kafka.network.RequestChannel.Session
import kafka.security.auth.SimpleAclAuthorizer.{VersionedAcls, NoAcls}
import kafka.server.KafkaConfig
import kafka.utils.CoreUtils.{inReadLock, inWriteLock}
import kafka.utils._
import kafka.zk.{AclChangeNotificationHandler, AclChangeSubscription, KafkaZkClient, ZkAclChangeStore, ZkAclStore, ZkVersion}
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.resource.PatternType
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.{SecurityUtils, Time}
import scala.collection.JavaConverters._
import scala.util.Random
/** Configuration keys and shared value types for [[SimpleAclAuthorizer]]. */
object SimpleAclAuthorizer {
  //optional override zookeeper cluster configuration where acls will be stored, if not specified acls will be stored in
  //same zookeeper where all other kafka broker info is stored.
  val ZkUrlProp = "authorizer.zookeeper.url"
  val ZkConnectionTimeOutProp = "authorizer.zookeeper.connection.timeout.ms"
  val ZkSessionTimeOutProp = "authorizer.zookeeper.session.timeout.ms"
  val ZkMaxInFlightRequests = "authorizer.zookeeper.max.in.flight.requests"
  //List of users that will be treated as super users and will have access to all the resources for all actions from all hosts, defaults to no super users.
  val SuperUsersProp = "super.users"
  //If set to true when no acls are found for a resource , authorizer allows access to everyone. Defaults to false.
  val AllowEveryoneIfNoAclIsFoundProp = "allow.everyone.if.no.acl.found"
  /**
   * A set of ACLs paired with the ZooKeeper node version they were read at,
   * enabling optimistic-concurrency (conditional) updates.
   */
  case class VersionedAcls(acls: Set[Acl], zkVersion: Int) {
    // True when the backing ZK node existed at read time.
    def exists: Boolean = zkVersion != ZkVersion.UnknownVersion
  }
  // Sentinel for "no ACLs / node absent".
  val NoAcls = VersionedAcls(Set.empty, ZkVersion.UnknownVersion)
}
/**
 * ZooKeeper-backed [[Authorizer]]: ACLs are stored in ZK, cached locally in an
 * immutable TreeMap, and kept in sync via ZK change notifications.
 */
class SimpleAclAuthorizer extends Authorizer with Logging {
  private val authorizerLogger = Logger("kafka.authorizer.logger")
  // Principals that bypass all ACL checks; loaded from the "super.users" config.
  private var superUsers = Set.empty[KafkaPrincipal]
  private var shouldAllowEveryoneIfNoAclIsFound = false
  private var zkClient: KafkaZkClient = _
  private var aclChangeListeners: Iterable[AclChangeSubscription] = Iterable.empty
  // True when inter-broker protocol >= KAFKA_2_0_IV1; required for PREFIXED patterns.
  private var extendedAclSupport: Boolean = _
  // Immutable snapshot cache of all ACLs, ordered by ResourceOrdering so that
  // getMatchingAcls can range-scan prefixed patterns. @volatile: readers may see
  // it without the lock; writers replace the whole map under the write lock.
  @volatile
  private var aclCache = new scala.collection.immutable.TreeMap[Resource, VersionedAcls]()(ResourceOrdering)
  private val lock = new ReentrantReadWriteLock()
  // The maximum number of times we should try to update the resource acls in zookeeper before failing;
  // This should never occur, but is a safeguard just in case.
  protected[auth] var maxUpdateRetries = 10
  private val retryBackoffMs = 100
  private val retryBackoffJitterMs = 50
  /**
   * Guaranteed to be called before any authorize call is made.
   */
  override def configure(javaConfigs: util.Map[String, _]) {
    val configs = javaConfigs.asScala
    val props = new java.util.Properties()
    configs.foreach { case (key, value) => props.put(key, value.toString) }
    // Super users are ';'-separated principal strings, e.g. "User:admin;User:bob".
    superUsers = configs.get(SimpleAclAuthorizer.SuperUsersProp).collect {
      case str: String if str.nonEmpty => str.split(";").map(s => SecurityUtils.parseKafkaPrincipal(s.trim)).toSet
    }.getOrElse(Set.empty[KafkaPrincipal])
    shouldAllowEveryoneIfNoAclIsFound = configs.get(SimpleAclAuthorizer.AllowEveryoneIfNoAclIsFoundProp).exists(_.toString.toBoolean)
    // Use `KafkaConfig` in order to get the default ZK config values if not present in `javaConfigs`. Note that this
    // means that `KafkaConfig.zkConnect` must always be set by the user (even if `SimpleAclAuthorizer.ZkUrlProp` is also
    // set).
    val kafkaConfig = KafkaConfig.fromProps(props, doLog = false)
    val zkUrl = configs.get(SimpleAclAuthorizer.ZkUrlProp).map(_.toString).getOrElse(kafkaConfig.zkConnect)
    val zkConnectionTimeoutMs = configs.get(SimpleAclAuthorizer.ZkConnectionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkConnectionTimeoutMs)
    val zkSessionTimeOutMs = configs.get(SimpleAclAuthorizer.ZkSessionTimeOutProp).map(_.toString.toInt).getOrElse(kafkaConfig.zkSessionTimeoutMs)
    val zkMaxInFlightRequests = configs.get(SimpleAclAuthorizer.ZkMaxInFlightRequests).map(_.toString.toInt).getOrElse(kafkaConfig.zkMaxInFlightRequests)
    val time = Time.SYSTEM
    zkClient = KafkaZkClient(zkUrl, kafkaConfig.zkEnableSecureAcls, zkSessionTimeOutMs, zkConnectionTimeoutMs,
      zkMaxInFlightRequests, time, "kafka.security", "SimpleAclAuthorizer")
    zkClient.createAclPaths()
    extendedAclSupport = kafkaConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1
    // Start change listeners first and then populate the cache so that there is no timing window
    // between loading cache and processing change notifications.
    startZkChangeListeners()
    loadCache()
  }
  /**
   * Decide whether `session` may perform `operation` on `resource`.
   * Access is granted if the principal is a super user, or if the matching
   * ACLs allow it (deny ACLs take precedence over allow ACLs).
   */
  override def authorize(session: Session, operation: Operation, resource: Resource): Boolean = {
    // Authorization is always evaluated against LITERAL patterns; prefixed/wildcard
    // ACLs are expanded inside getMatchingAcls instead.
    if (resource.patternType != PatternType.LITERAL) {
      throw new IllegalArgumentException("Only literal resources are supported. Got: " + resource.patternType)
    }
    // ensure we compare identical classes
    val sessionPrincipal = session.principal
    val principal = if (classOf[KafkaPrincipal] != sessionPrincipal.getClass)
      new KafkaPrincipal(sessionPrincipal.getPrincipalType, sessionPrincipal.getName)
    else
      sessionPrincipal
    val host = session.clientAddress.getHostAddress
    def isEmptyAclAndAuthorized(acls: Set[Acl]): Boolean = {
      if (acls.isEmpty) {
        // No ACLs found for this resource, permission is determined by value of config allow.everyone.if.no.acl.found
        authorizerLogger.debug(s"No acl found for resource $resource, authorized = $shouldAllowEveryoneIfNoAclIsFound")
        shouldAllowEveryoneIfNoAclIsFound
      } else false
    }
    def denyAclExists(acls: Set[Acl]): Boolean = {
      // Check if there are any Deny ACLs which would forbid this operation.
      aclMatch(operation, resource, principal, host, Deny, acls)
    }
    def allowAclExists(acls: Set[Acl]): Boolean = {
      // Check if there are any Allow ACLs which would allow this operation.
      // Allowing read, write, delete, or alter implies allowing describe.
      // See #{org.apache.kafka.common.acl.AclOperation} for more details about ACL inheritance.
      val allowOps = operation match {
        case Describe => Set[Operation](Describe, Read, Write, Delete, Alter)
        case DescribeConfigs => Set[Operation](DescribeConfigs, AlterConfigs)
        case _ => Set[Operation](operation)
      }
      allowOps.exists(operation => aclMatch(operation, resource, principal, host, Allow, acls))
    }
    def aclsAllowAccess = {
      //we allow an operation if no acls are found and user has configured to allow all users
      //when no acls are found or if no deny acls are found and at least one allow acls matches.
      val acls = getMatchingAcls(resource.resourceType, resource.name)
      isEmptyAclAndAuthorized(acls) || (!denyAclExists(acls) && allowAclExists(acls))
    }
    // Evaluate if operation is allowed
    val authorized = isSuperUser(operation, resource, principal, host) || aclsAllowAccess
    logAuditMessage(principal, authorized, operation, resource, host)
    authorized
  }
  /** True when `principal` is configured as a super user (operation/resource/host are not consulted). */
  def isSuperUser(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String): Boolean = {
    if (superUsers.contains(principal)) {
      authorizerLogger.debug(s"principal = $principal is a super user, allowing operation without checking acls.")
      true
    } else false
  }
  // True if any ACL in `acls` with the given permission type matches the
  // principal (or wildcard), operation (or All), and host (or wildcard).
  private def aclMatch(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String, permissionType: PermissionType, acls: Set[Acl]): Boolean = {
    acls.find { acl =>
      acl.permissionType == permissionType &&
        (acl.principal == principal || acl.principal == Acl.WildCardPrincipal) &&
        (operation == acl.operation || acl.operation == All) &&
        (acl.host == host || acl.host == Acl.WildCardHost)
    }.exists { acl =>
      // Side effect: log which ACL decided the outcome, then report the match.
      authorizerLogger.debug(s"operation = $operation on resource = $resource from host = $host is $permissionType based on acl = $acl")
      true
    }
  }
  /** Add `acls` to `resource`, persisting to ZK and updating the local cache. */
  override def addAcls(acls: Set[Acl], resource: Resource) {
    if (acls != null && acls.nonEmpty) {
      // Prefixed patterns are only storable once the whole cluster understands them.
      if (!extendedAclSupport && resource.patternType == PatternType.PREFIXED) {
        throw new UnsupportedVersionException(s"Adding ACLs on prefixed resource patterns requires " +
          s"${KafkaConfig.InterBrokerProtocolVersionProp} of $KAFKA_2_0_IV1 or greater")
      }
      inWriteLock(lock) {
        updateResourceAcls(resource) { currentAcls =>
          currentAcls ++ acls
        }
      }
    }
  }
  /** Remove the given ACLs from `resource`; returns true if anything changed. */
  override def removeAcls(aclsTobeRemoved: Set[Acl], resource: Resource): Boolean = {
    inWriteLock(lock) {
      updateResourceAcls(resource) { currentAcls =>
        currentAcls -- aclsTobeRemoved
      }
    }
  }
  /** Remove all ACLs for `resource`; returns true if the ZK node was deleted. */
  override def removeAcls(resource: Resource): Boolean = {
    inWriteLock(lock) {
      val result = zkClient.deleteResource(resource)
      updateCache(resource, NoAcls)
      updateAclChangedFlag(resource)
      result
    }
  }
  /** ACLs cached for exactly `resource` (no pattern matching applied). */
  override def getAcls(resource: Resource): Set[Acl] = {
    inReadLock(lock) {
      aclCache.get(resource).map(_.acls).getOrElse(Set.empty[Acl])
    }
  }
  /** All cached ACLs that name `principal`, grouped by resource (empty entries dropped). */
  override def getAcls(principal: KafkaPrincipal): Map[Resource, Set[Acl]] = {
    inReadLock(lock) {
      aclCache.mapValues { versionedAcls =>
        versionedAcls.acls.filter(_.principal == principal)
      }.filter { case (_, acls) =>
        acls.nonEmpty
      }
    }
  }
  /**
   * Union of ACLs applying to a literal resource name: the wildcard resource,
   * the exact literal entry, and every PREFIXED entry whose prefix matches.
   */
  def getMatchingAcls(resourceType: ResourceType, resourceName: String): Set[Acl] = {
    inReadLock(lock) {
      val wildcard = aclCache.get(Resource(resourceType, Acl.WildCardResource, PatternType.LITERAL))
        .map(_.acls)
        .getOrElse(Set.empty[Acl])
      val literal = aclCache.get(Resource(resourceType, resourceName, PatternType.LITERAL))
        .map(_.acls)
        .getOrElse(Set.empty[Acl])
      // Range scan works because ResourceOrdering sorts names in reverse within a
      // (type, patternType) group: all candidate prefixes of resourceName fall
      // between the full name and its first character.
      val prefixed = aclCache.range(
        Resource(resourceType, resourceName, PatternType.PREFIXED),
        Resource(resourceType, resourceName.take(1), PatternType.PREFIXED)
      )
        .filterKeys(resource => resourceName.startsWith(resource.name))
        .flatMap { case (resource, versionedAcls) => versionedAcls.acls }
        .toSet
      prefixed ++ wildcard ++ literal
    }
  }
  /** Snapshot of all cached ACLs keyed by resource. */
  override def getAcls(): Map[Resource, Set[Acl]] = {
    inReadLock(lock) {
      aclCache.mapValues(_.acls)
    }
  }
  /** Stop ZK change listeners and close the ZK client. */
  def close() {
    aclChangeListeners.foreach(listener => listener.close())
    if (zkClient != null) zkClient.close()
  }
  // Populate the cache from every ACL store (literal and prefixed) in ZK.
  private def loadCache()  {
    inWriteLock(lock) {
      ZkAclStore.stores.foreach(store => {
        val resourceTypes = zkClient.getResourceTypes(store.patternType)
        for (rType <- resourceTypes) {
          val resourceType = ResourceType.fromString(rType)
          val resourceNames = zkClient.getResourceNames(store.patternType, resourceType)
          for (resourceName <- resourceNames) {
            val resource = new Resource(resourceType, resourceName, store.patternType)
            val versionedAcls = getAclsFromZk(resource)
            updateCache(resource, versionedAcls)
          }
        }
      })
    }
  }
  // Subscribe to ACL change notifications for every store type.
  private[auth] def startZkChangeListeners(): Unit = {
    aclChangeListeners = ZkAclChangeStore.stores
      .map(store => store.createListener(AclChangedNotificationHandler, zkClient))
  }
  // Denied outcomes are logged at INFO, allowed at DEBUG, so denials surface by default.
  private def logAuditMessage(principal: KafkaPrincipal, authorized: Boolean, operation: Operation, resource: Resource, host: String) {
    def logMessage: String = {
      val authResult = if (authorized) "Allowed" else "Denied"
      s"Principal = $principal is $authResult Operation = $operation from host = $host on resource = $resource"
    }
    if (authorized) authorizerLogger.debug(logMessage)
    else authorizerLogger.info(logMessage)
  }
  /**
   * Safely updates the resources ACLs by ensuring reads and writes respect the expected zookeeper version.
   * Continues to retry until it successfully updates zookeeper.
   *
   * Returns a boolean indicating if the content of the ACLs was actually changed.
   *
   * @param resource the resource to change ACLs for
   * @param getNewAcls function to transform existing acls to new ACLs
   * @return boolean indicating if a change was made
   */
  private def updateResourceAcls(resource: Resource)(getNewAcls: Set[Acl] => Set[Acl]): Boolean = {
    var currentVersionedAcls =
      if (aclCache.contains(resource))
        getAclsFromCache(resource)
      else
        getAclsFromZk(resource)
    var newVersionedAcls: VersionedAcls = null
    var writeComplete = false
    var retries = 0
    while (!writeComplete && retries <= maxUpdateRetries) {
      val newAcls = getNewAcls(currentVersionedAcls.acls)
      val (updateSucceeded, updateVersion) =
        if (newAcls.nonEmpty) {
          // Conditional set keyed on the version we read: fails if someone else wrote first.
          if (currentVersionedAcls.exists)
            zkClient.conditionalSetAclsForResource(resource, newAcls, currentVersionedAcls.zkVersion)
          else
            zkClient.createAclsForResourceIfNotExists(resource, newAcls)
        } else {
          trace(s"Deleting path for $resource because it had no ACLs remaining")
          (zkClient.conditionalDelete(resource, currentVersionedAcls.zkVersion), 0)
        }
      if (!updateSucceeded) {
        trace(s"Failed to update ACLs for $resource. Used version ${currentVersionedAcls.zkVersion}. Reading data and retrying update.")
        Thread.sleep(backoffTime)
        currentVersionedAcls = getAclsFromZk(resource)
        retries += 1
      } else {
        newVersionedAcls = VersionedAcls(newAcls, updateVersion)
        writeComplete = updateSucceeded
      }
    }
    if(!writeComplete)
      throw new IllegalStateException(s"Failed to update ACLs for $resource after trying a maximum of $maxUpdateRetries times")
    if (newVersionedAcls.acls != currentVersionedAcls.acls) {
      debug(s"Updated ACLs for $resource to ${newVersionedAcls.acls} with version ${newVersionedAcls.zkVersion}")
      updateCache(resource, newVersionedAcls)
      updateAclChangedFlag(resource)
      true
    } else {
      debug(s"Updated ACLs for $resource, no change was made")
      updateCache(resource, newVersionedAcls) // Even if no change, update the version
      false
    }
  }
  private def getAclsFromCache(resource: Resource): VersionedAcls = {
    aclCache.getOrElse(resource, throw new IllegalArgumentException(s"ACLs do not exist in the cache for resource $resource"))
  }
  private def getAclsFromZk(resource: Resource): VersionedAcls = {
    zkClient.getVersionedAclsForResource(resource)
  }
  // Replace (or drop, when empty) the cache entry; callers hold the write lock.
  private def updateCache(resource: Resource, versionedAcls: VersionedAcls) {
    if (versionedAcls.acls.nonEmpty) {
      aclCache = aclCache + (resource -> versionedAcls)
    } else {
      aclCache = aclCache - resource
    }
  }
  // Notify other brokers (via a ZK change node) that this resource's ACLs changed.
  private def updateAclChangedFlag(resource: Resource) {
    zkClient.createAclChangeNotification(resource)
  }
  // Fixed backoff plus random jitter to avoid retry thundering herds.
  private def backoffTime = {
    retryBackoffMs + Random.nextInt(retryBackoffJitterMs)
  }
  // Re-reads ACLs from ZK when another broker signals a change for a resource.
  object AclChangedNotificationHandler extends AclChangeNotificationHandler {
    override def processNotification(resource: Resource) {
      inWriteLock(lock) {
        val versionedAcls = getAclsFromZk(resource)
        updateCache(resource, versionedAcls)
      }
    }
  }
  // Orders by resource type, then resource pattern type and finally reverse ordering by name.
  private object ResourceOrdering extends Ordering[Resource] {
    def compare(a: Resource, b: Resource): Int = {
      val rt = a.resourceType compare b.resourceType
      if (rt != 0)
        rt
      else {
        val rnt = a.patternType compareTo b.patternType
        if (rnt != 0)
          rnt
        else
          // Reverse name order makes prefixed-pattern range scans possible in getMatchingAcls.
          (a.name compare b.name) * -1
      }
    }
  }
}
| ollie314/kafka | core/src/main/scala/kafka/security/auth/SimpleAclAuthorizer.scala | Scala | apache-2.0 | 16,760 |
/*
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.resolver.db
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import cats.data.NonEmptyList
import scala.collection.immutable.VectorBuilder
object GroupedByPredicate {
  /** Convenience factory for the graph stage. */
  def apply[T, U](pred: T => U): GroupedByPredicate[T, U] = new GroupedByPredicate(pred)
}
/**
 * Akka Streams stage that groups consecutive elements sharing the same value of
 * `pred`. Each time the predicate value changes (or upstream finishes), the
 * buffered run is emitted downstream as a NonEmptyList.
 */
class GroupedByPredicate[T, U](pred: T => U) extends GraphStage[FlowShape[T, NonEmptyList[T]]] {
  val in = Inlet[T]("GroupByPred.in")
  val out = Outlet[NonEmptyList[T]]("GroupByPred.out")
  override def shape: FlowShape[T, NonEmptyList[T]] = FlowShape(in, out)
  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = {
    new GraphStageLogic(shape) {
      // Elements of the current run (all share the same predicate value).
      private val buf: VectorBuilder[T] = new VectorBuilder
      // Predicate value of the current run; None until the first element arrives.
      private var lastPred = Option.empty[U]
      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          // read(in)(andThen, onClose): second callback fires if upstream closes
          // while we are waiting for the element.
          read(in)(e => {
            val p = pred(e)
            if(lastPred.isEmpty || lastPred.contains(p)) { // same pred
              buf += e
              lastPred = Some(p)
              pull(in)
            } else { // predicate changed
              // Flush the finished run first, then start a new run with `e`.
              emitBuffer(() => {
                buf += e
                lastPred = Some(p)
                pull(in)
              })
            }
          }
          ,
          () => emitBuffer(completeStage)
          )
        }
        override def onUpstreamFinish(): Unit = {
          // Flush whatever is buffered before completing the stage.
          emitBuffer(completeStage)
        }
      })
      // Emit the buffered run (if any) downstream, then continue with `andThen`.
      private def emitBuffer(andThen: () => Unit): Unit = {
        val b = buf.result()
        if(b.nonEmpty) {
          val e = NonEmptyList(b.head, b.tail.toList)
          buf.clear()
          emit(out, e, andThen)
        } else {
          andThen.apply()
        }
      }
      setHandler(out, new OutHandler {
        override def onPull(): Unit =
          if(!isClosed(in) && !hasBeenPulled(in)) pull(in)
      })
    }
  }
}
| PDXostc/rvi_sota_server | external-resolver/src/main/scala/org/genivi/sota/resolver/db/GroupedByPredicate.scala | Scala | mpl-2.0 | 2,075 |
package sw.rdds
import org.apache.spark._
/** Workshop demo: builds an RDD of the even numbers 2..1000 and runs a few operations on it. */
object SomeTransformations extends App {
  val conf = new SparkConf().setAppName(this.getClass.getName).setMaster("local[*]")
  val context = new SparkContext(conf)
  val evens = context.parallelize(1 to 1000).filter(n => n % 2 == 0)
  println(s"sum: ${evens.reduce((a, b) => a + b)}")
  println(s"first even: ${evens.first}")
  println("Printing all even, that can be divided by 150")
  println(evens.filter(n => n % 150 == 0).foreach(n => println(s"Hey I'm $n and I can be divided by 150!'")))
  context.stop()
}
/** Workshop demo mirroring SomeTransformations: same even-number RDD, same actions. */
object SomeActions extends App {
  val cfg = new SparkConf()
    .setMaster("local[*]")
    .setAppName(this.getClass.getName)
  val spark = new SparkContext(cfg)
  val evenRdd = spark.parallelize(1 to 1000).filter(x => x % 2 == 0)
  println("sum: " + evenRdd.reduce((x, y) => x + y))
  println("first even: " + evenRdd.first)
  println("Printing all even, that can be divided by 150")
  println(evenRdd.filter(x => x % 150 == 0).foreach(n => println(s"Hey I'm $n and I can be divided by 150!'")))
  spark.stop()
}
| rabbitonweb/spark-workshop | src/main/scala/sw/rdds/SomeOperations.scala | Scala | apache-2.0 | 1,092 |
package task
import org.apache.samza.system.IncomingMessageEnvelope
import org.apache.samza.task.{TaskCoordinator, MessageCollector, StreamTask}
/**
* Created by mike on 17/06/15.
*/
/**
 * Samza stream task invoked once per incoming message.
 *
 * NOTE(review): despite the name, nothing is counted yet — the raw message is
 * only printed; deserialization and aggregation remain TODO.
 */
class EventCounterStreamTask extends StreamTask {
  override def process(envelope: IncomingMessageEnvelope, collector: MessageCollector, coordinator: TaskCoordinator): Unit = {
    // need to deserialise the message and then output something
    println(envelope.getMessage)
  }
}
| mikehancock/kafka-sample | app/task/EventCounterStreamTask.scala | Scala | mit | 508 |
package com.roundeights.s3cala
/** Thrown when an S3 request fails */
/**
 * Thrown when an S3 request fails.
 *
 * Message and cause are both optional via the auxiliary constructors,
 * mirroring the java.lang.Exception constructor overloads.
 */
class S3Failed (
    message: String, cause: Throwable
) extends Exception( message, cause ) {
    /** Alternate constructor: cause only, no message. */
    def this ( cause: Throwable ) = this( null, cause )
    /** Alternate constructor: message only, no cause. */
    def this ( message: String ) = this( message, null )
}
/** Signals that the requested S3 object (bucket/key) does not exist. */
class S3NotFound ( val bucket: String, val key: String ) extends S3Failed (
    s"S3 resource not found: $bucket/$key"
)
| Nycto/S3cala | src/main/scala/s3cala/Err.scala | Scala | mit | 532 |
// java: -Dneeds.forked.jvm.maybe.because.context.classloader
/**
* Checks that serialization of hash-based collections works correctly if the hashCode
* changes on deserialization.
*/
object Test {
  import collection._
  /**
   * For several collection sizes, builds hash-based maps and sets of Foo keys,
   * serializes them, perturbs Foo's hashCode (simulating a JVM change for an
   * identity-based hash), deserializes, and asserts lookups still succeed and
   * the concrete collection class is preserved.
   */
  def main(args: Array[String]): Unit = {
    for (i <- Seq(0, 1, 2, 10, 100)) {
      // `def` (not `val`): each use creates fresh Foo instances.
      def entries = (0 until i).map(i => (new Foo, i)).toList
      def elements = entries.map(_._1)
      val maps = Seq[Map[Foo, Int]](new mutable.HashMap, new mutable.LinkedHashMap,
        immutable.HashMap.empty).map(_ ++ entries)
      test[Map[Foo, Int]](maps, entries.size, assertMap _)
      val sets = Seq[Set[Foo]](new mutable.HashSet, new mutable.LinkedHashSet,
        immutable.HashSet.empty).map(_ ++ elements)
      test[Set[Foo]](sets, entries.size, assertSet _)
    }
  }
  // Round-trips each collection through serialization with the hashCode modifier
  // flipped during deserialization, then re-validates contents and class identity.
  private def test[A <: AnyRef](collections: Seq[A], expectedSize: Int, assertFunction: (A, Int) => Unit): Unit = {
    for (collection <- collections) {
      assertFunction(collection, expectedSize)
      val bytes = toBytes(collection)
      // Change hashCode between serialize and deserialize to force rehashing.
      Foo.hashCodeModifier = 1
      val deserializedCollection = toObject[A](bytes)
      assertFunction(deserializedCollection, expectedSize)
      assert(deserializedCollection.getClass == collection.getClass,
        "collection class should remain the same after deserialization ("+deserializedCollection.getClass+" != "+collection.getClass+")")
      // Reset so the next iteration serializes under the original hashCode.
      Foo.hashCodeModifier = 0
    }
  }
  // Deserializes an object from the given bytes.
  private def toObject[A](bytes: Array[Byte]): A = {
    val in = new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bytes))
    in.readObject.asInstanceOf[A]
  }
  // Serializes an object to a byte array.
  private def toBytes(o: AnyRef): Array[Byte] = {
    val bos = new java.io.ByteArrayOutputStream
    val out = new java.io.ObjectOutputStream(bos)
    out.writeObject(o)
    out.close
    bos.toByteArray
  }
  // Asserts the map has the expected size and every entry is retrievable.
  private def assertMap[A, B](map: Map[A, B], expectedSize: Int): Unit = {
    assert(expectedSize == map.size, "expected map size: " + expectedSize + ", actual size: " + map.size)
    map.foreach { case (k, v) =>
      assert(map.contains(k), "contains should return true for key in the map, key: " + k)
      assert(map(k) == v)
    }
  }
  // Asserts the set has the expected size and contains each of its own elements.
  private def assertSet[A](set: Set[A], expectedSize: Int): Unit = {
    assert(expectedSize == set.size, "expected set size: " + expectedSize + ", actual size: " + set.size)
    set.foreach { e => assert(set.contains(e), "contains should return true for element in the set, element: " + e) }
  }
  object Foo {
    /* Used to simulate a hashCode change caused by deserializing an instance with an
     * identity-based hashCode in another JVM.
     */
    var hashCodeModifier = 0
  }
  // Identity-hashed key whose hashCode can be shifted via Foo.hashCodeModifier.
  class Foo extends Serializable {
    override def hashCode = System.identityHashCode(this) + Foo.hashCodeModifier
  }
}
| scala/scala | test/files/jvm/t1600.scala | Scala | apache-2.0 | 2,778 |
package com.cloudera.hue.livy.server.sessions
import java.net.URL
import java.util.concurrent.TimeUnit
import com.cloudera.hue.livy._
import com.cloudera.hue.livy.msgs.ExecuteRequest
import com.cloudera.hue.livy.server.Statement
import com.cloudera.hue.livy.server.sessions.Session._
import dispatch._
import org.json4s.jackson.Serialization.write
import org.json4s.{DefaultFormats, Formats}
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration.Duration
import scala.concurrent.{Future, _}
/**
 * A [[Session]] backed by a remote HTTP endpoint: statements are POSTed to the
 * session's URL and the session is torn down with an HTTP DELETE.
 *
 * State transitions: NotStarted/Starting -> Idle -> Busy -> Idle ... -> Dead.
 * Mutable state is guarded by `synchronized` on this instance.
 */
class WebSession(val id: String,
                 val kind: Kind,
                 val proxyUser: Option[String]) extends Session with Logging {
  protected implicit def executor: ExecutionContextExecutor = ExecutionContext.global
  protected implicit def jsonFormats: Formats = DefaultFormats
  protected[this] var _state: State = Starting()
  // Long.MaxValue until the first statement runs, so a fresh session never
  // looks idle to timeout-based reapers.
  private[this] var _lastActivity = Long.MaxValue
  // Endpoint of the remote interpreter; set exactly once via url_=.
  private[this] var _url: Option[URL] = None
  // Monotonic counter used as the next statement id.
  private[this] var executedStatements = 0
  private[this] var statements_ = new ArrayBuffer[Statement]
  override def url: Option[URL] = _url
  /** Record the remote endpoint; only legal while Starting, and moves the session to Idle. */
  override def url_=(url: URL) = {
    ensureState(Session.Starting(), {
      _state = Idle()
      _url = Some(url)
    })
  }
  // Dispatch request builder for the session endpoint.
  // NOTE(review): `_url.head` throws if the URL was never set — callers must only
  // use this after url_= has run.
  private def svc = {
    val url = _url.head
    dispatch.url(url.toString)
  }
  override def lastActivity: Long = _lastActivity
  override def state: State = _state
  /**
   * POST the statement to the remote session and record it locally.
   * The session is Busy until the HTTP response arrives, then returns to Idle.
   * Only legal while Idle.
   */
  override def executeStatement(content: ExecuteRequest): Statement = {
    ensureIdle {
      _state = Busy()
      touchLastActivity()
      var req = (svc / "execute").setContentType("application/json", "UTF-8")
      req = req << write(content)
      val future = Http(req OK as.json4s.Json).map { case (resp) =>
        synchronized {
          transition(Idle())
          resp
        }
      }
      var statement = new Statement(executedStatements, content, future)
      executedStatements += 1
      statements_ += statement
      statement
    }
  }
  override def statement(statementId: Int): Option[Statement] = statements_.lift(statementId)
  override def statements(): Seq[Statement] = statements_.toSeq
  override def statements(fromIndex: Integer, toIndex: Integer): Seq[Statement] = {
    statements_.slice(fromIndex, toIndex).toSeq
  }
  // Interrupting is implemented by stopping the whole session.
  override def interrupt(): Future[Unit] = {
    stop()
  }
  /**
   * Tear down the remote session. If the session is mid-transition
   * (NotStarted/Starting/Busy), wait up to 10s for the state to settle and
   * retry; already Dead/Error sessions complete immediately.
   */
  override def stop(): Future[Unit] = {
    synchronized {
      _state match {
        case Idle() =>
          _state = Busy()
          Http(svc.DELETE OK as.String).map { case rep =>
            synchronized {
              _state = Dead()
            }
            Unit
          }
        case NotStarted() =>
          Future {
            waitForStateChange(NotStarted(), Duration(10, TimeUnit.SECONDS))
            stop()
          }
        case Starting() =>
          Future {
            waitForStateChange(Starting(), Duration(10, TimeUnit.SECONDS))
            stop()
          }
        case Busy() =>
          Future {
            waitForStateChange(Busy(), Duration(10, TimeUnit.SECONDS))
            stop()
          }
        case Error() | Dead() =>
          Future.successful(Unit)
      }
    }
  }
  private def transition(state: State) = synchronized {
    _state = state
  }
  // Bump the activity timestamp (used for idle-session expiry).
  private def touchLastActivity() = {
    _lastActivity = System.currentTimeMillis()
  }
  // Run `f` only when the session is exactly in `state`; otherwise fail fast.
  private def ensureState[A](state: State, f: => A) = {
    synchronized {
      if (_state == state) {
        f
      } else {
        throw new IllegalStateException("Session is in state %s" format _state)
      }
    }
  }
  private def ensureIdle[A](f: => A) = {
    ensureState(Idle(), f)
  }
  // Run `f` only when the session is Idle or Busy (i.e. running).
  private def ensureRunning[A](f: => A) = {
    synchronized {
      _state match {
        case Idle() | Busy() =>
          f
        case _ =>
          throw new IllegalStateException("Session is in state %s" format _state)
      }
    }
  }
}
| nvoron23/hue | apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/sessions/WebSession.scala | Scala | apache-2.0 | 3,878 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.scheduler
import akka.Done
import akka.actor.{ActorRef, ActorRefFactory, ActorSelection, ActorSystem, CoordinatedShutdown, Props}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.util.Timeout
import akka.pattern.ask
import com.typesafe.config.ConfigValueFactory
import kamon.Kamon
import org.apache.openwhisk.common.Https.HttpsConfig
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.core.WhiskConfig.{servicePort, _}
import org.apache.openwhisk.core.ack.{MessagingActiveAck, UserEventSender}
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.database.{ActivationStoreProvider, NoDocumentException, UserContext}
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.etcd.EtcdKV.{QueueKeys, SchedulerKeys}
import org.apache.openwhisk.core.etcd.EtcdType.ByteStringToString
import org.apache.openwhisk.core.etcd.{EtcdClient, EtcdConfig}
import org.apache.openwhisk.core.scheduler.container.{ContainerManager, CreationJobManager}
import org.apache.openwhisk.core.scheduler.grpc.ActivationServiceImpl
import org.apache.openwhisk.core.scheduler.queue.{
DurationCheckerProvider,
MemoryQueue,
QueueManager,
QueueSize,
SchedulingDecisionMaker
}
import org.apache.openwhisk.core.service.{DataManagementService, EtcdWorker, LeaseKeepAliveService, WatcherService}
import org.apache.openwhisk.grpc.ActivationServiceHandler
import org.apache.openwhisk.http.BasicHttpService
import org.apache.openwhisk.spi.SpiLoader
import org.apache.openwhisk.utils.ExecutionContextFactory
import pureconfig.loadConfigOrThrow
import spray.json.{DefaultJsonProtocol, _}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
import pureconfig.generic.auto._
import scala.collection.JavaConverters
class Scheduler(schedulerId: SchedulerInstanceId, schedulerEndpoints: SchedulerEndpoints)(implicit config: WhiskConfig,
actorSystem: ActorSystem,
logging: Logging)
extends SchedulerCore {
implicit val ec = actorSystem.dispatcher
private val authStore = WhiskAuthStore.datastore()
val msgProvider = SpiLoader.get[MessagingProvider]
val producer = msgProvider.getProducer(config, Some(ActivationEntityLimit.MAX_ACTIVATION_LIMIT))
val maxPeek = loadConfigOrThrow[Int](ConfigKeys.schedulerMaxPeek)
val etcdClient = EtcdClient(loadConfigOrThrow[EtcdConfig](ConfigKeys.etcd).hosts)
val watcherService: ActorRef = actorSystem.actorOf(WatcherService.props(etcdClient))
val leaseService =
actorSystem.actorOf(LeaseKeepAliveService.props(etcdClient, schedulerId, watcherService))
implicit val entityStore = WhiskEntityStore.datastore()
private val activationStore =
SpiLoader.get[ActivationStoreProvider].instance(actorSystem, logging)
private val ack = {
val sender = if (UserEvents.enabled) Some(new UserEventSender(producer)) else None
new MessagingActiveAck(producer, schedulerId, sender)
}
/** Stores an activation in the database. */
private val store = (tid: TransactionId, activation: WhiskActivation, context: UserContext) => {
implicit val transid: TransactionId = tid
activationStore.store(activation, context)(tid, notifier = None).andThen {
case Success(doc) => logging.info(this, s"save ${doc} successfully")
case Failure(t) => logging.error(this, s"failed to save activation $activation, error: ${t.getMessage}")
}
}
val durationCheckerProvider = SpiLoader.get[DurationCheckerProvider]
val durationChecker = durationCheckerProvider.instance(actorSystem, logging)
override def getState: Future[(List[(SchedulerInstanceId, Int)], Int)] = {
logging.info(this, s"getting the queue states")
etcdClient
.getPrefix(s"${QueueKeys.inProgressPrefix}/${QueueKeys.queuePrefix}")
.map(res => {
JavaConverters
.asScalaIteratorConverter(res.getKvsList.iterator())
.asScala
.map(kv => ByteStringToString(kv.getValue))
.count(_ == schedulerId.asString)
})
.flatMap { creationCount =>
etcdClient
.get(SchedulerKeys.scheduler(schedulerId))
.map(res => {
JavaConverters
.asScalaIteratorConverter(res.getKvsList.iterator())
.asScala
.map { kv =>
SchedulerStates.parse(kv.getValue).getOrElse(SchedulerStates(schedulerId, -1, schedulerEndpoints))
}
.map { schedulerState =>
(schedulerState.sid, schedulerState.queueSize)
}
.toList
})
.map { list =>
(list, creationCount)
}
}
}
override def getQueueSize: Future[Int] = {
queueManager.ask(QueueSize)(Timeout(5.seconds)).mapTo[Int]
}
override def getQueueStatusData: Future[List[StatusData]] = {
queueManager.ask(StatusQuery)(Timeout(5.seconds)).mapTo[Future[List[StatusData]]].flatten
}
override def disable(): Unit = {
logging.info(this, s"Gracefully shutting down the scheduler")
containerManager ! GracefulShutdown
queueManager ! GracefulShutdown
}
private def getUserLimit(invocationNamespace: String): Future[Int] = {
Identity
.get(authStore, EntityName(invocationNamespace))(trasnid)
.map { identity =>
val limit = identity.limits.concurrentInvocations.getOrElse(config.actionInvokeConcurrentLimit.toInt)
logging.debug(this, s"limit for ${invocationNamespace}: ${limit}")(trasnid)
limit
}
.andThen {
case Failure(_: NoDocumentException) =>
logging.warn(this, s"namespace does not exist: $invocationNamespace")(trasnid)
case Failure(_: IllegalStateException) =>
logging.warn(this, s"namespace is not unique: $invocationNamespace")(trasnid)
}
}
// Factory for the EtcdWorker actor that performs the actual etcd reads/writes
// on behalf of the DataManagementService.
private val etcdWorkerFactory = (f: ActorRefFactory) => f.actorOf(EtcdWorker.props(etcdClient, leaseService))

/**
 * This component is in charge of storing data to ETCD.
 * Even if any error happens we can assume the data will be eventually available in the ETCD by this component.
 */
val dataManagementService: ActorRef =
  actorSystem.actorOf(DataManagementService.props(watcherService, etcdWorkerFactory))

// Factory for MessageFeed actors: builds a consumer for `topic` and wraps it in a
// feed that hands at most `maxActiveAcksPerPoll` messages to `processAck` per poll.
val feedFactory = (f: ActorRefFactory,
                   description: String,
                   topic: String,
                   maxActiveAcksPerPoll: Int,
                   processAck: Array[Byte] => Future[Unit]) => {
  val consumer = msgProvider.getConsumer(config, topic, topic, maxActiveAcksPerPoll)
  f.actorOf(Props(new MessageFeed(description, logging, consumer, maxActiveAcksPerPoll, 1.second, processAck)))
}

// Factory for the CreationJobManager actor used by the container manager below.
val creationJobManagerFactory: ActorRefFactory => ActorRef =
  factory => {
    factory.actorOf(CreationJobManager.props(feedFactory, schedulerId, dataManagementService))
  }

/**
 * This component is responsible for creating containers for a given action.
 * It relies on the creationJobManager to manage the container creation job.
 */
val containerManager: ActorRef =
  actorSystem.actorOf(
    ContainerManager.props(creationJobManagerFactory, msgProvider, schedulerId, etcdClient, config, watcherService))
/**
 * This is a factory to create memory queues.
 * In the new architecture, each action is given its own dedicated queue.
 */
val memoryQueueFactory
  : (ActorRefFactory, String, FullyQualifiedEntityName, DocRevision, WhiskActionMetaData) => ActorRef =
  (factory, invocationNamespace, fqn, revision, actionMetaData) => {
    // Todo: Change this to SPI
    // Each queue gets its own SchedulingDecisionMaker scoped to this namespace/action.
    val decisionMaker = factory.actorOf(SchedulingDecisionMaker.props(invocationNamespace, fqn))

    // NOTE(review): the explicit type ascriptions on some arguments below look
    // redundant; presumably kept for clarity or eta-expansion — confirm before removing.
    factory.actorOf(
      MemoryQueue.props(
        etcdClient,
        durationChecker,
        fqn,
        producer,
        config,
        invocationNamespace,
        revision,
        schedulerEndpoints,
        actionMetaData,
        dataManagementService,
        watcherService,
        containerManager,
        decisionMaker,
        schedulerId: SchedulerInstanceId,
        ack,
        store: (TransactionId, WhiskActivation, UserContext) => Future[Any],
        getUserLimit: String => Future[Int]))
  }
// Kafka topic this scheduler instance consumes activation messages from.
val topic = s"${Scheduler.topicPrefix}scheduler${schedulerId.asString}"

// Poll interval exceeds the maximum action time limit so the consumer is not
// considered stalled while long-running activations are processed — presumably;
// confirm against the messaging provider's liveness semantics.
val schedulerConsumer =
  msgProvider.getConsumer(config, topic, topic, maxPeek, maxPollInterval = TimeLimit.MAX_DURATION + 1.minute)

// NOTE(review): "trasnid" is presumably a misspelling of "transid"; renaming would
// touch every use site in this class, so it is only flagged here.
implicit val trasnid = TransactionId.containerCreation

/**
 * This is one of the major components which take charge of managing queues and coordinating requests among the scheduler, controllers, and invokers.
 */
val queueManager = actorSystem.actorOf(
  QueueManager.props(
    entityStore,
    WhiskActionMetaData.get,
    etcdClient,
    schedulerEndpoints,
    schedulerId,
    dataManagementService,
    watcherService,
    ack,
    store: (TransactionId, WhiskActivation, UserContext) => Future[Any],
    memoryQueueFactory,
    schedulerConsumer),
  QueueManager.actorName)

// HTTP request handler for the ActivationService implementation exposed by this scheduler.
val serviceHandlers: HttpRequest => Future[HttpResponse] = ActivationServiceHandler.apply(ActivationServiceImpl())
}
/** Parsed command-line options for the scheduler process; all fields optional. */
case class CmdLineArgs(uniqueName: Option[String] = None, id: Option[Int] = None, displayedName: Option[String] = None)
/** Operations every scheduler implementation must provide. */
trait SchedulerCore {
  /** Snapshot of (scheduler id, queue size) pairs plus this scheduler's in-progress creation count. */
  def getState: Future[(List[(SchedulerInstanceId, Int)], Int)]

  /** Current queue size reported by the queue manager. */
  def getQueueSize: Future[Int]

  /** Detailed status data for the managed queues. */
  def getQueueStatusData: Future[List[StatusData]]

  /** Triggers a graceful shutdown. */
  def disable(): Unit
}
/** Companion: configuration constants and the scheduler process entry point. */
object Scheduler {

  protected val protocol = loadConfigOrThrow[String]("whisk.scheduler.protocol")
  val topicPrefix = loadConfigOrThrow[String](ConfigKeys.kafkaTopicsPrefix)

  /**
   * The scheduler has two ports, one for akka-remote and the other for akka-grpc.
   */
  def requiredProperties =
    Map(
      servicePort -> 8080.toString,
      schedulerHost -> null,
      schedulerAkkaPort -> null,
      schedulerRpcPort -> null,
      WhiskConfig.actionInvokePerMinuteLimit -> null,
      WhiskConfig.actionInvokeConcurrentLimit -> null,
      WhiskConfig.triggerFirePerMinuteLimit -> null) ++
      kafkaHosts ++
      zookeeperHosts ++
      wskApiHost ++
      ExecManifest.requiredProperties

  def initKamon(instance: SchedulerInstanceId): Unit = {
    // Replace the hostname of the scheduler to the assigned id of the scheduler.
    val newKamonConfig = Kamon.config
      .withValue("kamon.environment.host", ConfigValueFactory.fromAnyRef(s"scheduler${instance.asString}"))
    Kamon.init(newKamonConfig)
  }

  /**
   * Process entry point. Expects the scheduler instance id as the first program
   * argument, validates configuration, ensures the required Kafka topics exist
   * and starts the HTTP service. Aborts the process on any startup failure.
   */
  def main(args: Array[String]): Unit = {
    implicit val ec = ExecutionContextFactory.makeCachedThreadPoolExecutionContext()
    implicit val actorSystem: ActorSystem =
      ActorSystem(name = "scheduler-actor-system", defaultExecutionContext = Some(ec))
    implicit val logger = new AkkaLogging(akka.event.Logging.getLogger(actorSystem, this))

    // Prepare Kamon shutdown
    CoordinatedShutdown(actorSystem).addTask(CoordinatedShutdown.PhaseActorSystemTerminate, "shutdownKamon") { () =>
      logger.info(this, s"Shutting down Kamon with coordinated shutdown")
      Kamon.stopModules().map(_ => Done)
    }

    // Logs the failure, terminates the actor system and exits with a non-zero code.
    def abort(message: String) = {
      logger.error(this, message)
      actorSystem.terminate()
      Await.result(actorSystem.whenTerminated, 30.seconds)
      sys.exit(1)
    }

    // extract configuration data from the environment
    implicit val config = new WhiskConfig(requiredProperties)
    if (!config.isValid) {
      abort("Bad configuration, cannot start.")
    }

    val port = config.servicePort.toInt
    val host = config.schedulerHost
    val rpcPort = config.schedulerRpcPort.toInt
    val akkaPort = config.schedulerAkkaPort.toInt

    // if deploying multiple instances (scale out), must pass the instance number as they need to be uniquely identified.
    require(args.length >= 1, "scheduler instance required")
    val instanceId = SchedulerInstanceId(args(0))

    initKamon(instanceId)

    val msgProvider = SpiLoader.get[MessagingProvider]

    // Ensure the topics this scheduler relies on exist before wiring anything else.
    Seq(
      (topicPrefix + "scheduler" + instanceId.asString, "actions", Some(ActivationEntityLimit.MAX_ACTIVATION_LIMIT)),
      (
        topicPrefix + "creationAck" + instanceId.asString,
        "creationAck",
        Some(ActivationEntityLimit.MAX_ACTIVATION_LIMIT)))
      .foreach {
        case (topic, topicConfigurationKey, maxMessageBytes) =>
          if (msgProvider.ensureTopic(config, topic, topicConfigurationKey, maxMessageBytes).isFailure) {
            abort(s"failure during msgProvider.ensureTopic for topic $topic")
          }
      }

    ExecManifest.initialize(config) match {
      case Success(_) =>
        val schedulerEndpoints = SchedulerEndpoints(host, rpcPort, akkaPort)
        // Create scheduler
        val scheduler = new Scheduler(instanceId, schedulerEndpoints)

        // TODO: Add Akka-grpc handler
        val httpsConfig =
          if (Scheduler.protocol == "https") Some(loadConfigOrThrow[HttpsConfig]("whisk.controller.https")) else None

        BasicHttpService.startHttpService(FPCSchedulerServer.instance(scheduler).route, port, httpsConfig)(actorSystem)

      case Failure(t) =>
        abort(s"Invalid runtimes manifest: $t")
    }
  }
}
/**
 * Network endpoints of a scheduler instance.
 *
 * @param host host name or address of the scheduler
 * @param rpcPort port of the rpc (akka-grpc) endpoint
 * @param akkaPort port of the akka-remote endpoint
 */
case class SchedulerEndpoints(host: String, rpcPort: Int, akkaPort: Int) {
  require(rpcPort != 0 || akkaPort != 0)

  def asRpcEndpoint: String = s"$host:$rpcPort"
  def asAkkaEndpoint: String = s"$host:$akkaPort"

  /** Resolves the actor named `name` on this scheduler's remote actor system. */
  def getRemoteRef(name: String)(implicit context: ActorRefFactory): ActorSelection = {
    // Fix: removed the unused `implicit val ec = context.dispatcher`;
    // actorSelection takes no ExecutionContext.
    val path = s"akka://scheduler-actor-system@${asAkkaEndpoint}/user/${name}"
    context.actorSelection(path)
  }

  /** Compact JSON representation of these endpoints. */
  def serialize = SchedulerEndpoints.serdes.write(this).compactPrint
}
/** JSON (de)serialization support for [[SchedulerEndpoints]]. */
object SchedulerEndpoints extends DefaultJsonProtocol {
  implicit val serdes = jsonFormat(SchedulerEndpoints.apply, "host", "rpcPort", "akkaPort")

  /** Parses a JSON string into endpoints; failures are captured in the Try. */
  def parse(endpoints: String) = Try(serdes.read(endpoints.parseJson))
}
/**
 * State advertised by a scheduler instance: its id, current queue size and endpoints.
 */
case class SchedulerStates(sid: SchedulerInstanceId, queueSize: Int, endpoints: SchedulerEndpoints) {
  // NOTE(review): no method in this class currently uses this timeout.
  private implicit val askTimeout = Timeout(5.seconds)

  /** Resolves the actor named `name` on the scheduler owning this state. */
  def getRemoteRef(name: String)(implicit context: ActorRefFactory): ActorSelection = {
    // Fix: the path previously started with "akka//..." (missing "://"), which is
    // not a valid actor path; it must match SchedulerEndpoints.getRemoteRef.
    // Also removed the unused `implicit val ec = context.dispatcher`.
    val path = s"akka://scheduler-actor-system@${endpoints.asAkkaEndpoint}/user/${name}"
    context.actorSelection(path)
  }

  def getSchedulerId(): SchedulerInstanceId = sid

  /** Compact JSON representation of this state. */
  def serialize = SchedulerStates.serdes.write(this).compactPrint
}
/** JSON (de)serialization support for [[SchedulerStates]]. */
object SchedulerStates extends DefaultJsonProtocol {
  // Endpoint format must be in implicit scope for the derived format below.
  private implicit val endpointsSerde = SchedulerEndpoints.serdes
  implicit val serdes = jsonFormat(SchedulerStates.apply, "sid", "queueSize", "endpoints")

  /** Parses a JSON string into a SchedulerStates; failures are captured in the Try. */
  def parse(states: String) = Try(serdes.read(states.parseJson))
}
| style95/openwhisk | core/scheduler/src/main/scala/org/apache/openwhisk/core/scheduler/Scheduler.scala | Scala | apache-2.0 | 15,934 |
package rovak.steamkit.steam.gc
import java.io.IOException
import lombok.Getter
import rovak.steamkit.steam.AMsgBase
import rovak.steamkit.steam.language.internal.IGCSerializableHeader
import rovak.steamkit.types.JobID
/**
 * This is the abstract base class for all available game coordinator messages.
 * It's used to maintain packet payloads and provide a header for all gc messages.
 * @tparam T The header type for this gc message.
 */
abstract class GCMsgBase[T](clazz: Class[T], payloadReserve: Int) extends AMsgBase(payloadReserve) with IClientGCMsg {

  // Header instantiated via the header type's no-arg constructor.
  // Fix: Class#newInstance is deprecated (it silently rethrows checked
  // exceptions); use the Constructor-based replacement instead.
  var header = clazz.getDeclaredConstructor().newInstance()

  /** Always false here; protobuf-backed messages override elsewhere — presumably; confirm against subclasses. */
  def isProto = false

  /**
   * serializes this client message instance to a byte array.
   * @return Data representing a client message.
   */
  def serialize(): Array[Byte]

  /**
   * Initializes this client message by deserializing the specified data.
   * @param data The data representing a client message.
   */
  def deserialize(data: Array[Byte]): Unit
}
| Rovak/scala-steamkit | steam/src/main/scala/rovak/steamkit/steam/gc/GCMsgBase.scala | Scala | mit | 964 |
package info.rnowak.simplega.operators.selection
import info.rnowak.simplega.fitness.IndividualWithFitness
import info.rnowak.simplega.population.Population
import scala.collection.parallel.ParSeq
/**
 * A genetic-algorithm selection operator: given all individuals of a population
 * paired with their fitness, picks one individual (e.g. for reproduction).
 */
trait SelectionOperator[PopulationType <: Population] {
  /**
   * Selects a single individual from the evaluated population.
   *
   * @param individualsWithFitness individuals paired with their computed fitness
   * @return the selected individual
   */
  def select(individualsWithFitness: ParSeq[IndividualWithFitness[PopulationType#IndividualType]]): PopulationType#IndividualType
}
| rafalnowak/simplega | src/main/scala/info/rnowak/simplega/operators/selection/SelectionOperator.scala | Scala | mit | 388 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.errors
import org.antlr.v4.runtime.ParserRuleContext
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.trees.Origin
/**
* Object for grouping all error messages of the query parsing.
* Currently it includes all ParseException.
*/
object QueryParsingErrors {
def invalidInsertIntoError(ctx: InsertIntoContext): Throwable = {
new ParseException("Invalid InsertIntoContext", ctx)
}
def insertOverwriteDirectoryUnsupportedError(ctx: InsertIntoContext): Throwable = {
new ParseException("INSERT OVERWRITE DIRECTORY is not supported", ctx)
}
def columnAliasInOperationNotAllowedError(op: String, ctx: TableAliasContext): Throwable = {
new ParseException(s"Columns aliases are not allowed in $op.", ctx.identifierList())
}
def emptySourceForMergeError(ctx: MergeIntoTableContext): Throwable = {
new ParseException("Empty source for merge: you should specify a source" +
" table/subquery in merge.", ctx.source)
}
def unrecognizedMatchedActionError(ctx: MatchedClauseContext): Throwable = {
new ParseException(s"Unrecognized matched action: ${ctx.matchedAction().getText}",
ctx.matchedAction())
}
def insertedValueNumberNotMatchFieldNumberError(ctx: NotMatchedClauseContext): Throwable = {
new ParseException("The number of inserted values cannot match the fields.",
ctx.notMatchedAction())
}
def unrecognizedNotMatchedActionError(ctx: NotMatchedClauseContext): Throwable = {
new ParseException(s"Unrecognized not matched action: ${ctx.notMatchedAction().getText}",
ctx.notMatchedAction())
}
def mergeStatementWithoutWhenClauseError(ctx: MergeIntoTableContext): Throwable = {
new ParseException("There must be at least one WHEN clause in a MERGE statement", ctx)
}
def nonLastMatchedClauseOmitConditionError(ctx: MergeIntoTableContext): Throwable = {
new ParseException("When there are more than one MATCHED clauses in a MERGE " +
"statement, only the last MATCHED clause can omit the condition.", ctx)
}
def nonLastNotMatchedClauseOmitConditionError(ctx: MergeIntoTableContext): Throwable = {
new ParseException("When there are more than one NOT MATCHED clauses in a MERGE " +
"statement, only the last NOT MATCHED clause can omit the condition.", ctx)
}
def emptyPartitionKeyError(key: String, ctx: PartitionSpecContext): Throwable = {
new ParseException(s"Found an empty partition key '$key'.", ctx)
}
def combinationQueryResultClausesUnsupportedError(ctx: QueryOrganizationContext): Throwable = {
new ParseException(
"Combination of ORDER BY/SORT BY/DISTRIBUTE BY/CLUSTER BY is not supported", ctx)
}
def distributeByUnsupportedError(ctx: QueryOrganizationContext): Throwable = {
new ParseException("DISTRIBUTE BY is not supported", ctx)
}
def transformNotSupportQuantifierError(ctx: ParserRuleContext): Throwable = {
new ParseException("TRANSFORM does not support DISTINCT/ALL in inputs", ctx)
}
def transformWithSerdeUnsupportedError(ctx: ParserRuleContext): Throwable = {
new ParseException("TRANSFORM with serde is only supported in hive mode", ctx)
}
def lateralWithPivotInFromClauseNotAllowedError(ctx: FromClauseContext): Throwable = {
new ParseException("LATERAL cannot be used together with PIVOT in FROM clause", ctx)
}
def lateralJoinWithNaturalJoinUnsupportedError(ctx: ParserRuleContext): Throwable = {
new ParseException("LATERAL join with NATURAL join is not supported", ctx)
}
def lateralJoinWithUsingJoinUnsupportedError(ctx: ParserRuleContext): Throwable = {
new ParseException("LATERAL join with USING join is not supported", ctx)
}
def unsupportedLateralJoinTypeError(ctx: ParserRuleContext, joinType: String): Throwable = {
new ParseException(s"Unsupported LATERAL join type $joinType", ctx)
}
def repetitiveWindowDefinitionError(name: String, ctx: WindowClauseContext): Throwable = {
new ParseException(s"The definition of window '$name' is repetitive", ctx)
}
def invalidWindowReferenceError(name: String, ctx: WindowClauseContext): Throwable = {
new ParseException(s"Window reference '$name' is not a window specification", ctx)
}
def cannotResolveWindowReferenceError(name: String, ctx: WindowClauseContext): Throwable = {
new ParseException(s"Cannot resolve window reference '$name'", ctx)
}
def joinCriteriaUnimplementedError(join: JoinCriteriaContext, ctx: RelationContext): Throwable = {
new ParseException(s"Unimplemented joinCriteria: $join", ctx)
}
def naturalCrossJoinUnsupportedError(ctx: RelationContext): Throwable = {
new ParseException("NATURAL CROSS JOIN is not supported", ctx)
}
def emptyInputForTableSampleError(ctx: ParserRuleContext): Throwable = {
new ParseException("TABLESAMPLE does not accept empty inputs.", ctx)
}
def tableSampleByBytesUnsupportedError(msg: String, ctx: SampleMethodContext): Throwable = {
new ParseException(s"TABLESAMPLE($msg) is not supported", ctx)
}
def invalidByteLengthLiteralError(bytesStr: String, ctx: SampleByBytesContext): Throwable = {
new ParseException(s"$bytesStr is not a valid byte length literal, " +
"expected syntax: DIGIT+ ('B' | 'K' | 'M' | 'G')", ctx)
}
def invalidEscapeStringError(ctx: PredicateContext): Throwable = {
new ParseException("Invalid escape string. Escape string must contain only one character.", ctx)
}
def trimOptionUnsupportedError(trimOption: Int, ctx: TrimContext): Throwable = {
new ParseException("Function trim doesn't support with " +
s"type $trimOption. Please use BOTH, LEADING or TRAILING as trim type", ctx)
}
def functionNameUnsupportedError(functionName: String, ctx: ParserRuleContext): Throwable = {
new ParseException(s"Unsupported function name '$functionName'", ctx)
}
def cannotParseValueTypeError(
valueType: String, value: String, ctx: TypeConstructorContext): Throwable = {
new ParseException(s"Cannot parse the $valueType value: $value", ctx)
}
def cannotParseIntervalValueError(value: String, ctx: TypeConstructorContext): Throwable = {
new ParseException(s"Cannot parse the INTERVAL value: $value", ctx)
}
def literalValueTypeUnsupportedError(
valueType: String, ctx: TypeConstructorContext): Throwable = {
new ParseException(s"Literals of type '$valueType' are currently not supported.", ctx)
}
def parsingValueTypeError(
e: IllegalArgumentException, valueType: String, ctx: TypeConstructorContext): Throwable = {
val message = Option(e.getMessage).getOrElse(s"Exception parsing $valueType")
new ParseException(message, ctx)
}
def invalidNumericLiteralRangeError(rawStrippedQualifier: String, minValue: BigDecimal,
maxValue: BigDecimal, typeName: String, ctx: NumberContext): Throwable = {
new ParseException(s"Numeric literal $rawStrippedQualifier does not " +
s"fit in range [$minValue, $maxValue] for type $typeName", ctx)
}
def moreThanOneFromToUnitInIntervalLiteralError(ctx: ParserRuleContext): Throwable = {
new ParseException("Can only have a single from-to unit in the interval literal syntax", ctx)
}
def invalidIntervalLiteralError(ctx: IntervalContext): Throwable = {
new ParseException("at least one time unit should be given for interval literal", ctx)
}
def invalidIntervalFormError(value: String, ctx: MultiUnitsIntervalContext): Throwable = {
new ParseException("Can only use numbers in the interval value part for" +
s" multiple unit value pairs interval form, but got invalid value: $value", ctx)
}
def invalidFromToUnitValueError(ctx: IntervalValueContext): Throwable = {
new ParseException("The value of from-to unit must be a string", ctx)
}
def fromToIntervalUnsupportedError(
from: String, to: String, ctx: UnitToUnitIntervalContext): Throwable = {
new ParseException(s"Intervals FROM $from TO $to are not supported.", ctx)
}
def dataTypeUnsupportedError(dataType: String, ctx: PrimitiveDataTypeContext): Throwable = {
new ParseException(s"DataType $dataType is not supported.", ctx)
}
def partitionTransformNotExpectedError(
name: String, describe: String, ctx: ApplyTransformContext): Throwable = {
new ParseException(s"Expected a column reference for transform $name: $describe", ctx)
}
def tooManyArgumentsForTransformError(name: String, ctx: ApplyTransformContext): Throwable = {
new ParseException(s"Too many arguments for transform $name", ctx)
}
def notEnoughArgumentsForTransformError(name: String, ctx: ApplyTransformContext): Throwable = {
new ParseException(s"Not enough arguments for transform $name", ctx)
}
def invalidBucketsNumberError(describe: String, ctx: ApplyTransformContext): Throwable = {
new ParseException(s"Invalid number of buckets: $describe", ctx)
}
def invalidTransformArgumentError(ctx: TransformArgumentContext): Throwable = {
new ParseException("Invalid transform argument", ctx)
}
def cannotCleanReservedNamespacePropertyError(
property: String, ctx: ParserRuleContext, msg: String): Throwable = {
new ParseException(s"$property is a reserved namespace property, $msg.", ctx)
}
def propertiesAndDbPropertiesBothSpecifiedError(ctx: CreateNamespaceContext): Throwable = {
new ParseException("Either PROPERTIES or DBPROPERTIES is allowed.", ctx)
}
/** Error for `SHOW DATABASES FROM/IN ...`, which is not supported. */
def fromOrInNotAllowedInShowDatabasesError(ctx: ShowNamespacesContext): Throwable = {
  // Fix: dropped the spurious `s` interpolator — the message contains no interpolation.
  new ParseException("FROM/IN operator is not allowed in SHOW DATABASES", ctx)
}
def cannotCleanReservedTablePropertyError(
property: String, ctx: ParserRuleContext, msg: String): Throwable = {
new ParseException(s"$property is a reserved table property, $msg.", ctx)
}
def duplicatedTablePathsFoundError(
pathOne: String, pathTwo: String, ctx: ParserRuleContext): Throwable = {
new ParseException(s"Duplicated table paths found: '$pathOne' and '$pathTwo'. LOCATION" +
s" and the case insensitive key 'path' in OPTIONS are all used to indicate the custom" +
s" table path, you can only specify one of them.", ctx)
}
def storedAsAndStoredByBothSpecifiedError(ctx: CreateFileFormatContext): Throwable = {
new ParseException("Expected either STORED AS or STORED BY, not both", ctx)
}
def operationInHiveStyleCommandUnsupportedError(operation: String,
command: String, ctx: StatementContext, msgOpt: Option[String] = None): Throwable = {
val basicError = s"$operation is not supported in Hive-style $command"
val msg = if (msgOpt.isDefined) {
s"$basicError, ${msgOpt.get}."
} else {
basicError
}
new ParseException(msg, ctx)
}
def operationNotAllowedError(message: String, ctx: ParserRuleContext): Throwable = {
new ParseException(s"Operation not allowed: $message", ctx)
}
def descColumnForPartitionUnsupportedError(ctx: DescribeRelationContext): Throwable = {
new ParseException("DESC TABLE COLUMN for a specific partition is not supported", ctx)
}
def incompletePartitionSpecificationError(
key: String, ctx: DescribeRelationContext): Throwable = {
new ParseException(s"PARTITION specification is incomplete: `$key`", ctx)
}
def computeStatisticsNotExpectedError(ctx: IdentifierContext): Throwable = {
new ParseException(s"Expected `NOSCAN` instead of `${ctx.getText}`", ctx)
}
def addCatalogInCacheTableAsSelectNotAllowedError(
quoted: String, ctx: CacheTableContext): Throwable = {
new ParseException(s"It is not allowed to add catalog/namespace prefix $quoted to " +
"the table name in CACHE TABLE AS SELECT", ctx)
}
def showFunctionsUnsupportedError(identifier: String, ctx: IdentifierContext): Throwable = {
new ParseException(s"SHOW $identifier FUNCTIONS not supported", ctx)
}
def duplicateCteDefinitionNamesError(duplicateNames: String, ctx: CtesContext): Throwable = {
new ParseException(s"CTE definition can't have duplicate names: $duplicateNames.", ctx)
}
def sqlStatementUnsupportedError(sqlText: String, position: Origin): Throwable = {
new ParseException(Option(sqlText), "Unsupported SQL statement", position, position)
}
def unquotedIdentifierError(ident: String, ctx: ErrorIdentContext): Throwable = {
new ParseException(s"Possibly unquoted identifier $ident detected. " +
s"Please consider quoting it with back-quotes as `$ident`", ctx)
}
def duplicateClausesError(clauseName: String, ctx: ParserRuleContext): Throwable = {
new ParseException(s"Found duplicate clauses: $clauseName", ctx)
}
def duplicateKeysError(key: String, ctx: ParserRuleContext): Throwable = {
new ParseException(s"Found duplicate keys '$key'.", ctx)
}
// NOTE(review): the method name misspells "Format" as "Fomat"; it is kept as-is
// because renaming would break existing callers.
/** Error for a malformed SET statement. */
def unexpectedFomatForSetConfigurationError(ctx: SetConfigurationContext): Throwable = {
  new ParseException(
    // Fix: dropped the spurious `s` interpolator — the message contains no interpolation.
    """
      |Expected format is 'SET', 'SET key', or 'SET key=value'. If you want to include
      |special characters in key, or include semicolon in value, please use quotes,
      |e.g., SET `ke y`=`v;alue`.
    """.stripMargin.replaceAll("\\n", " "), ctx)
}
def invalidPropertyKeyForSetQuotedConfigurationError(
keyCandidate: String, valueStr: String, ctx: SetQuotedConfigurationContext): Throwable = {
new ParseException(s"'$keyCandidate' is an invalid property key, please " +
s"use quotes, e.g. SET `$keyCandidate`=`$valueStr`", ctx)
}
def invalidPropertyValueForSetQuotedConfigurationError(
valueCandidate: String, keyStr: String, ctx: SetQuotedConfigurationContext): Throwable = {
new ParseException(s"'$valueCandidate' is an invalid property value, please " +
s"use quotes, e.g. SET `$keyStr`=`$valueCandidate`", ctx)
}
/** Error for a malformed RESET statement. */
def unexpectedFormatForResetConfigurationError(ctx: ResetConfigurationContext): Throwable = {
  new ParseException(
    // Fix: dropped the spurious `s` interpolator — the message contains no interpolation.
    """
      |Expected format is 'RESET' or 'RESET key'. If you want to include special characters
      |in key, please use quotes, e.g., RESET `ke y`.
    """.stripMargin.replaceAll("\\n", " "), ctx)
}
def intervalValueOutOfRangeError(ctx: IntervalContext): Throwable = {
new ParseException("The interval value must be in the range of [-18, +18] hours" +
" with second precision", ctx)
}
def invalidTimeZoneDisplacementValueError(ctx: SetTimeZoneContext): Throwable = {
new ParseException("Invalid time zone displacement value", ctx)
}
def createTempTableNotSpecifyProviderError(ctx: CreateTableContext): Throwable = {
new ParseException("CREATE TEMPORARY TABLE without a provider is not allowed.", ctx)
}
def rowFormatNotUsedWithStoredAsError(ctx: CreateTableLikeContext): Throwable = {
new ParseException("'ROW FORMAT' must be used with 'STORED AS'", ctx)
}
def useDefinedRecordReaderOrWriterClassesError(ctx: ParserRuleContext): Throwable = {
new ParseException(
"Unsupported operation: Used defined record reader/writer classes.", ctx)
}
def directoryPathAndOptionsPathBothSpecifiedError(ctx: InsertOverwriteDirContext): Throwable = {
new ParseException(
"Directory path and 'path' in OPTIONS should be specified one, but not both", ctx)
}
def unsupportedLocalFileSchemeError(ctx: InsertOverwriteDirContext): Throwable = {
new ParseException("LOCAL is supported only with file: scheme", ctx)
}
def invalidGroupingSetError(element: String, ctx: GroupingAnalyticsContext): Throwable = {
new ParseException(s"Empty set in $element grouping sets is not supported.", ctx)
}
}
| cloud-fan/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala | Scala | apache-2.0 | 16,486 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of BitWatts.
*
* Copyright (C) 2011-2015 Inria, University of Lille 1,
* University of Neuchâtel.
*
* BitWatts is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* BitWatts is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with BitWatts.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.bitwatts.reporter
import java.io.File
import java.net.Socket
import java.util.UUID

import scala.util.control.NonFatal

import org.apache.logging.log4j.LogManager
import org.newsclub.net.unix.{AFUNIXSocketAddress, AFUNIXSocket}
import org.powerapi.PowerDisplay
import org.powerapi.core.power.Power
import org.powerapi.core.target.Target
/**
 * This display is used to report data inside a JUnixSocket.
 *
 * @author <a href="mailto:maxime.colmant@gmail.com">Maxime Colmant</a>
 * @author <a href="mailto:mascha.kurpicz@unine.ch">Mascha Kurpicz</a>
 */
class VirtioDisplay(path: String) extends PowerDisplay {
  private[this] val log = LogManager.getLogger

  // Lazily-established unix-domain socket; None until connected, and reset to
  // None when a write fails so the next display() retries the connection.
  private[this] var output: Option[Socket] = None

  /** Attempts to connect to the unix-domain socket at `path`; None on failure. */
  def initializeConnection(): Option[Socket] = {
    try {
      val sock = AFUNIXSocket.newInstance()
      val address = new AFUNIXSocketAddress(new File(path))
      sock.connect(address)
      Some(sock)
    }
    catch {
      // Fix: catch only non-fatal errors instead of Throwable, so fatal JVM
      // errors (OutOfMemoryError, ...) are not swallowed.
      case NonFatal(_) => log.warn("Connexion impossible, path: {}", path); None
    }
  }

  /** Opens the socket if it is not already open. */
  def initOutput(): Unit = {
    if (output.isEmpty) {
      // Fix: the previous `case option: Option[Socket]` pattern also matched None,
      // so "socket opened" was logged even when the connection attempt failed
      // (and the trailing `case _` branch was dead). Act only on success.
      initializeConnection().foreach { sock =>
        log.debug("socket opened, path {}", path)
        output = Some(sock)
      }
    }
  }

  /** Writes the power value (in watts) to the socket; drops the connection on error. */
  def writePower(targets: Set[Target], power: Power): Unit = {
    output match {
      case Some(socket) =>
        try {
          log.debug(s"{} has been written for targets {} in {}", s"${power.toWatts}", s"${targets.mkString(",")}", path)
          socket.getOutputStream.write(s"${power.toWatts}\n".getBytes)
        }
        catch {
          case NonFatal(_) =>
            log.warn("Connexion lost, path {}", path)
            output = None
        }
      case _ =>
    }
  }

  /** PowerDisplay callback: (re)connect if needed, then report the power value. */
  def display(muid: UUID, timestamp: Long, targets: Set[Target], device: Set[String], power: Power): Unit = {
    initOutput()
    writePower(targets, power)
  }
}
| Spirals-Team/bitwatts | bitwatts-core/src/main/scala/org/powerapi/bitwatts/reporter/VirtioDisplay.scala | Scala | agpl-3.0 | 2,859 |
package hclu.hreg.dao
import java.util.UUID
import hclu.hreg.dao.sql.SqlDatabase
import hclu.hreg.domain.Doc
import org.joda.time.DateTime
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
/** Data-access object for docs, backed by Slick on top of [[SqlDatabase]]. */
class DocDao(protected val database: SqlDatabase)(implicit val ec: ExecutionContext) extends SqlDocSchema {

  import database._
  import database.driver.api._

  type DocId = UUID

  /**
   * Inserts a doc and runs `postAction` with the generated registry id inside the
   * same transaction, so a failing postAction rolls the insert back.
   *
   * NOTE(review): `Await.result` blocks a database thread for up to 20 seconds;
   * consider composing the post action asynchronously into the DBIO instead.
   *
   * @return the auto-generated registry id of the inserted doc
   */
  def add(doc: Doc, postAction: (Int) => Future[Unit]): Future[Int] = {
    val action = (for {
      regId <- addAction(doc)
    } yield {
      Await.result(postAction(regId), 20 seconds)
      regId
    }).transactionally
    db.run(action)
  }

  /** Finds a doc by its UUID primary key. */
  def findById(id: UUID) = findOneWhere(_.id === id)

  /** Finds a doc by its sequential registry id. */
  def findByRegId(regId: Int) = db.run(findByRegIdAction(regId))

  /** DBIO variant of [[findByRegId]], usable inside larger transactions. */
  def findByRegIdAction(regId: Int) = findOneWhereAction(_.regId === regId)

  private def findOneWhere(condition: Docs => Rep[Boolean]) = {
    db.run(findOneWhereAction(condition))
  }

  // Returns at most one row matching the condition.
  private def findOneWhereAction(condition: Docs => Rep[Boolean]) = {
    docs.filter(condition).result.headOption
  }

  // Insert that returns the auto-generated REG_ID column.
  private def addAction(doc: Doc) = {
    (docs returning docs.map(_.regId)) += doc
  }
}
/** Slick schema for the DOCS table; columns mirror the [[Doc]] case class. */
trait SqlDocSchema extends SqlDocRecipientSchema {
  protected val database: SqlDatabase

  import database._
  import database.driver.api._

  // Table query used by the DAO for all DOCS operations.
  protected val docs = TableQuery[Docs]

  protected class Docs(tag: Tag) extends Table[Doc](tag, "DOCS") {
    // format: OFF
    def id = column[UUID]("ID", O.PrimaryKey)
    def regId = column[Int]("REG_ID", O.AutoInc)
    def preId = column[Option[UUID]]("PRE_ID")
    def postId = column[Option[UUID]]("POST_ID")
    def createdOn = column[DateTime]("CREATED_ON")
    def createdBy = column[UUID]("CREATED_BY")
    def senderDescription = column[Option[String]]("SENDER_DESCRIPTION")
    def description = column[Option[String]]("DESCRIPTION")
    def primaryRecipient = column[Option[String]]("PRIMARY_RECIPIENT")
    def secondaryRecipient = column[Option[String]]("SECONDARY_RECIPIENT")
    def scannedDocumentId = column[String]("SCAN_DOC_ID")
    def scannedDocumentName = column[String]("SCAN_DOC_NAME")
    def emailDocumentId = column[Option[String]]("EMAIL_DOC_ID")
    def emailDocumentName = column[Option[String]]("EMAIL_DOC_NAME")
    def emailId = column[Option[UUID]]("EMAIL_ID")
    def note = column[Option[String]]("NOTE")
    def saved = column[Boolean]("SAVED")
    def savedOn = column[Option[DateTime]]("SAVED_ON")
    def savedBy = column[Option[UUID]]("SAVED_BY")
    def deleted = column[Boolean]("DELETED")

    // Projection order must match the Doc constructor's parameter order.
    def * = (
      id,
      regId,
      preId,
      postId,
      createdOn,
      createdBy,
      senderDescription,
      description,
      primaryRecipient,
      secondaryRecipient,
      scannedDocumentId,
      scannedDocumentName,
      emailDocumentId,
      emailDocumentName,
      emailId,
      note,
      saved,
      savedOn,
      savedBy,
      deleted) <>
      ((Doc.apply _).tupled, Doc.unapply)

    // Recipients associated with this doc via the DOC_RECIPIENT join table.
    def recipients = DocRecipientQuery.filter(_.docId === id)
      .flatMap(_.recipientFK)
    // format: ON
  }
}
| tsechov/hclu-registry | backend/src/main/scala/hclu/hreg/dao/DocDao.scala | Scala | apache-2.0 | 3,131 |
package com.twitter.sbt
import scala.reflect.Manifest
import _root_.sbt._
/**
 * Adapts an existing [[DefaultProject]] to the [[StandardProject]] interface by
 * forwarding every setting to the `underlying` project.
 *
 * Settings are forwarded as defs so they are re-read from `underlying` on each
 * access. Actions that must execute in this project's context (`clean`,
 * `makePom`, `updateTask`) are re-wrapped as tasks that run the underlying
 * project's corresponding task.
 */
class WrappedDefaultProject(val underlying: DefaultProject)
  extends StandardProject(underlying.info)
{
  // Basic project coordinates and dependencies, forwarded verbatim.
  override def name = underlying.name
  override def version = underlying.version
  override def organization = underlying.organization
  override def scratch = underlying.scratch
  override def libraryDependencies = underlying.libraryDependencies
  override def subProjects = Map() ++ underlying.subProjects
  override def repositories = underlying.repositories

  // Ivy stuff.
  override def ivyUpdateConfiguration = underlying.ivyUpdateConfiguration
  override def ivyUpdateLogging = underlying.ivyUpdateLogging
  override def ivyRepositories = underlying.ivyRepositories
  override def otherRepositories = underlying.otherRepositories
  override def ivyValidate = underlying.ivyValidate
  override def ivyScala = underlying.ivyScala
  override def ivyCacheDirectory = underlying.ivyCacheDirectory
  override def ivyPaths = underlying.ivyPaths
  override def inlineIvyConfiguration = underlying.inlineIvyConfiguration
  override def ivyConfiguration = underlying.ivyConfiguration
  override def ivySbt = underlying.ivySbt
  override def ivyModule = underlying.ivyModule
  // Re-wrap as a task so the update executes via the underlying project.
  override def updateTask(module: => IvySbt#Module, configuration: => UpdateConfiguration) = task {
    underlying.updateTask(module, configuration).run
  }
  override def moduleSettings = underlying.moduleSettings
  override def inlineSettings = underlying.inlineSettings
  override def compatTestFramework = underlying.compatTestFramework
  override def defaultModuleSettings = underlying.defaultModuleSettings
  override def externalSettings = underlying.externalSettings
  override def outputPattern = underlying.outputPattern
  override def ivyXML = underlying.ivyXML
  override def pomExtra = underlying.pomExtra
  override def ivyConfigurations = underlying.ivyConfigurations
  override def extraDefaultConfigurations = underlying.extraDefaultConfigurations
  override def useIntegrationTestConfiguration = underlying.useIntegrationTestConfiguration
  override def defaultConfiguration = underlying.defaultConfiguration
  override def useMavenConfigurations = underlying.useMavenConfigurations
  override def useDefaultConfigurations = underlying.useDefaultConfigurations
  override def mainSourceRoots = underlying.mainSourceRoots
  override def updateModuleSettings = underlying.updateModuleSettings
  override def updateIvyModule = underlying.updateIvyModule
  override def deliverModuleSettings = underlying.deliverModuleSettings
  override def deliverIvyModule = underlying.deliverIvyModule
  override def publishModuleSettings = underlying.publishModuleSettings
  override def publishIvyModule = underlying.publishIvyModule
  override lazy val clean = task { underlying.clean.run }
  // override def cleanAction = underlying.cleanAction
  // override protected def updateAction = underlying.updateAction
  // override protected def cleanLibAction = underlying.cleanLibAction
  // override protected def cleanCacheAction = underlying.cleanCacheAction
  // override protected def deliverProjectDependencies = underlying.deliverProjectDependencies
  override def packageToPublishActions = underlying.packageToPublishActions
  override lazy val makePom = task {
    underlying.makePom.run
  }
  override def compileOptions =
    underlying.compileOptions map { opt => CompileOption(opt.asString) }
  override def compileOrder = underlying.compileOrder
  override def managedStyle = underlying.managedStyle
  override def fullUnmanagedClasspath(config: Configuration) =
    underlying.fullUnmanagedClasspath(config)
  override def managedClasspath(config: Configuration): PathFinder =
    underlying.managedClasspath(config)

  // Properties.
  /**
   * Wraps a property obtained from the underlying project in a Proxy so that
   * reads (`resolve`) and writes (`update`) delegate to the underlying
   * property. The by-name argument is resolved lazily and cached on first use,
   * matching the original `lazy val p` behavior of each accessor below.
   */
  private def proxyProperty[T](p: => Property[T]): Property[T] = {
    lazy val prop = p
    new Property[T] with Proxy {
      def self = prop
      def update(v: T) { self.update(v) }
      def resolve = self.resolve
    }
  }

  override def property[T](implicit manifest: Manifest[T], format: Format[T]) =
    proxyProperty(underlying.property(manifest, format))
  override def propertyLocal[T](implicit manifest: Manifest[T], format: Format[T]) =
    proxyProperty(underlying.propertyLocal(manifest, format))
  override def propertyOptional[T]
    (defaultValue: => T)
    (implicit manifest: Manifest[T], format: Format[T]) =
    proxyProperty(underlying.propertyOptional(defaultValue)(manifest, format))
  override def system[T](propName: String)(implicit format: Format[T]) =
    proxyProperty(underlying.system(propName)(format))
  override def systemOptional[T]
    (propName: String, defaultValue: => T)
    (implicit format: Format[T]) =
    proxyProperty(underlying.systemOptional(propName, defaultValue)(format))

  // TODO: as needed.
}
| twitter/standard-project | src/main/scala/com/twitter/sbt/ProjectWrapper.scala | Scala | apache-2.0 | 5,521 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Locale, ServiceConfigurationError, ServiceLoader}
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, TypeUtils}
import org.apache.spark.sql.connector.catalog.TableProvider
import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.DataWritingCommand
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.{HadoopFSUtils, ThreadUtils, Utils}
/**
* The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to
* acting as the canonical set of parameters that can describe a Data Source, this class is used to
* resolve a description to a concrete implementation that can be used in a query plan
* (either batch or streaming) or to write out data using an external library.
*
* From an end user's perspective a DataSource description can be created explicitly using
* [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is
* used when resolving a description from a metastore to a concrete implementation.
*
* Many of the arguments to this class are optional, though depending on the specific API being used
* these optional arguments might be filled in during resolution using either inference or external
* metadata. For example, when reading a partitioned table from a file system, partition columns
* will be inferred from the directory layout even if they are not specified.
*
* @param paths A list of file system paths that hold data. These will be globbed before if
* the "__globPaths__" option is true, and will be qualified. This option only works
* when reading from a [[FileFormat]].
* @param userSpecifiedSchema An optional specification of the schema of the data. When present
* we skip attempting to infer the schema.
* @param partitionColumns A list of column names that the relation is partitioned by. This list is
* generally empty during the read path, unless this DataSource is managed
* by Hive. In these cases, during `resolveRelation`, we will call
* `getOrInferFileFormatSchema` for file based DataSources to infer the
* partitioning. In other cases, if this list is empty, then this table
* is unpartitioned.
* @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
* @param catalogTable Optional catalog table reference that can be used to push down operations
* over the datasource to the catalog service.
*/
case class DataSource(
    sparkSession: SparkSession,
    className: String,
    paths: Seq[String] = Nil,
    userSpecifiedSchema: Option[StructType] = None,
    partitionColumns: Seq[String] = Seq.empty,
    bucketSpec: Option[BucketSpec] = None,
    options: Map[String, String] = Map.empty,
    catalogTable: Option[CatalogTable] = None) extends Logging {

  case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String])

  lazy val providingClass: Class[_] = {
    val cls = DataSource.lookupDataSource(className, sparkSession.sessionState.conf)
    // `providingClass` is used for resolving data source relation for catalog tables.
    // As now catalog for data source V2 is under development, here we fall back all the
    // [[FileDataSourceV2]] to [[FileFormat]] to guarantee the current catalog works.
    // [[FileDataSourceV2]] will still be used if we call the load()/save() method in
    // [[DataFrameReader]]/[[DataFrameWriter]], since they use method `lookupDataSource`
    // instead of `providingClass`.
    // Use the no-arg constructor rather than the deprecated `Class.newInstance`
    // (which swallows checked exceptions); this also matches `providingInstance()`.
    cls.getConstructor().newInstance() match {
      case f: FileDataSourceV2 => f.fallbackFileFormat
      case _ => cls
    }
  }

  private def providingInstance() = providingClass.getConstructor().newInstance()

  private def newHadoopConfiguration(): Configuration =
    sparkSession.sessionState.newHadoopConfWithOptions(options)

  lazy val sourceInfo: SourceInfo = sourceSchema()
  private val caseInsensitiveOptions = CaseInsensitiveMap(options)
  private val equality = sparkSession.sessionState.conf.resolver

  /**
   * Whether or not paths should be globbed before being used to access files.
   */
  def globPaths: Boolean = {
    options.get(DataSource.GLOB_PATHS_KEY)
      .map(_ == "true")
      .getOrElse(true)
  }

  bucketSpec.foreach { bucket =>
    SchemaUtils.checkColumnNameDuplication(
      bucket.bucketColumnNames, "in the bucket definition", equality)
    SchemaUtils.checkColumnNameDuplication(
      bucket.sortColumnNames, "in the sort definition", equality)
  }

  /**
   * Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer
   * it. In the read path, only managed tables by Hive provide the partition columns properly when
   * initializing this class. All other file based data sources will try to infer the partitioning,
   * and then cast the inferred types to user specified dataTypes if the partition columns exist
   * inside `userSpecifiedSchema`, otherwise we can hit data corruption bugs like SPARK-18510.
   * This method will try to skip file scanning whether `userSpecifiedSchema` and
   * `partitionColumns` are provided. Here are some code paths that use this method:
   *   1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns
   *   2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the
   *      dataTypes provided in `userSpecifiedSchema` if they exist or fallback to inferred
   *      dataType if they don't.
   *   3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to
   *      provide the schema. Here, we also perform partition inference like 2, and try to use
   *      dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use
   *      this information, therefore calls to this method should be very cheap, i.e. there won't
   *      be any further inference in any triggers.
   *
   * @param format the file format object for this DataSource
   * @param getFileIndex [[InMemoryFileIndex]] for getting partition schema and file list
   * @return A pair of the data schema (excluding partition columns) and the schema of the partition
   *         columns.
   */
  private def getOrInferFileFormatSchema(
      format: FileFormat,
      getFileIndex: () => InMemoryFileIndex): (StructType, StructType) = {
    lazy val tempFileIndex = getFileIndex()

    val partitionSchema = if (partitionColumns.isEmpty) {
      // Try to infer partitioning, because no DataSource in the read path provides the partitioning
      // columns properly unless it is a Hive DataSource
      tempFileIndex.partitionSchema
    } else {
      // maintain old behavior before SPARK-18510. If userSpecifiedSchema is empty used inferred
      // partitioning
      if (userSpecifiedSchema.isEmpty) {
        val inferredPartitions = tempFileIndex.partitionSchema
        inferredPartitions
      } else {
        val partitionFields = partitionColumns.map { partitionColumn =>
          userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse {
            val inferredPartitions = tempFileIndex.partitionSchema
            val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn))
            if (inferredOpt.isDefined) {
              logDebug(
                s"""Type of partition column: $partitionColumn not found in specified schema
                   |for $format.
                   |User Specified Schema
                   |=====================
                   |${userSpecifiedSchema.orNull}
                   |
                   |Falling back to inferred dataType if it exists.
                 """.stripMargin)
            }
            inferredOpt
          }.getOrElse {
            throw QueryCompilationErrors.partitionColumnNotSpecifiedError(
              format.toString, partitionColumn)
          }
        }
        StructType(partitionFields)
      }
    }

    val dataSchema = userSpecifiedSchema.map { schema =>
      StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name))))
    }.orElse {
      // Remove "path" option so that it is not added to the paths returned by
      // `tempFileIndex.allFiles()`.
      format.inferSchema(
        sparkSession,
        caseInsensitiveOptions - "path",
        tempFileIndex.allFiles())
    }.getOrElse {
      throw QueryCompilationErrors.dataSchemaNotSpecifiedError(format.toString)
    }

    // We just print a warning message if the data schema and partition schema have the duplicate
    // columns. This is because we allow users to do so in the previous Spark releases and
    // we have the existing tests for the cases (e.g., `ParquetHadoopFsRelationSuite`).
    // See SPARK-18108 and SPARK-21144 for related discussions.
    try {
      SchemaUtils.checkColumnNameDuplication(
        (dataSchema ++ partitionSchema).map(_.name),
        "in the data schema and the partition schema",
        equality)
    } catch {
      case e: AnalysisException => logWarning(e.getMessage)
    }

    (dataSchema, partitionSchema)
  }

  /** Returns the name and schema of the source that can be used to continually read data. */
  private def sourceSchema(): SourceInfo = {
    providingInstance() match {
      case s: StreamSourceProvider =>
        val (name, schema) = s.sourceSchema(
          sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions)
        SourceInfo(name, schema, Nil)

      case format: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })

        // Check whether the path exists if it is not a glob pattern.
        // For glob pattern, we do not check it because the glob pattern might only make sense
        // once the streaming job starts and some upstream source starts dropping data.
        val hdfsPath = new Path(path)
        if (!globPaths || !SparkHadoopUtil.get.isGlobPath(hdfsPath)) {
          val fs = hdfsPath.getFileSystem(newHadoopConfiguration())
          if (!fs.exists(hdfsPath)) {
            throw QueryCompilationErrors.dataPathNotExistError(path)
          }
        }

        val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference
        val isTextSource = providingClass == classOf[text.TextFileFormat]
        // If the schema inference is disabled, only text sources require schema to be specified
        if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) {
          throw QueryExecutionErrors.createStreamingSourceNotSpecifySchemaError()
        }

        val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, () => {
          // The operations below are expensive therefore try not to do them if we don't need to,
          // e.g., in streaming mode, we have already inferred and registered partition columns,
          // we will never have to materialize the lazy val below
          val globbedPaths =
            checkAndGlobPathIfNecessary(checkEmptyGlobPath = false, checkFilesExist = false)
          createInMemoryFileIndex(globbedPaths)
        })
        val forceNullable =
          sparkSession.sessionState.conf.getConf(SQLConf.FILE_SOURCE_SCHEMA_FORCE_NULLABLE)
        val sourceDataSchema = if (forceNullable) dataSchema.asNullable else dataSchema
        SourceInfo(
          s"FileSource[$path]",
          StructType(sourceDataSchema ++ partitionSchema),
          partitionSchema.fieldNames)

      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "reading")
    }
  }

  /** Returns a source that can be used to continually read data. */
  def createSource(metadataPath: String): Source = {
    providingInstance() match {
      case s: StreamSourceProvider =>
        s.createSource(
          sparkSession.sqlContext,
          metadataPath,
          userSpecifiedSchema,
          className,
          caseInsensitiveOptions)

      case format: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })
        new FileStreamSource(
          sparkSession = sparkSession,
          path = path,
          fileFormatClassName = className,
          schema = sourceInfo.schema,
          partitionColumns = sourceInfo.partitionColumns,
          metadataPath = metadataPath,
          options = caseInsensitiveOptions)
      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "reading")
    }
  }

  /** Returns a sink that can be used to continually write data. */
  def createSink(outputMode: OutputMode): Sink = {
    providingInstance() match {
      case s: StreamSinkProvider =>
        s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode)
      case fileFormat: FileFormat =>
        val path = caseInsensitiveOptions.getOrElse("path", {
          throw QueryExecutionErrors.dataPathNotSpecifiedError()
        })
        if (outputMode != OutputMode.Append) {
          throw QueryCompilationErrors.dataSourceOutputModeUnsupportedError(className, outputMode)
        }
        new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions)
      case _ =>
        throw QueryExecutionErrors.streamedOperatorUnsupportedByDataSourceError(
          className, "writing")
    }
  }

  /**
   * Create a resolved [[BaseRelation]] that can be used to read data from or write data into this
   * [[DataSource]]
   *
   * @param checkFilesExist Whether to confirm that the files exist when generating the
   *                        non-streaming file based datasource. StructuredStreaming jobs already
   *                        list file existence, and when generating incremental jobs, the batch
   *                        is considered as a non-streaming file based data source. Since we know
   *                        that files already exist, we don't need to check them again.
   */
  def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = {
    val relation = (providingInstance(), userSpecifiedSchema) match {
      // TODO: Throw when too much is given.
      case (dataSource: SchemaRelationProvider, Some(schema)) =>
        dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema)
      case (dataSource: RelationProvider, None) =>
        dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
      case (_: SchemaRelationProvider, None) =>
        throw QueryCompilationErrors.schemaNotSpecifiedForSchemaRelationProviderError(className)
      case (dataSource: RelationProvider, Some(schema)) =>
        val baseRelation =
          dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
        if (!DataType.equalsIgnoreCompatibleNullability(baseRelation.schema, schema)) {
          throw QueryCompilationErrors.userSpecifiedSchemaMismatchActualSchemaError(
            schema, baseRelation.schema)
        }
        baseRelation

      // We are reading from the results of a streaming query. Load files from the metadata log
      // instead of listing them using HDFS APIs. Note that the config
      // `spark.sql.streaming.fileStreamSink.metadata.ignored` can be enabled to ignore the
      // metadata log.
      case (format: FileFormat, _)
          if FileStreamSink.hasMetadata(
            caseInsensitiveOptions.get("path").toSeq ++ paths,
            newHadoopConfiguration(),
            sparkSession.sessionState.conf) =>
        val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head)
        val fileCatalog = new MetadataLogFileIndex(sparkSession, basePath,
          caseInsensitiveOptions, userSpecifiedSchema)
        val dataSchema = userSpecifiedSchema.orElse {
          // Remove "path" option so that it is not added to the paths returned by
          // `fileCatalog.allFiles()`.
          format.inferSchema(
            sparkSession,
            caseInsensitiveOptions - "path",
            fileCatalog.allFiles())
        }.getOrElse {
          throw QueryCompilationErrors.dataSchemaNotSpecifiedError(
            format.toString, fileCatalog.allFiles().mkString(","))
        }

        HadoopFsRelation(
          fileCatalog,
          partitionSchema = fileCatalog.partitionSchema,
          dataSchema = dataSchema,
          bucketSpec = None,
          format,
          caseInsensitiveOptions)(sparkSession)

      // This is a non-streaming file based datasource.
      case (format: FileFormat, _) =>
        val useCatalogFileIndex = sparkSession.sqlContext.conf.manageFilesourcePartitions &&
          catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog &&
          catalogTable.get.partitionColumnNames.nonEmpty
        val (fileCatalog, dataSchema, partitionSchema) = if (useCatalogFileIndex) {
          val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes
          val index = new CatalogFileIndex(
            sparkSession,
            catalogTable.get,
            catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize))
          (index, catalogTable.get.dataSchema, catalogTable.get.partitionSchema)
        } else {
          val globbedPaths = checkAndGlobPathIfNecessary(
            checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
          val index = createInMemoryFileIndex(globbedPaths)
          val (resultDataSchema, resultPartitionSchema) =
            getOrInferFileFormatSchema(format, () => index)
          (index, resultDataSchema, resultPartitionSchema)
        }

        HadoopFsRelation(
          fileCatalog,
          partitionSchema = partitionSchema,
          dataSchema = dataSchema.asNullable,
          bucketSpec = bucketSpec,
          format,
          caseInsensitiveOptions)(sparkSession)

      case _ =>
        throw QueryCompilationErrors.invalidDataSourceError(className)
    }

    relation match {
      case hs: HadoopFsRelation =>
        SchemaUtils.checkSchemaColumnNameDuplication(
          hs.dataSchema,
          "in the data schema",
          equality)
        SchemaUtils.checkSchemaColumnNameDuplication(
          hs.partitionSchema,
          "in the partition schema",
          equality)
        DataSourceUtils.verifySchema(hs.fileFormat, hs.dataSchema)
      case _ =>
        SchemaUtils.checkSchemaColumnNameDuplication(
          relation.schema,
          "in the data schema",
          equality)
    }

    relation
  }

  /**
   * Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]].
   * The returned command is unresolved and need to be analyzed.
   */
  private def planForWritingFileFormat(
      format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = {
    // Don't glob path for the write path. The contracts here are:
    //  1. Only one output path can be specified on the write path;
    //  2. Output path must be a legal HDFS style file system path;
    //  3. It's OK that the output path doesn't exist yet;
    val allPaths = paths ++ caseInsensitiveOptions.get("path")
    val outputPath = if (allPaths.length == 1) {
      val path = new Path(allPaths.head)
      val fs = path.getFileSystem(newHadoopConfiguration())
      path.makeQualified(fs.getUri, fs.getWorkingDirectory)
    } else {
      throw QueryExecutionErrors.multiplePathsSpecifiedError(allPaths)
    }

    val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
    PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive)

    val fileIndex = catalogTable.map(_.identifier).map { tableIdent =>
      sparkSession.table(tableIdent).queryExecution.analyzed.collect {
        case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location
      }.head
    }
    // For partitioned relation r, r.schema's column ordering can be different from the column
    // ordering of data.logicalPlan (partition columns are all moved after data column). This
    // will be adjusted within InsertIntoHadoopFsRelation.
    InsertIntoHadoopFsRelationCommand(
      outputPath = outputPath,
      staticPartitions = Map.empty,
      ifPartitionNotExists = false,
      partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted),
      bucketSpec = bucketSpec,
      fileFormat = format,
      options = options,
      query = data,
      mode = mode,
      catalogTable = catalogTable,
      fileIndex = fileIndex,
      outputColumnNames = data.output.map(_.name))
  }

  /**
   * Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for
   * the following reading.
   *
   * @param mode The save mode for this writing.
   * @param data The input query plan that produces the data to be written. Note that this plan
   *             is analyzed and optimized.
   * @param outputColumnNames The original output column names of the input query plan. The
   *                          optimizer may not preserve the output column's names' case, so we need
   *                          this parameter instead of `data.output`.
   * @param physicalPlan The physical plan of the input query plan. We should run the writing
   *                     command with this physical plan instead of creating a new physical plan,
   *                     so that the metrics can be correctly linked to the given physical plan and
   *                     shown in the web UI.
   */
  def writeAndRead(
      mode: SaveMode,
      data: LogicalPlan,
      outputColumnNames: Seq[String],
      physicalPlan: SparkPlan,
      metrics: Map[String, SQLMetric]): BaseRelation = {
    val outputColumns = DataWritingCommand.logicalPlanOutputWithNames(data, outputColumnNames)
    providingInstance() match {
      case dataSource: CreatableRelationProvider =>
        disallowWritingIntervals(outputColumns.map(_.dataType), forbidAnsiIntervals = true)
        dataSource.createRelation(
          sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data))
      case format: FileFormat =>
        disallowWritingIntervals(outputColumns.map(_.dataType), forbidAnsiIntervals = false)
        val cmd = planForWritingFileFormat(format, mode, data)
        val resolvedPartCols = cmd.partitionColumns.map { col =>
          // The partition columns created in `planForWritingFileFormat` should always be
          // `UnresolvedAttribute` with a single name part.
          assert(col.isInstanceOf[UnresolvedAttribute])
          val unresolved = col.asInstanceOf[UnresolvedAttribute]
          assert(unresolved.nameParts.length == 1)
          val name = unresolved.nameParts.head
          outputColumns.find(a => equality(a.name, name)).getOrElse {
            throw QueryCompilationErrors.cannotResolveAttributeError(
              name, data.output.map(_.name).mkString(", "))
          }
        }
        val resolved = cmd.copy(
          partitionColumns = resolvedPartCols,
          outputColumnNames = outputColumnNames)
        resolved.run(sparkSession, physicalPlan)
        DataWritingCommand.propogateMetrics(sparkSession.sparkContext, resolved, metrics)
        // Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring
        copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation()
      case _ => sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
    }
  }

  /**
   * Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]].
   */
  def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = {
    providingInstance() match {
      case dataSource: CreatableRelationProvider =>
        disallowWritingIntervals(data.schema.map(_.dataType), forbidAnsiIntervals = true)
        SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode)
      case format: FileFormat =>
        disallowWritingIntervals(data.schema.map(_.dataType), forbidAnsiIntervals = false)
        DataSource.validateSchema(data.schema)
        planForWritingFileFormat(format, mode, data)
      case _ => sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
    }
  }

  /** Returns an [[InMemoryFileIndex]] that can be used to get partition schema and file list. */
  private def createInMemoryFileIndex(globbedPaths: Seq[Path]): InMemoryFileIndex = {
    val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
    new InMemoryFileIndex(
      sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache)
  }

  /**
   * Checks and returns files in all the paths.
   */
  private def checkAndGlobPathIfNecessary(
      checkEmptyGlobPath: Boolean,
      checkFilesExist: Boolean): Seq[Path] = {
    val allPaths = caseInsensitiveOptions.get("path") ++ paths
    DataSource.checkAndGlobPathIfNecessary(allPaths.toSeq, newHadoopConfiguration(),
      checkEmptyGlobPath, checkFilesExist, enableGlobbing = globPaths)
  }

  /**
   * Fails with an analysis error if any of the given data types is (or contains) an interval
   * type, since interval values cannot be stored in external storage.
   */
  private def disallowWritingIntervals(
      dataTypes: Seq[DataType],
      forbidAnsiIntervals: Boolean): Unit = {
    dataTypes.foreach(
      TypeUtils.invokeOnceForInterval(_, forbidAnsiIntervals) {
        throw QueryCompilationErrors.cannotSaveIntervalIntoExternalStorageError()
    })
  }
}
object DataSource extends Logging {
/** A map to maintain backward compatibility in case we move data sources around. */
private val backwardCompatibilityMap: Map[String, String] = {
val jdbc = classOf[JdbcRelationProvider].getCanonicalName
val json = classOf[JsonFileFormat].getCanonicalName
val parquet = classOf[ParquetFileFormat].getCanonicalName
val csv = classOf[CSVFileFormat].getCanonicalName
val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat"
val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat"
val nativeOrc = classOf[OrcFileFormat].getCanonicalName
val socket = classOf[TextSocketSourceProvider].getCanonicalName
val rate = classOf[RateStreamProvider].getCanonicalName
Map(
"org.apache.spark.sql.jdbc" -> jdbc,
"org.apache.spark.sql.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc" -> jdbc,
"org.apache.spark.sql.json" -> json,
"org.apache.spark.sql.json.DefaultSource" -> json,
"org.apache.spark.sql.execution.datasources.json" -> json,
"org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json,
"org.apache.spark.sql.parquet" -> parquet,
"org.apache.spark.sql.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.hive.orc.DefaultSource" -> orc,
"org.apache.spark.sql.hive.orc" -> orc,
"org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc,
"org.apache.spark.sql.execution.datasources.orc" -> nativeOrc,
"org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm,
"org.apache.spark.ml.source.libsvm" -> libsvm,
"com.databricks.spark.csv" -> csv,
"org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket,
"org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate
)
}
/**
* Class that were removed in Spark 2.0. Used to detect incompatibility libraries for Spark 2.0.
*/
private val spark2RemovedClasses = Set(
"org.apache.spark.sql.DataFrame",
"org.apache.spark.sql.sources.HadoopFsRelationProvider",
"org.apache.spark.Logging")
  /**
   * Given a provider name, look up the data source class definition.
   *
   * Resolution proceeds in three steps:
   *  1. normalize the provider name (legacy aliases via `backwardCompatibilityMap`,
   *     the "orc" implementation switch, and the Databricks Avro replacement);
   *  2. scan registered [[DataSourceRegister]] implementations for a matching short name;
   *  3. if none match, try the provider as a fully qualified class name, also trying
   *     the conventional "<provider>.DefaultSource" class.
   */
  def lookupDataSource(provider: String, conf: SQLConf): Class[_] = {
    val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match {
      // "orc" maps to the native (DSv2) or the Hive implementation depending on
      // SQLConf.ORC_IMPLEMENTATION ("native" / "hive").
      case name if name.equalsIgnoreCase("orc") &&
          conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" =>
        classOf[OrcDataSourceV2].getCanonicalName
      case name if name.equalsIgnoreCase("orc") &&
          conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" =>
        "org.apache.spark.sql.hive.orc.OrcFileFormat"
      case "com.databricks.spark.avro" if conf.replaceDatabricksSparkAvroEnabled =>
        "org.apache.spark.sql.avro.AvroFileFormat"
      case name => name
    }
    // "<provider>.DefaultSource" is the conventional fallback class name.
    val provider2 = s"$provider1.DefaultSource"
    val loader = Utils.getContextOrSparkClassLoader
    val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader)
    try {
      serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match {
        // the provider format did not match any given registered aliases
        case Nil =>
          try {
            // Fall back to treating the provider as a fully qualified class name,
            // then with the ".DefaultSource" suffix.
            Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match {
              case Success(dataSource) =>
                // Found the data source using fully qualified path
                dataSource
              case Failure(error) =>
                // Neither class loaded: raise a provider-specific error so the user
                // gets actionable guidance (enable Hive, add the avro/kafka package, ...).
                if (provider1.startsWith("org.apache.spark.sql.hive.orc")) {
                  throw QueryCompilationErrors.orcNotUsedWithHiveEnabledError()
                } else if (provider1.toLowerCase(Locale.ROOT) == "avro" ||
                  provider1 == "com.databricks.spark.avro" ||
                  provider1 == "org.apache.spark.sql.avro") {
                  throw QueryCompilationErrors.failedToFindAvroDataSourceError(provider1)
                } else if (provider1.toLowerCase(Locale.ROOT) == "kafka") {
                  throw QueryCompilationErrors.failedToFindKafkaDataSourceError(provider1)
                } else {
                  throw QueryExecutionErrors.failedToFindDataSourceError(provider1, error)
                }
            }
          } catch {
            case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal
              // NoClassDefFoundError's class name uses "/" rather than "." for packages
              val className = e.getMessage.replaceAll("/", ".")
              if (spark2RemovedClasses.contains(className)) {
                // The missing class was removed in Spark 2.0: the data source jar was
                // compiled against Spark 1.x.
                throw QueryExecutionErrors.removedClassInSpark2Error(className, e)
              } else {
                throw e
              }
          }
        case head :: Nil =>
          // there is exactly one registered alias
          head.getClass
        case sources =>
          // There are multiple registered aliases for the input. If there is single datasource
          // that has "org.apache.spark" package in the prefix, we use it considering it is an
          // internal datasource within Spark.
          val sourceNames = sources.map(_.getClass.getName)
          val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark"))
          if (internalSources.size == 1) {
            logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " +
              s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).")
            internalSources.head.getClass
          } else {
            throw QueryCompilationErrors.findMultipleDataSourceError(provider1, sourceNames)
          }
      }
    } catch {
      // ServiceLoader may fail while instantiating a registered provider whose
      // dependencies are missing.
      case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] =>
        // NoClassDefFoundError's class name uses "/" rather than "." for packages
        val className = e.getCause.getMessage.replaceAll("/", ".")
        if (spark2RemovedClasses.contains(className)) {
          throw QueryExecutionErrors.incompatibleDataSourceRegisterError(e)
        } else {
          throw e
        }
    }
  }
/**
* Returns an optional [[TableProvider]] instance for the given provider. It returns None if
* there is no corresponding Data Source V2 implementation, or the provider is configured to
* fallback to Data Source V1 code path.
*/
def lookupDataSourceV2(provider: String, conf: SQLConf): Option[TableProvider] = {
val useV1Sources = conf.getConf(SQLConf.USE_V1_SOURCE_LIST).toLowerCase(Locale.ROOT)
.split(",").map(_.trim)
val cls = lookupDataSource(provider, conf)
cls.newInstance() match {
case d: DataSourceRegister if useV1Sources.contains(d.shortName()) => None
case t: TableProvider
if !useV1Sources.contains(cls.getCanonicalName.toLowerCase(Locale.ROOT)) =>
Some(t)
case _ => None
}
}
  /**
   * The key in the "options" map for deciding whether or not to glob paths before use.
   * (The double-underscore naming marks this as an internal, reserved option key.)
   */
  val GLOB_PATHS_KEY = "__globPaths__"
/**
* Checks and returns files in all the paths.
*/
private[sql] def checkAndGlobPathIfNecessary(
pathStrings: Seq[String],
hadoopConf: Configuration,
checkEmptyGlobPath: Boolean,
checkFilesExist: Boolean,
numThreads: Integer = 40,
enableGlobbing: Boolean): Seq[Path] = {
val qualifiedPaths = pathStrings.map { pathString =>
val path = new Path(pathString)
val fs = path.getFileSystem(hadoopConf)
path.makeQualified(fs.getUri, fs.getWorkingDirectory)
}
// Split the paths into glob and non glob paths, because we don't need to do an existence check
// for globbed paths.
val (globPaths, nonGlobPaths) = qualifiedPaths.partition(SparkHadoopUtil.get.isGlobPath)
val globbedPaths =
try {
ThreadUtils.parmap(globPaths, "globPath", numThreads) { globPath =>
val fs = globPath.getFileSystem(hadoopConf)
val globResult = if (enableGlobbing) {
SparkHadoopUtil.get.globPath(fs, globPath)
} else {
qualifiedPaths
}
if (checkEmptyGlobPath && globResult.isEmpty) {
throw QueryCompilationErrors.dataPathNotExistError(globPath.toString)
}
globResult
}.flatten
} catch {
case e: SparkException => throw e.getCause
}
if (checkFilesExist) {
try {
ThreadUtils.parmap(nonGlobPaths, "checkPathsExist", numThreads) { path =>
val fs = path.getFileSystem(hadoopConf)
if (!fs.exists(path)) {
throw QueryCompilationErrors.dataPathNotExistError(path.toString)
}
}
} catch {
case e: SparkException => throw e.getCause
}
}
val allPaths = globbedPaths ++ nonGlobPaths
if (checkFilesExist) {
val (filteredOut, filteredIn) = allPaths.partition { path =>
HadoopFSUtils.shouldFilterOutPathName(path.getName)
}
if (filteredIn.isEmpty) {
logWarning(
s"All paths were ignored:\\n ${filteredOut.mkString("\\n ")}")
} else {
logDebug(
s"Some paths were ignored:\\n ${filteredOut.mkString("\\n ")}")
}
}
allPaths
}
/**
* When creating a data source table, the `path` option has a special meaning: the table location.
* This method extracts the `path` option and treat it as table location to build a
* [[CatalogStorageFormat]]. Note that, the `path` option is removed from options after this.
*/
def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = {
val path = CaseInsensitiveMap(options).get("path")
val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path")
CatalogStorageFormat.empty.copy(
locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath.toMap)
}
/**
* Called before writing into a FileFormat based data source to make sure the
* supplied schema is not empty.
* @param schema
*/
def validateSchema(schema: StructType): Unit = {
def hasEmptySchema(schema: StructType): Boolean = {
schema.size == 0 || schema.exists {
case StructField(_, b: StructType, _, _) => hasEmptySchema(b)
case _ => false
}
}
if (hasEmptySchema(schema)) {
throw QueryCompilationErrors.writeEmptySchemasUnsupportedByDataSourceError()
}
}
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala | Scala | apache-2.0 | 38,880 |
package org.typeclassopedia
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import scala.language.implicitConversions
import org.typeclassopedia.std.Lists.{given _, _}
import org.typeclassopedia.std.Options.{given _, _}
class CopointedSpec extends AnyFlatSpec with Matchers {
  // Each test checks that `extract` returns the single wrapped value. The syntax is
  // supplied by the Options/Lists givens imported above; Blub's instance is presumably
  // defined elsewhere in the test sources -- confirm.
  "A copointed functor" must "extract" in { Option(1).extract mustEqual 1 }
  it must "extract on List" in { List(1).extract mustEqual 1 }
  it must "extract on Blub" in { Blub(1).extract mustEqual 1 }
}
| channingwalton/typeclassopedia | src/test/scala/org/typeclassopedia/CopointedSpec.scala | Scala | mit | 521 |
/*
* Copyright (c) 2011, Daniel Spiewak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* - Neither the name of "Anti-XML" nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.codecommit
package antixml
/**
* Pimp container for the explicit conversions into Anti-XML types. Out of the
* box, conversions are provided from `scala.xml` types. However, this mechanism
* is very extensible due to the use of a typeclass ([[com.codecommit.antixml.XMLConvertable]])
* to represent the actual conversion. Thus, it is possible to add conversions
* by defining an implicit instance of the typeclass and having it in scope. It
* is even possible to override the built-in conversions for `scala.xml` types
* simply by shadowing the conversions for types like [[scala.xml.Elem]]. The
* built-in conversions are defined in such a way that Scala's implicit resolution
* will give precedence to almost anything you define, as long as it is somehow
* in scope.
*/
class Converter[A](a: A) {
  /**
   * Converts the wrapped value of type `A` into some result type `B` (typically a type
   * in the Anti-XML API). No conversion work happens here: the method simply delegates
   * to the `apply` method of whichever [[com.codecommit.antixml.XMLConvertable]]
   * instance is supplied, implicitly or explicitly, by the caller.
   *
   * '''Note:''' if no conversion for the target type is in scope the compiler rejects
   * the call, and if two conversions are in scope with equal implicit precedence the
   * call is rejected as ambiguous. In either case the conversion can always be passed
   * explicitly.
   *
   * @see [[com.codecommit.antixml.XMLConvertable]]
   * @usecase def convert: Node
   */
  def convert[B](implicit conversion: XMLConvertable[A, B]): B = conversion.apply(a)
}
/**
 * Typeclass definition for conversions used by the [[com.codecommit.antixml.Converter]] pimp.
 * Note that this type is ''exactly'' isomorphic to [[scala.Function1]], right
 * down to the method name (`apply`). Normally, such a class would in fact extend
 * `A => B`, rather than simply emulating its interface. However, because most
 * instances of `XMLConvertable` will be implicit, we cannot blithely extend
 * `Function1`. To do so would pollute the scope with an unexpected proliferation
 * of ''implicit'' conversions which would be automatically injected by the Scala
 * compiler, rather than allowing us to tag them ''explicitly'' using the `convert` method.
 *
 * @tparam A the source type (contravariant: a convertable for a supertype also works)
 * @tparam B the result type (covariant: the result may be widened)
 * @see [[com.codecommit.antixml.Converter]]
 */
trait XMLConvertable[-A, +B] { // note: doesn't extend Function1 to avoid coercion
  /**
   * Convert a value of type `A` into a (hopefully equivalent) value of type `B`.
   */
  def apply(a: A): B
}
/**
* Contains the built-in explicit conversions into Anti-XML. Currently, these
* conversions only cover types in `scala.xml`. This may be expanded in future.
*
* All of the members in this object are implicit, and thus it is rare for a user
* to need to access them directly. The membership is contrived in such a way
* that the implicit resolution will use the following precedence order:
*
* <ul>
* <li>`ElemConvertable`</li>
* <li>`TextConvertable`</li>
* <li>`EntityRefConvertable`</li>
* <li>`NodeConvertable`</li>
* <li>`NodeSeqConvertable`</li>
* </ul>
*
* This corresponds with the roughly-intuitive conversion precedence. Thus, if
* we have a value of type [[scala.xml.Elem]] and we invoke the `convert` method on
* that value, the result will be of type [[com.codecommit.antixml.Elem]]. However,
* if we take that same value and ascribe it the type of [[scala.xml.Node]],
* the `convert` method will return a value of type [[com.codecommit.antixml.Node]].
* Finally, we can take this same value and ascribe it the even less-specific type
* of [[scala.xml.NodeSeq]] (or even [[scala.Seq]]`[`[[scala.xml.Node]]`]`, for
* that matter). Invoking the `convert` method on this maximally-widened type will
* produce a value of type [[com.codecommit.antixml.Group]]`[`[[com.codecommit.antixml.Node]]`]`.
* Thus, the most specific conversion is chosen in all cases.
*/
object XMLConvertable extends SecondPrecedenceConvertables {
  /**
   * Unfolds a namespace-binding chain into a Stream by following `parent` links until
   * the terminal null binding is reached.
   */
  def scope2stream(nb: xml.NamespaceBinding): Stream[xml.NamespaceBinding] = {
    if (nb == null)
      Stream.empty
    else
      Stream.cons(nb, scope2stream(nb.parent))
  }
  implicit object ElemConvertable extends XMLConvertable[xml.Elem, Elem] {
    def apply(e: xml.Elem) = {
      // Fold the scala.xml attribute list into Anti-XML Attributes. `foldLeft` replaces
      // the deprecated `/:` operator; attribute kinds other than prefixed/unprefixed
      // (e.g. the terminal Null) are skipped.
      val attrs = e.attributes.foldLeft(Attributes()) {
        case (attr, pa: xml.PrefixedAttribute) => attr + (QName(Option(pa.pre), pa.key) -> pa.value.mkString)
        case (attr, ua: xml.UnprefixedAttribute) => attr + (ua.key -> ua.value.mkString)
        case (attr, _) => attr
      }
      val children = NodeSeqConvertable(xml.NodeSeq fromSeq e.child)
      // Drop the terminal empty binding (both prefix and uri null), then rebuild the
      // namespace chain; a null prefix denotes the default namespace.
      val scopes = scope2stream(e.scope).toList.filter(x => x.prefix != null || x.uri != null)
      val namespaceBindings = scopes.foldLeft(NamespaceBinding.empty)((parent, t) => if (t.prefix == null) parent.append(t.uri) else parent.append(t.prefix, t.uri))
      val prefix = Option(e.prefix)
      Elem(prefix, e.label, attrs, namespaceBindings, children)
    }
  }
  implicit object TextConvertable extends XMLConvertable[xml.Atom[String], Text] {
    def apply(t: xml.Atom[String]) = Text(t.text)
  }
  implicit object EntityRefConvertable extends XMLConvertable[xml.EntityRef, EntityRef] {
    def apply(ref: xml.EntityRef) = EntityRef(ref.entityName)
  }
}
// it really amazes me that this even works
private[antixml] sealed trait SecondPrecedenceConvertables extends ThirdPrecedenceConvertables { this: XMLConvertable.type =>
  implicit object NodeConvertable extends XMLConvertable[xml.Node, Node] {
    def apply(n: xml.Node) = n match {
      case e: xml.Elem => ElemConvertable(e)
      // NOTE(review): the [String] type argument is erased at runtime, so this case
      // matches ANY xml.Atom (Text, PCData, numeric atoms, ...) -- confirm that
      // converting all atoms through TextConvertable is intended.
      case a: xml.Atom[String] => TextConvertable(a)
      case r: xml.EntityRef => EntityRefConvertable(r)
      case g: xml.Group => sys.error("xml.Group should never have been a Node; there is no sane conversion")
      // NOTE(review): other node kinds (e.g. xml.Comment, xml.ProcInstr) fall through
      // and raise MatchError -- confirm whether that is intentional.
    }
  }
}
// Written against Seq[xml.Node] rather than NodeSeq, since the scala.xml API is not
// consistent about which of the two it returns.
private[antixml] sealed trait ThirdPrecedenceConvertables { this: XMLConvertable.type =>
  implicit object NodeSeqConvertable extends XMLConvertable[Seq[xml.Node], Group[Node]] {
    /** Converts each node individually and gathers the results into a Group. */
    def apply(ns: Seq[xml.Node]) = Group(ns.map(n => NodeConvertable(n)): _*)
  }
}
| arktekk/anti-xml | src/main/scala/com/codecommit/antixml/conversion.scala | Scala | bsd-3-clause | 8,103 |
package sbt.internal.util
package complete
import java.io.File
import sbt.io.IO._
class FileExamplesTest extends UnitSpec {
  // Each test instantiates DirectoryStructure, whose DelayedInit hook builds a
  // temporary directory tree and runs the test body against it.
  "listing all files in an absolute base directory" should
    "produce the entire base directory's contents" in {
      val _ = new DirectoryStructure {
        fileExamples().toList should contain theSameElementsAs (allRelativizedPaths)
      }
    }
  "listing files with a prefix that matches none" should
    "produce an empty list" in {
      val _ = new DirectoryStructure(withCompletionPrefix = "z") {
        fileExamples().toList shouldBe empty
      }
    }
  "listing single-character prefixed files" should
    "produce matching paths only" in {
      val _ = new DirectoryStructure(withCompletionPrefix = "f") {
        fileExamples().toList should contain theSameElementsAs (prefixedPathsOnly)
      }
    }
  "listing directory-prefixed files" should
    "produce matching paths only" in {
      val _ = new DirectoryStructure(withCompletionPrefix = "far") {
        fileExamples().toList should contain theSameElementsAs (prefixedPathsOnly)
      }
    }
  it should "produce sub-dir contents only when appending a file separator to the directory" in {
    val _ = new DirectoryStructure(withCompletionPrefix = "far" + File.separator) {
      fileExamples().toList should contain theSameElementsAs (prefixedPathsOnly)
    }
  }
  "listing files with a sub-path prefix" should
    "produce matching paths only" in {
      val _ = new DirectoryStructure(withCompletionPrefix = "far" + File.separator + "ba") {
        fileExamples().toList should contain theSameElementsAs (prefixedPathsOnly)
      }
    }
  "completing a full path" should
    "produce a list with an empty string" in {
      val _ = new DirectoryStructure(withCompletionPrefix = "bazaar") {
        fileExamples().toList shouldEqual List("")
      }
    }
  // Test fixture: creates a sample directory tree under a fresh temporary directory and
  // exposes the expected completion results. NOTE(review): DelayedInit is deprecated in
  // Scala 2.11+ -- the anonymous-subclass body of each test is captured and executed by
  // delayedInit below, inside withTemporaryDirectory, so the temp tree exists while the
  // assertions run.
  class DirectoryStructure(withCompletionPrefix: String = "") extends DelayedInit {
    // Populated by createSampleDirStructure before the test body runs.
    var fileExamples: FileExamples = _
    var baseDir: File = _
    var childFiles: List[File] = _
    var childDirectories: List[File] = _
    var nestedFiles: List[File] = _
    var nestedDirectories: List[File] = _
    // Every created file/directory, relativized against baseDir.
    def allRelativizedPaths: List[String] =
      (childFiles ++ childDirectories ++ nestedFiles ++ nestedDirectories).map(relativize(baseDir, _).get)
    // The relativized paths matching the completion prefix, with the prefix stripped --
    // i.e. the remainder a completion engine should offer.
    def prefixedPathsOnly: List[String] =
      allRelativizedPaths.filter(_ startsWith withCompletionPrefix).map(_ substring withCompletionPrefix.length)
    // Runs the captured test body after setting up the fixture; the temp directory is
    // removed when withTemporaryDirectory returns.
    override def delayedInit(testBody: => Unit): Unit = {
      withTemporaryDirectory {
        tempDir =>
          createSampleDirStructure(tempDir)
          fileExamples = new FileExamples(baseDir, withCompletionPrefix)
          testBody
      }
    }
    // Builds: files foo/bar/bazaar, dirs moo/far, and inside "far" two files and two dirs.
    private def createSampleDirStructure(tempDir: File): Unit = {
      childFiles = toChildFiles(tempDir, List("foo", "bar", "bazaar"))
      childDirectories = toChildFiles(tempDir, List("moo", "far"))
      nestedFiles = toChildFiles(childDirectories(1), List("farfile1", "barfile2"))
      nestedDirectories = toChildFiles(childDirectories(1), List("fardir1", "bardir2"))
      (childDirectories ++ nestedDirectories).map(_.mkdirs())
      (childFiles ++ nestedFiles).map(_.createNewFile())
      baseDir = tempDir
    }
    private def toChildFiles(baseDir: File, files: List[String]): List[File] = files.map(new File(baseDir, _))
  }
}
| Duhemm/util | internal/util-complete/src/test/scala/sbt/complete/FileExamplesTest.scala | Scala | bsd-3-clause | 3,391 |
package com.sksamuel.elastic4s.http.search.queries
import com.sksamuel.elastic4s.searches.queries.matches.{MatchAllQueryDefinition, MatchNoneQueryDefinition}
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory}
object MatchAllBodyFn {
  /** Renders a [[MatchAllQueryDefinition]] as a `match_all` JSON query body. */
  def apply(q: MatchAllQueryDefinition): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.startObject()
    builder.startObject("match_all")
    // Optional settings are emitted only when present.
    q.boost.foreach(b => builder.field("boost", b))
    q.queryName.foreach(name => builder.field("_name", name))
    builder.endObject()
    builder.endObject()
  }
}
object MatchNoneBodyFn {
  /** Renders a [[MatchNoneQueryDefinition]] as a `match_none` JSON query body. */
  def apply(q: MatchNoneQueryDefinition): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.startObject()
    builder.startObject("match_none")
    // The query name is optional and emitted only when present.
    q.queryName.foreach(name => builder.field("_name", name))
    builder.endObject()
    builder.endObject()
  }
}
| aroundus-inc/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/queries/MatchAllBodyFn.scala | Scala | apache-2.0 | 842 |
package io.buoyant.namer
import com.twitter.finagle.{NameTree, Dentry, Path}
/**
 * A NameTree annotated with the delegation steps (paths and dentries) that produced it,
 * so that name resolution can be explained to users.
 */
sealed trait DelegateTree[+T] {
  /** The path this node of the tree was produced from. */
  def path: Path
  /** Transforms every leaf value with `f`, preserving the tree's shape and metadata. */
  def map[U](f: T => U): DelegateTree[U] = DelegateTree.map(this, f)
  /** Replaces each leaf with the subtree produced by `f`. */
  def flatMap[U >: T](f: DelegateTree.Leaf[T] => DelegateTree[U]) = DelegateTree.flatMap(this, f)
  /** Collapses redundant Delegate/Alt/Union nodes; see DelegateTree.simplify. */
  def simplified: DelegateTree[T] = DelegateTree.simplify(this)
  /** Discards delegation metadata, yielding the equivalent NameTree. */
  def toNameTree: NameTree[T] = DelegateTree.toNameTree(this)
  /** Returns a copy of this node with its dentry replaced. */
  def withDentry(dentry: Dentry) = DelegateTree.withDentry(this, dentry)
}
object DelegateTree {
  // Terminal nodes mirroring NameTree's Empty/Fail/Neg, plus Exception for lookups
  // that threw.
  case class Exception(path: Path, dentry: Dentry, thrown: Throwable) extends DelegateTree[Nothing]
  case class Empty(path: Path, dentry: Dentry) extends DelegateTree[Nothing]
  case class Fail(path: Path, dentry: Dentry) extends DelegateTree[Nothing]
  case class Neg(path: Path, dentry: Dentry) extends DelegateTree[Nothing]
  // A delegation step: `path` was rewritten (via `dentry`) into the sub-`tree`.
  case class Delegate[+T](path: Path, dentry: Dentry, tree: DelegateTree[T]) extends DelegateTree[T]
  case class Leaf[+T](path: Path, dentry: Dentry, value: T) extends DelegateTree[T]
  case class Alt[+T](path: Path, dentry: Dentry, trees: DelegateTree[T]*) extends DelegateTree[T]
  case class Union[+T](path: Path, dentry: Dentry, trees: Weighted[T]*) extends DelegateTree[T]
  // A weighted branch of a Union; mirrors NameTree.Weighted.
  case class Weighted[+T](weight: Double, tree: DelegateTree[T]) {
    def map[U](f: T => U): Weighted[U] = copy(tree = tree.map(f))
    def flatMap[U >: T](f: Leaf[T] => DelegateTree[U]): Weighted[U] = copy(tree = DelegateTree.flatMap(tree, f))
  }
  // Records that a transformer called `name` produced `value` while rewriting `tree`.
  case class Transformation[+T](path: Path, name: String, value: T, tree: DelegateTree[T]) extends DelegateTree[T]
  // Returns a copy of `orig` with its dentry replaced. Transformation carries no dentry
  // of its own, so it is returned unchanged.
  private def withDentry[T](orig: DelegateTree[T], dentry: Dentry): DelegateTree[T] = orig match {
    case tree: Exception => tree.copy(dentry = dentry)
    case tree: Empty => tree.copy(dentry = dentry)
    case tree: Fail => tree.copy(dentry = dentry)
    case tree: Neg => tree.copy(dentry = dentry)
    case Delegate(path, _, tree) => Delegate(path, dentry, tree)
    case Leaf(path, _, v) => Leaf(path, dentry, v)
    case Alt(path, _, trees@_*) => Alt(path, dentry, trees: _*)
    case Union(path, _, trees@_*) => Union(path, dentry, trees: _*)
    case t: Transformation[_] => t
  }
  // Structure-preserving map over leaf values (and Transformation values).
  private def map[T, U](orig: DelegateTree[T], f: T => U): DelegateTree[U] = orig match {
    case tree: Exception => tree
    case tree: Empty => tree
    case tree: Fail => tree
    case tree: Neg => tree
    case Delegate(path, dentry, tree) => Delegate(path, dentry, tree.map(f))
    case Leaf(path, dentry, v) => Leaf(path, dentry, f(v))
    case Alt(path, dentry, trees@_*) => Alt(path, dentry, trees.map(_.map(f)): _*)
    case Union(path, dentry, trees@_*) => Union(path, dentry, trees.map(_.map(f)): _*)
    case Transformation(path, name, v, tree) => Transformation(path, name, f(v), tree.map(f))
  }
  // Replaces each Leaf with the subtree produced by `f`; all other nodes are
  // reconstructed around the rewritten children.
  private def flatMap[T, U >: T](orig: DelegateTree[T], f: Leaf[T] => DelegateTree[U]): DelegateTree[U] = orig match {
    case tree: Exception => tree
    case tree: Empty => tree
    case tree: Fail => tree
    case tree: Neg => tree
    case Delegate(path, dentry, tree) => Delegate(path, dentry, flatMap(tree, f))
    case leaf@Leaf(_, _, _) => f(leaf)
    case Alt(path, dentry, trees@_*) => Alt(path, dentry, trees.map(flatMap(_, f)): _*)
    case Union(path, dentry, trees@_*) => Union(path, dentry, trees.map(_.flatMap(f)): _*)
    case Transformation(path, name, v, tree) => Transformation(path, name, v, flatMap(tree, f))
  }
  // Collapses redundant structure:
  //  - a Delegate whose (simplified) child is a non-terminal node for the same path;
  //  - empty Alt/Union (become Neg) and single-branch Alt/Union (become Delegate);
  //  - nested Alts for the same path are flattened into their parent.
  private def simplify[T](tree: DelegateTree[T]): DelegateTree[T] = tree match {
    case Delegate(path, dentry, tree) =>
      val simplified = simplify(tree)
      // Never collapse into a terminal Neg/Fail/Empty node: those carry the outcome.
      val collapse = simplified match {
        case _: DelegateTree.Neg | _: DelegateTree.Fail | _: DelegateTree.Empty =>
          false
        case _ => simplified.path == path
      }
      if (collapse) simplified.withDentry(dentry)
      else Delegate(path, dentry, simplified)
    case Alt(path, dentry) => Neg(path, dentry)
    case Alt(path, dentry, tree) => simplify(Delegate(path, dentry, tree))
    case Alt(path, dentry, trees@_*) =>
      // Flatten same-path nested Alts while preserving branch order.
      val simplified = trees.foldLeft(Seq.empty[DelegateTree[T]]) {
        case (trees, tree) => simplify(tree) match {
          case Alt(p, pf, ts@_*) if p == path =>
            trees ++ ts
          case tree =>
            trees :+ tree
        }
      }
      Alt(path, dentry, simplified: _*)
    case Union(path, dentry) => Neg(path, dentry)
    case Union(path, dentry, Weighted(_, tree)) => simplify(Delegate(path, dentry, tree))
    case Union(path, dentry, weights@_*) =>
      val simplified = weights.map {
        case Weighted(w, tree) => Weighted(w, simplify(tree))
      }
      Union(path, dentry, simplified: _*)
    case tree => tree
  }
  // Strips delegation metadata, recovering the equivalent NameTree. An Exception node
  // rethrows its recorded throwable.
  private def toNameTree[T](delegates: DelegateTree[T]): NameTree[T] = delegates match {
    case Exception(_, _, e) => throw e
    case Empty(_, _) => NameTree.Empty
    case Fail(_, _) => NameTree.Fail
    case Neg(_, _) => NameTree.Neg
    case Delegate(_, _, tree) => toNameTree(tree)
    case Leaf(_, _, v) => NameTree.Leaf(v)
    case Alt(_, _, delegates@_*) => NameTree.Alt(delegates.map(toNameTree): _*)
    case Union(_, _, delegates@_*) => NameTree.Union(delegates.map(toNameTreeWeighted): _*)
    case Transformation(_, _, _, tree) => toNameTree(tree)
  }
  private def toNameTreeWeighted[T](delegate: DelegateTree.Weighted[T]): NameTree.Weighted[T] =
    NameTree.Weighted(delegate.weight, toNameTree(delegate.tree))
  // Annotates a NameTree with the given path and dentry at every node.
  // NOTE(review): the Neg case uses Path.empty instead of `path`, unlike the 2-arg
  // overload below -- confirm whether that asymmetry is intentional.
  def fromNameTree[T](path: Path, dentry: Dentry, names: NameTree[T]): DelegateTree[T] =
    names match {
      case NameTree.Empty => DelegateTree.Empty(path, dentry)
      case NameTree.Fail => DelegateTree.Fail(path, dentry)
      case NameTree.Neg => DelegateTree.Neg(Path.empty, dentry)
      case NameTree.Leaf(v) => DelegateTree.Leaf(path, dentry, v)
      case NameTree.Alt(names@_*) =>
        val delegates = names.map(fromNameTree[T](path, dentry, _))
        DelegateTree.Alt(path, dentry, delegates: _*)
      case NameTree.Union(names@_*) =>
        val delegates = names.map {
          case NameTree.Weighted(w, tree) =>
            DelegateTree.Weighted(w, fromNameTree(path, dentry, tree))
        }
        DelegateTree.Union(path, dentry, delegates: _*)
    }
  // Like the overload above but with a nop dentry at every node.
  def fromNameTree[T](names: NameTree[T], path: Path = Path.empty): DelegateTree[T] =
    names match {
      case NameTree.Empty => DelegateTree.Empty(path, Dentry.nop)
      case NameTree.Fail => DelegateTree.Fail(path, Dentry.nop)
      case NameTree.Neg => DelegateTree.Neg(path, Dentry.nop)
      case NameTree.Leaf(v) => DelegateTree.Leaf(path, Dentry.nop, v)
      case NameTree.Alt(names@_*) =>
        val delegates = names.map(fromNameTree[T](path, Dentry.nop, _))
        DelegateTree.Alt(path, Dentry.nop, delegates: _*)
      case NameTree.Union(names@_*) =>
        val delegates = names.map {
          case NameTree.Weighted(w, tree) =>
            DelegateTree.Weighted(w, fromNameTree(path, Dentry.nop, tree))
        }
        DelegateTree.Union(path, Dentry.nop, delegates: _*)
    }
  /**
   * Find a leaf in the DelegateTree that satisfies the predicate and returns a list of delegate
   * tree nodes which form a path from the root of the tree to the satisfying leaf. For each node
   * in this path, the node's Path and Dentry are returned.
   *
   * The search is depth-first and returns the first satisfying branch in declaration order.
   *
   * @return A list of (Path, Dentry) tuples forming a path from the root of the tree to a leaf
   *         which matches the predicate. None if no such leaf exists.
   */
  def find[T](
    tree: DelegateTree[T],
    predicate: T => Boolean
  ): Option[List[(Path, String)]] = {
    // We define our own version of Dentry.show to ensure that Dentry.nop is formatted correctly.
    def show(dentry: Dentry) = if (dentry == Dentry.nop) "" else dentry.show
    tree match {
      // When we reach a DelegateTree.Leaf we may have found a path.
      case Leaf(leafPath, dentry, t) if predicate(t) =>
        Some(List(leafPath -> show(dentry)))
      // Leaf does not satisfy the predicate.
      case Leaf(_, _, _) =>
        None
      case Transformation(path, name, _, remainingTree) =>
        find(remainingTree, predicate).map {
          // We need to check if this transformation is followed by a leaf.
          // If so, we need to switch the transformation's dentry with the leaf's dentry. We do
          // this to make sure that the node list is more readable.
          case List((leafPath, leafDentry)) =>
            List(path -> leafDentry, leafPath -> name)
          case rest =>
            (path -> name) +: rest
        }
      case Delegate(path, dentry, remainingTree) =>
        find(remainingTree, predicate).map { rest =>
          (path -> show(dentry)) +: rest
        }
      case Union(path, dentry, remainingWeightedTrees@_*) =>
        // collectFirst picks the first branch (in order) that contains a match.
        remainingWeightedTrees.map { wd =>
          find(wd.tree, predicate)
        }.collectFirst {
          case Some(rest) => (path -> show(dentry)) +: rest
        }
      case Alt(path, dentry, remainingTrees@_*) =>
        remainingTrees.map { d =>
          find(d, predicate)
        }.collectFirst {
          case Some(rest) => (path -> show(dentry)) +: rest
        }
      case Exception(_, _, _) | Empty(_, _) | Fail(_, _) | Neg(_, _) =>
        None
    }
  }
}
| linkerd/linkerd | namer/core/src/main/scala/io/buoyant/namer/DelegateTree.scala | Scala | apache-2.0 | 9,240 |
package com.dominikgruber.fpinscala.chapter04
import org.scalatest._
class Exercise02Spec extends FlatSpec with Matchers {
  // Population variance of Seq(1.0, 2.0, 3.0): mean = 2.0, so ((1 + 0 + 1) / 3) = 2/3.
  "variance" should "multiply by 2" in {
    Chapter04.variance(Seq(1.0, 2.0, 3.0)) should be (Some(2.0 / 3.0))
  }
  // An empty sequence has no mean, so the variance is undefined.
  it should "be None" in {
    Chapter04.variance(Seq[Double]()) should be (None)
  }
}
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.junit
import org.scalatest._
import org.junit.runner.notification.RunNotifier
import org.junit.runner.Description
import org.junit.runner.notification.Failure
import org.scalatest.events._
// TODO: Mention on each Reporter method that it does nothing
// There's no way to really pass along a suiteStarting or suiteCompleted
// report. They have a dumb comment to "Do not invoke" fireTestRunStarted
// and fireTestRunFinished, so I think they must be doing that themselves.
// This means we don't have a way to really forward runStarting and
// runCompleted reports either. But runAborted reports should be sent
// out the door somehow, so we report them with yet another fireTestFailure.
private[junit] class RunNotifierReporter(runNotifier: RunNotifier) extends Reporter {
// This form isn't clearly specified in JUnit docs, but some tools may assume it, so why rock the boat.
// Here's what JUnit code does:
// public static Description createTestDescription(Class<?> clazz, String name, Annotation... annotations) {
// return new Description(String.format("%s(%s)", name, clazz.getName()), annotations);
// }
// So you can see the test name shows up, which is normally a test method name, followed by the fully qualified class name in parens
// We put test name and suite class name (or suite name if no class) in parens, but don't try and do anything to get rid of spaces or
// parens the test or suite names themselves, since it is unclear if this format is used by anyone anyway. If actual bug reports come
// in, then we can fix each actual problem once it is understood.
//
private def testDescriptionName(suiteName: String, suiteClassName: Option[String], testName: String) =
suiteClassName match {
case Some(suiteClassName) => testName + "(" + suiteClassName + ")"
case None => testName + "(" + suiteName + ")"
}
private def suiteDescriptionName(suiteName: String, suiteClassName: Option[String]) =
suiteClassName match {
case Some(suiteClassName) => suiteClassName
case None => suiteName
}
override def apply(event: Event) {
event match {
case TestStarting(ordinal, suiteName, suiteId, suiteClassName, testName, testText, formatter, location, rerunnable, payload, threadName, timeStamp) =>
runNotifier.fireTestStarted(Description.createSuiteDescription(testDescriptionName(suiteName, suiteClassName, testName)))
case TestFailed(ordinal, message, suiteName, suiteId, suiteClassName, testName, testText, recordedEvents, throwable, duration, formatter, location, rerunnable, payload, threadName, timeStamp) =>
val throwableOrNull =
throwable match {
case Some(t) => t
case None => null // Yuck. Not sure if the exception passed to new Failure can be null, but it could be given this code. Usually throwable would be defined.
}
val description = Description.createSuiteDescription(testDescriptionName(suiteName, suiteClassName, testName))
runNotifier.fireTestFailure(new Failure(description, throwableOrNull))
runNotifier.fireTestFinished(description)
case TestSucceeded(ordinal, suiteName, suiteId, suiteClassName, testName, testText, recordedEvents, duration, formatter, location, rerunnable, payload, threadName, timeStamp) =>
runNotifier.fireTestFinished(Description.createSuiteDescription(testDescriptionName(suiteName, suiteClassName, testName)))
case TestIgnored(ordinal, suiteName, suiteId, suiteClassName, testName, testText, formatter, location, payload, threadName, timeStamp) =>
runNotifier.fireTestIgnored(Description.createSuiteDescription(testDescriptionName(suiteName, suiteClassName, testName)))
// TODO: I dont see TestCanceled here. Probably need to add it
// Closest thing we can do with pending is report an ignored test
case TestPending(ordinal, suiteName, suiteId, suiteClassName, testName, testText, recordedEvents, duration, formatter, location, payload, threadName, timeStamp) =>
runNotifier.fireTestIgnored(Description.createSuiteDescription(testDescriptionName(suiteName, suiteClassName, testName)))
case SuiteAborted(ordinal, message, suiteName, suiteId, suiteClassName, throwable, duration, formatter, location, rerunnable, payload, threadName, timeStamp) =>
val throwableOrNull =
throwable match {
case Some(t) => t
case None => null // Yuck. Not sure if the exception passed to new Failure can be null, but it could be given this code. Usually throwable would be defined.
}
val description = Description.createSuiteDescription(suiteDescriptionName(suiteName, suiteClassName))
runNotifier.fireTestFailure(new Failure(description, throwableOrNull)) // Best we can do in JUnit, as far as I know
runNotifier.fireTestFinished(description)
case RunAborted(ordinal, message, throwable, duration, summary, formatter, location, payload, threadName, timeStamp) =>
val throwableOrNull =
throwable match {
case Some(t) => t
case None => null // Yuck. Not sure if the exception passed to new Failure can be null, but it could be given this code. Usually throwable would be defined.
}
val possiblyEmptyMessage = Reporter.messageOrThrowablesDetailMessage(message, throwable)
val description = Description.createSuiteDescription(Resources("runAborted") + " " + possiblyEmptyMessage)
runNotifier.fireTestFailure(new Failure(description, throwableOrNull)) // Best we can do in JUnit, as far as I know
runNotifier.fireTestFinished(description)
case _ =>
}
}
}
| travisbrown/scalatest | src/main/scala/org/scalatest/junit/RunNotifierReporter.scala | Scala | apache-2.0 | 6,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import org.apache.flink.api.common.typeinfo.{LocalTimeTypeInfo, SqlTimeTypeInfo}
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.expressions.{E => PlannerE, UUID => PlannerUUID}
import org.apache.flink.table.functions.BuiltInFunctionDefinitions._
import org.apache.flink.table.functions._
import org.apache.flink.table.types.logical.LogicalTypeRoot.SYMBOL
import org.apache.flink.table.types.logical.utils.LogicalTypeChecks._
import org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToLegacyInfo
import java.time.{LocalDate, LocalDateTime}
import org.apache.flink.table.util.Logging
import _root_.scala.collection.JavaConverters._
/**
* Visitor implementation for converting [[Expression]]s to [[PlannerExpression]]s.
*/
class PlannerExpressionConverter private
extends ApiExpressionVisitor[PlannerExpression]
with Logging {
override def visit(call: CallExpression): PlannerExpression = {
translateCall(call.getFunctionDefinition, call.getChildren.asScala)
}
override def visit(unresolvedCall: UnresolvedCallExpression): PlannerExpression = {
translateCall(unresolvedCall.getFunctionDefinition, unresolvedCall.getChildren.asScala)
}
override def visit(other: ResolvedExpression): PlannerExpression = {
throw new TableException("Unsupported resolved expression:" + other)
}
  /**
   * Translates a function call into a [[PlannerExpression]].
   *
   * Two phases: first, a handful of built-ins whose children must NOT be converted
   * generically (CAST consumes a type literal; window properties consume a window
   * reference) are handled with early returns. Then all children are converted and the
   * function definition is dispatched: legacy UDF wrappers, best-effort handling of
   * new-style UDFs (logged, since full support is Blink-planner only), and finally the
   * big table of built-in functions. Arity is checked with the local `assert` helper,
   * which throws ValidationException (it shadows Predef.assert on purpose).
   */
  private def translateCall(
      func: FunctionDefinition,
      children: Seq[Expression])
    : PlannerExpression = {

    // special case: requires individual handling of child expressions
    func match {
      case CAST =>
        assert(children.size == 2)
        // second child is the target type literal, not a value expression
        return Cast(
          children.head.accept(this),
          fromDataTypeToLegacyInfo(
            children(1).asInstanceOf[TypeLiteralExpression].getOutputDataType))

      case WINDOW_START =>
        assert(children.size == 1)
        val windowReference = translateWindowReference(children.head)
        return WindowStart(windowReference)

      case WINDOW_END =>
        assert(children.size == 1)
        val windowReference = translateWindowReference(children.head)
        return WindowEnd(windowReference)

      case PROCTIME =>
        assert(children.size == 1)
        val windowReference = translateWindowReference(children.head)
        return ProctimeAttribute(windowReference)

      case ROWTIME =>
        assert(children.size == 1)
        val windowReference = translateWindowReference(children.head)
        return RowtimeAttribute(windowReference)

      case _ =>
    }

    // general path: recursively convert all children first
    val args = children.map(_.accept(this))

    func match {
      // explicit legacy
      case sfd: ScalarFunctionDefinition =>
        val call = PlannerScalarFunctionCall(
          sfd.getScalarFunction,
          args)
        //it configures underlying state
        call.validateInput()
        call

      // explicit legacy
      case tfd: TableFunctionDefinition =>
        PlannerTableFunctionCall(
          tfd.toString,
          tfd.getTableFunction,
          args,
          tfd.getResultType)

      // explicit legacy
      case afd: AggregateFunctionDefinition =>
        AggFunctionCall(
          afd.getAggregateFunction,
          afd.getResultTypeInfo,
          afd.getAccumulatorTypeInfo,
          args)

      // explicit legacy
      case tafd: TableAggregateFunctionDefinition =>
        AggFunctionCall(
          tafd.getTableAggregateFunction,
          tafd.getResultTypeInfo,
          tafd.getAccumulatorTypeInfo,
          args)

      // best-effort support for new type inference
      case sf: ScalarFunction =>
        LOG.warn(
          "The new type inference for functions is only supported in the Blink planner. " +
            "Falling back to legacy type inference for function '{}'.", sf.getClass)
        val call = PlannerScalarFunctionCall(
          sf,
          args)
        // it configures underlying state
        call.validateInput()
        call

      // best-effort support for new type inference
      case tf: TableFunction[_] =>
        LOG.warn(
          "The new type inference for functions is only supported in the Blink planner. " +
            "Falling back to legacy type inference for function '{}'.", tf.getClass)
        PlannerTableFunctionCall(
          tf.toString,
          tf,
          args,
          UserDefinedFunctionHelper.getReturnTypeOfTableFunction(tf))

      // best-effort support for new type inference
      case af: AggregateFunction[_, _] =>
        LOG.warn(
          "The new type inference for functions is only supported in the Blink planner. " +
            "Falling back to legacy type inference for function '{}'.", af.getClass)
        AggFunctionCall(
          af,
          UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(af),
          UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(af),
          args)

      // best-effort support for new type inference
      case taf: TableAggregateFunction[_, _] =>
        LOG.warn(
          "The new type inference for functions is only supported in the Blink planner. " +
            "Falling back to legacy type inference for function '{}'.", taf.getClass)
        AggFunctionCall(
          taf,
          UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(taf),
          UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(taf),
          args)

      case _ : UserDefinedFunction =>
        throw new ValidationException(
          "The new type inference for functions is only supported in the Blink planner.")

      // built-in function table
      case fd: FunctionDefinition =>
        fd match {

          case AS =>
            assert(args.size >= 2)
            // first extra arg is the primary alias; any further args are extra names
            val name = getValue[String](args(1))
            val extraNames = args
              .drop(2)
              .map(e => getValue[String](e))
            Alias(args.head, name, extraNames)

          case FLATTEN =>
            assert(args.size == 1)
            Flattening(args.head)

          case GET =>
            assert(args.size == 2)
            val expr = GetCompositeField(args.head, getValue(args.last))
            //it configures underlying state
            expr.validateInput()
            expr

          // logic
          case AND =>
            assert(args.size >= 2)
            args.reduceLeft(And)

          case OR =>
            assert(args.size >= 2)
            args.reduceLeft(Or)

          case NOT =>
            assert(args.size == 1)
            Not(args.head)

          // comparisons
          case EQUALS =>
            assert(args.size == 2)
            EqualTo(args.head, args.last)

          case GREATER_THAN =>
            assert(args.size == 2)
            GreaterThan(args.head, args.last)

          case GREATER_THAN_OR_EQUAL =>
            assert(args.size == 2)
            GreaterThanOrEqual(args.head, args.last)

          case LESS_THAN =>
            assert(args.size == 2)
            LessThan(args.head, args.last)

          case LESS_THAN_OR_EQUAL =>
            assert(args.size == 2)
            LessThanOrEqual(args.head, args.last)

          case NOT_EQUALS =>
            assert(args.size == 2)
            NotEqualTo(args.head, args.last)

          case IN =>
            assert(args.size > 1)
            In(args.head, args.drop(1))

          case IS_NULL =>
            assert(args.size == 1)
            IsNull(args.head)

          case IS_NOT_NULL =>
            assert(args.size == 1)
            IsNotNull(args.head)

          case IS_TRUE =>
            assert(args.size == 1)
            IsTrue(args.head)

          case IS_FALSE =>
            assert(args.size == 1)
            IsFalse(args.head)

          case IS_NOT_TRUE =>
            assert(args.size == 1)
            IsNotTrue(args.head)

          case IS_NOT_FALSE =>
            assert(args.size == 1)
            IsNotFalse(args.head)

          case IF =>
            assert(args.size == 3)
            If(args.head, args(1), args.last)

          case BETWEEN =>
            assert(args.size == 3)
            Between(args.head, args(1), args.last)

          case NOT_BETWEEN =>
            assert(args.size == 3)
            NotBetween(args.head, args(1), args.last)

          // aggregates
          case DISTINCT =>
            assert(args.size == 1)
            DistinctAgg(args.head)

          case AVG =>
            assert(args.size == 1)
            Avg(args.head)

          case COUNT =>
            assert(args.size == 1)
            Count(args.head)

          case MAX =>
            assert(args.size == 1)
            Max(args.head)

          case MIN =>
            assert(args.size == 1)
            Min(args.head)

          case SUM =>
            assert(args.size == 1)
            Sum(args.head)

          case SUM0 =>
            assert(args.size == 1)
            Sum0(args.head)

          case STDDEV_POP =>
            assert(args.size == 1)
            StddevPop(args.head)

          case STDDEV_SAMP =>
            assert(args.size == 1)
            StddevSamp(args.head)

          case VAR_POP =>
            assert(args.size == 1)
            VarPop(args.head)

          case VAR_SAMP =>
            assert(args.size == 1)
            VarSamp(args.head)

          case COLLECT =>
            assert(args.size == 1)
            Collect(args.head)

          // string functions
          case CHAR_LENGTH =>
            assert(args.size == 1)
            CharLength(args.head)

          case INIT_CAP =>
            assert(args.size == 1)
            InitCap(args.head)

          case LIKE =>
            assert(args.size == 2)
            Like(args.head, args.last)

          case LOWER =>
            assert(args.size == 1)
            Lower(args.head)

          case LOWERCASE =>
            assert(args.size == 1)
            Lower(args.head)

          case SIMILAR =>
            assert(args.size == 2)
            Similar(args.head, args.last)

          case SUBSTRING =>
            assert(args.size == 2 || args.size == 3)
            if (args.size == 2) {
              new Substring(args.head, args.last)
            } else {
              Substring(args.head, args(1), args.last)
            }

          case REPLACE =>
            assert(args.size == 3)
            Replace(args.head, args(1), args.last)

          case TRIM =>
            assert(args.size == 4)
            // args: leading flag, trailing flag, trim character, operand
            val removeLeading = getValue[Boolean](args.head)
            val removeTrailing = getValue[Boolean](args(1))

            val trimMode = if (removeLeading && removeTrailing) {
              PlannerTrimMode.BOTH
            } else if (removeLeading) {
              PlannerTrimMode.LEADING
            } else if (removeTrailing) {
              PlannerTrimMode.TRAILING
            } else {
              throw new TableException("Unsupported trim mode.")
            }
            Trim(trimMode, args(2), args(3))

          case UPPER =>
            assert(args.size == 1)
            Upper(args.head)

          case UPPERCASE =>
            assert(args.size == 1)
            Upper(args.head)

          case POSITION =>
            assert(args.size == 2)
            Position(args.head, args.last)

          case OVERLAY =>
            assert(args.size == 3 || args.size == 4)
            if (args.size == 3) {
              new Overlay(args.head, args(1), args.last)
            } else {
              Overlay(
                args.head,
                args(1),
                args(2),
                args.last)
            }

          case CONCAT =>
            Concat(args)

          case CONCAT_WS =>
            assert(args.nonEmpty)
            ConcatWs(args.head, args.tail)

          case LPAD =>
            assert(args.size == 3)
            Lpad(args.head, args(1), args.last)

          case RPAD =>
            assert(args.size == 3)
            Rpad(args.head, args(1), args.last)

          case REGEXP_EXTRACT =>
            assert(args.size == 2 || args.size == 3)
            if (args.size == 2) {
              RegexpExtract(args.head, args.last)
            } else {
              RegexpExtract(args.head, args(1), args.last)
            }

          case FROM_BASE64 =>
            assert(args.size == 1)
            FromBase64(args.head)

          case TO_BASE64 =>
            assert(args.size == 1)
            ToBase64(args.head)

          // fully qualified to disambiguate from the planner's own UUID
          case BuiltInFunctionDefinitions.UUID =>
            assert(args.isEmpty)
            PlannerUUID()

          case LTRIM =>
            assert(args.size == 1)
            LTrim(args.head)

          case RTRIM =>
            assert(args.size == 1)
            RTrim(args.head)

          case REPEAT =>
            assert(args.size == 2)
            Repeat(args.head, args.last)

          case REGEXP_REPLACE =>
            assert(args.size == 3)
            RegexpReplace(args.head, args(1), args.last)

          // arithmetic
          case PLUS =>
            assert(args.size == 2)
            Plus(args.head, args.last)

          case MINUS =>
            assert(args.size == 2)
            Minus(args.head, args.last)

          case DIVIDE =>
            assert(args.size == 2)
            Div(args.head, args.last)

          case TIMES =>
            assert(args.size == 2)
            Mul(args.head, args.last)

          case ABS =>
            assert(args.size == 1)
            Abs(args.head)

          case CEIL =>
            assert(args.size == 1 || args.size == 2)
            if (args.size == 1) {
              Ceil(args.head)
            } else {
              // two-arg form: ceil to a time unit (note the swapped argument order)
              TemporalCeil(args.last, args.head)
            }

          case EXP =>
            assert(args.size == 1)
            Exp(args.head)

          case FLOOR =>
            assert(args.size == 1 || args.size == 2)
            if (args.size == 1) {
              Floor(args.head)
            } else {
              TemporalFloor(args.last, args.head)
            }

          case LOG10 =>
            assert(args.size == 1)
            Log10(args.head)

          case LOG2 =>
            assert(args.size == 1)
            Log2(args.head)

          case LN =>
            assert(args.size == 1)
            Ln(args.head)

          case BuiltInFunctionDefinitions.LOG =>
            assert(args.size == 1 || args.size == 2)
            if (args.size == 1) {
              Log(args.head)
            } else {
              Log(args.head, args.last)
            }

          case POWER =>
            assert(args.size == 2)
            Power(args.head, args.last)

          case MOD =>
            assert(args.size == 2)
            Mod(args.head, args.last)

          case SQRT =>
            assert(args.size == 1)
            Sqrt(args.head)

          case MINUS_PREFIX =>
            assert(args.size == 1)
            UnaryMinus(args.head)

          case SIN =>
            assert(args.size == 1)
            Sin(args.head)

          case COS =>
            assert(args.size == 1)
            Cos(args.head)

          case SINH =>
            assert(args.size == 1)
            Sinh(args.head)

          case TAN =>
            assert(args.size == 1)
            Tan(args.head)

          case TANH =>
            assert(args.size == 1)
            Tanh(args.head)

          case COT =>
            assert(args.size == 1)
            Cot(args.head)

          case ASIN =>
            assert(args.size == 1)
            Asin(args.head)

          case ACOS =>
            assert(args.size == 1)
            Acos(args.head)

          case ATAN =>
            assert(args.size == 1)
            Atan(args.head)

          case ATAN2 =>
            assert(args.size == 2)
            Atan2(args.head, args.last)

          case COSH =>
            assert(args.size == 1)
            Cosh(args.head)

          case DEGREES =>
            assert(args.size == 1)
            Degrees(args.head)

          case RADIANS =>
            assert(args.size == 1)
            Radians(args.head)

          case SIGN =>
            assert(args.size == 1)
            Sign(args.head)

          case ROUND =>
            assert(args.size == 2)
            Round(args.head, args.last)

          case PI =>
            assert(args.isEmpty)
            Pi()

          case BuiltInFunctionDefinitions.E =>
            assert(args.isEmpty)
            PlannerE()

          case RAND =>
            assert(args.isEmpty || args.size == 1)
            if (args.isEmpty) {
              new Rand()
            } else {
              Rand(args.head)
            }

          case RAND_INTEGER =>
            assert(args.size == 1 || args.size == 2)
            if (args.size == 1) {
              new RandInteger(args.head)
            } else {
              RandInteger(args.head, args.last)
            }

          case BIN =>
            assert(args.size == 1)
            Bin(args.head)

          case HEX =>
            assert(args.size == 1)
            Hex(args.head)

          case TRUNCATE =>
            assert(args.size == 1 || args.size == 2)
            if (args.size == 1) {
              new Truncate(args.head)
            } else {
              Truncate(args.head, args.last)
            }

          // temporal functions
          case EXTRACT =>
            assert(args.size == 2)
            Extract(args.head, args.last)

          case CURRENT_DATE =>
            assert(args.isEmpty)
            CurrentDate()

          case CURRENT_TIME =>
            assert(args.isEmpty)
            CurrentTime()

          case CURRENT_TIMESTAMP =>
            assert(args.isEmpty)
            CurrentTimestamp()

          case LOCAL_TIME =>
            assert(args.isEmpty)
            LocalTime()

          case LOCAL_TIMESTAMP =>
            assert(args.isEmpty)
            LocalTimestamp()

          case TEMPORAL_OVERLAPS =>
            assert(args.size == 4)
            TemporalOverlaps(
              args.head,
              args(1),
              args(2),
              args.last)

          case DATE_FORMAT =>
            assert(args.size == 2)
            DateFormat(args.head, args.last)

          case TIMESTAMP_DIFF =>
            assert(args.size == 3)
            TimestampDiff(args.head, args(1), args.last)

          // collections / rows
          case AT =>
            assert(args.size == 2)
            ItemAt(args.head, args.last)

          case CARDINALITY =>
            assert(args.size == 1)
            Cardinality(args.head)

          case ARRAY =>
            ArrayConstructor(args)

          case ARRAY_ELEMENT =>
            assert(args.size == 1)
            ArrayElement(args.head)

          case MAP =>
            MapConstructor(args)

          case ROW =>
            RowConstructor(args)

          // ordering
          case ORDER_ASC =>
            assert(args.size == 1)
            Asc(args.head)

          case ORDER_DESC =>
            assert(args.size == 1)
            Desc(args.head)

          // hashing
          case MD5 =>
            assert(args.size == 1)
            Md5(args.head)

          case SHA1 =>
            assert(args.size == 1)
            Sha1(args.head)

          case SHA224 =>
            assert(args.size == 1)
            Sha224(args.head)

          case SHA256 =>
            assert(args.size == 1)
            Sha256(args.head)

          case SHA384 =>
            assert(args.size == 1)
            Sha384(args.head)

          case SHA512 =>
            assert(args.size == 1)
            Sha512(args.head)

          case SHA2 =>
            assert(args.size == 2)
            Sha2(args.head, args.last)

          // OVER windows: args are agg, alias, order-by, preceding, then partition keys
          case OVER =>
            assert(args.size >= 4)
            OverCall(
              args.head,
              args.slice(4, args.size),
              args(1),
              args(2),
              args(3)
            )

          case UNBOUNDED_RANGE =>
            assert(args.isEmpty)
            UnboundedRange()

          case UNBOUNDED_ROW =>
            assert(args.isEmpty)
            UnboundedRow()

          case CURRENT_RANGE =>
            assert(args.isEmpty)
            CurrentRange()

          case CURRENT_ROW =>
            assert(args.isEmpty)
            CurrentRow()

          case STREAM_RECORD_TIMESTAMP =>
            assert(args.isEmpty)
            StreamRecordTimestamp()

          case _ =>
            throw new TableException(s"Unsupported function definition: $fd")
        }
    }
  }
  /**
   * Converts a literal. Symbol literals (time units etc.) are mapped to their planner
   * counterparts; other literals are bridged to legacy type information. The java.time
   * date/time classes are converted to their java.sql equivalents because the legacy
   * planner expressions only understand SqlTimeTypeInfo.
   */
  override def visit(literal: ValueLiteralExpression): PlannerExpression = {
    if (hasRoot(literal.getOutputDataType.getLogicalType, SYMBOL)) {
      val plannerSymbol = getSymbol(literal.getValueAs(classOf[TableSymbol]).get())
      return SymbolPlannerExpression(plannerSymbol)
    }

    val typeInfo = fromDataTypeToLegacyInfo(literal.getOutputDataType)
    if (literal.isNull) {
      Null(typeInfo)
    } else {
      typeInfo match {
        case LocalTimeTypeInfo.LOCAL_DATE =>
          Literal(
            java.sql.Date.valueOf(literal.getValueAs(classOf[LocalDate]).get()),
            SqlTimeTypeInfo.DATE)

        case LocalTimeTypeInfo.LOCAL_DATE_TIME =>
          Literal(
            java.sql.Timestamp.valueOf(literal.getValueAs(classOf[LocalDateTime]).get()),
            SqlTimeTypeInfo.TIMESTAMP)

        case LocalTimeTypeInfo.LOCAL_TIME =>
          Literal(
            java.sql.Time.valueOf(literal.getValueAs(classOf[java.time.LocalTime]).get()),
            SqlTimeTypeInfo.TIME)

        case _ =>
          Literal(
            literal.getValueAs(typeInfo.getTypeClass).get(),
            typeInfo)
      }
    }
  }
  /** Maps API table symbols (time interval/point units) onto planner symbols 1:1. */
  private def getSymbol(symbol: TableSymbol): PlannerSymbol = symbol match {
    case TimeIntervalUnit.YEAR => PlannerTimeIntervalUnit.YEAR
    case TimeIntervalUnit.YEAR_TO_MONTH => PlannerTimeIntervalUnit.YEAR_TO_MONTH
    case TimeIntervalUnit.QUARTER => PlannerTimeIntervalUnit.QUARTER
    case TimeIntervalUnit.MONTH => PlannerTimeIntervalUnit.MONTH
    case TimeIntervalUnit.WEEK => PlannerTimeIntervalUnit.WEEK
    case TimeIntervalUnit.DAY => PlannerTimeIntervalUnit.DAY
    case TimeIntervalUnit.DAY_TO_HOUR => PlannerTimeIntervalUnit.DAY_TO_HOUR
    case TimeIntervalUnit.DAY_TO_MINUTE => PlannerTimeIntervalUnit.DAY_TO_MINUTE
    case TimeIntervalUnit.DAY_TO_SECOND => PlannerTimeIntervalUnit.DAY_TO_SECOND
    case TimeIntervalUnit.HOUR => PlannerTimeIntervalUnit.HOUR
    case TimeIntervalUnit.SECOND => PlannerTimeIntervalUnit.SECOND
    case TimeIntervalUnit.HOUR_TO_MINUTE => PlannerTimeIntervalUnit.HOUR_TO_MINUTE
    case TimeIntervalUnit.HOUR_TO_SECOND => PlannerTimeIntervalUnit.HOUR_TO_SECOND
    case TimeIntervalUnit.MINUTE => PlannerTimeIntervalUnit.MINUTE
    case TimeIntervalUnit.MINUTE_TO_SECOND => PlannerTimeIntervalUnit.MINUTE_TO_SECOND
    case TimePointUnit.YEAR => PlannerTimePointUnit.YEAR
    case TimePointUnit.MONTH => PlannerTimePointUnit.MONTH
    case TimePointUnit.DAY => PlannerTimePointUnit.DAY
    case TimePointUnit.HOUR => PlannerTimePointUnit.HOUR
    case TimePointUnit.MINUTE => PlannerTimePointUnit.MINUTE
    case TimePointUnit.SECOND => PlannerTimePointUnit.SECOND
    case TimePointUnit.QUARTER => PlannerTimePointUnit.QUARTER
    case TimePointUnit.WEEK => PlannerTimePointUnit.WEEK
    case TimePointUnit.MILLISECOND => PlannerTimePointUnit.MILLISECOND
    case TimePointUnit.MICROSECOND => PlannerTimePointUnit.MICROSECOND

    case _ =>
      throw new TableException("Unsupported symbol: " + symbol)
  }
override def visit(fieldReference: FieldReferenceExpression): PlannerExpression = {
PlannerResolvedFieldReference(
fieldReference.getName,
fromDataTypeToLegacyInfo(fieldReference.getOutputDataType))
}
override def visit(fieldReference: UnresolvedReferenceExpression)
: PlannerExpression = {
UnresolvedFieldReference(fieldReference.getName)
}
override def visit(typeLiteral: TypeLiteralExpression): PlannerExpression = {
throw new TableException("Unsupported type literal expression: " + typeLiteral)
}
override def visit(tableRef: TableReferenceExpression): PlannerExpression = {
TableReference(
tableRef.asInstanceOf[TableReferenceExpression].getName,
tableRef.asInstanceOf[TableReferenceExpression].getQueryOperation
)
}
override def visit(localReference: LocalReferenceExpression): PlannerExpression =
throw new TableException(
"Local reference should be handled individually by a call: " + localReference)
override def visit(lookupCall: LookupCallExpression): PlannerExpression =
throw new TableException("Unsupported function call: " + lookupCall)
override def visit(sqlCall: SqlCallExpression): PlannerExpression =
throw new TableException("Unsupported function call: " + sqlCall)
  /**
   * Fallback for expressions outside the API expression hierarchy. Expressions that
   * were already converted pass through unchanged; anything else is rejected.
   */
  override def visitNonApiExpression(other: Expression): PlannerExpression = {
    other match {
      // already converted planner expressions will pass this visitor without modification
      case plannerExpression: PlannerExpression => plannerExpression

      case _ =>
        throw new TableException("Unrecognized expression: " + other)
    }
  }
  // Extracts the raw value from a converted literal. Both casts are unchecked:
  // callers are expected to pass only Literal arguments of the right value type
  // (a wrong argument surfaces as a ClassCastException).
  private def getValue[T](literal: PlannerExpression): T = {
    literal.asInstanceOf[Literal].value.asInstanceOf[T]
  }
  // Arity check used throughout translateCall. Deliberately shadows Predef.assert:
  // a failed check is a user-facing validation error, not a JVM AssertionError.
  private def assert(condition: Boolean): Unit = {
    if (!condition) {
      throw new ValidationException("Invalid number of arguments for function.")
    }
  }
  // Converts the child of a window-property call (window start/end, proctime, rowtime)
  // into a window reference. Local references carry their resolved type; unresolved
  // references are also accepted (per the original comment, an artifact of how a
  // DataStream is converted to a Table). Everything else is a validation error.
  private def translateWindowReference(reference: Expression): PlannerExpression = reference match {
    case expr : LocalReferenceExpression =>
      WindowReference(expr.getName, Some(fromDataTypeToLegacyInfo(expr.getOutputDataType)))

    //just because how the datastream is converted to table
    case expr: UnresolvedReferenceExpression =>
      UnresolvedFieldReference(expr.getName)

    case _ =>
      throw new ValidationException(s"Expected LocalReferenceExpression. Got: $reference")
  }
}
object PlannerExpressionConverter {

  /** The converter is stateless, so a single shared instance suffices. */
  val INSTANCE: PlannerExpressionConverter = new PlannerExpressionConverter
}
| rmetzger/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/expressions/PlannerExpressionConverter.scala | Scala | apache-2.0 | 26,544 |
package cpup.mc.tweak.content.tools
import cpup.mc.lib.util.serialization.SingletonSerialization
import cpup.mc.tweak.CPupTweak
/** Tool-part shapes shared across tool types (bindings and handles). */
object GenericParts {
  // Owning mod reference, re-exported for convenience.
  def mod = CPupTweak

  final val binding = Part.Shape("binding")
  final val handle = Part.Shape("handle")
}
| CoderPuppy/cpup-tweak-mc | src/main/scala/cpup/mc/tweak/content/tools/GenericParts.scala | Scala | mit | 260 |
package net.fwbrasil.bond
/** Specs for the null-related validators: each lists accepted and rejected samples. */
class ObjectsSpec extends Spec {

  // IsNull: only null itself validates.
  new ValidatorTest(IsNull) {
    def valids = List(null)
    def invalids = List(1, "a", new Object)
  }

  // IsNotNull: any non-null reference validates.
  new ValidatorTest(IsNotNull) {
    def valids = List(1, "a", new Object)
    def invalids = List(null)
  }
}
| fwbrasil/bond | src/test/scala/net/fwbrasil/bond/ObjectsSpec.scala | Scala | lgpl-2.1 | 279 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <guillaume.dubuisson_duplessis@insa-rouen.fr>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <guillaume.dubuisson_duplessis@insa-rouen.fr> - initial API and implementation
******************************************************************************/
package binaryTree.P63
import util.ExerciseTemplate
import binaryTree.Tree
import binaryTree.Node
import binaryTree.End
/** Exercise P63: implementations must build left-adjusted complete binary trees. */
trait P63 extends ExerciseTemplate {
  /*
	P63 (**) Construct a complete binary tree.
	A complete binary tree with height H is defined as follows: The levels 1,2,3,...,H-1 contain the maximum number of nodes
	(i.e 2(i-1) at the level i, note that we start counting the levels from 1 at the root).
	In level H, which may contain less than the maximum possible number of nodes, all the nodes are "left-adjusted".
	This means that in a levelorder tree traversal all internal nodes come first, the leaves come second,
	and empty successors (the Ends which are not really nodes!) come last.

	Particularly, complete binary trees are used as data structures (or addressing schemes) for heaps.

	We can assign an address number to each node in a complete binary tree by enumerating the nodes in levelorder, starting at
	the root with number 1.
	In doing so, we realize that for every node X with address A the following property holds:
	The address of X's left and right successors are 2*A and 2*A+1, respectively, supposed the successors do exist.
	This fact can be used to elegantly construct a complete binary tree structure.
	Write a method completeBinaryTree that takes as parameters the number of nodes and the value to put in each node.

	scala> Tree.completeBinaryTree(6, "x")
	res0: Node[String] = T(x T(x T(x . .) T(x . .)) T(x T(x . .) .))
   */
  val name = "Construct a complete binary tree (P63)"

  // To be supplied by the solution: a complete binary tree of n nodes, all holding elt.
  def completeBinaryTree[T](n: Int, elt: T): Tree[T]

  test("Invoking completeBinaryTree with n < 0 should throw an IllegalArgumentException") {
    intercept[IllegalArgumentException] {
      completeBinaryTree(-1, 'a)
    }
  }

  // Checks structure explicitly for every size from 0 to 8 nodes; the ASCII
  // diagrams show the expected left-adjusted shape for each case.
  test("Invoking completeBinaryTree should constructor a complete binary tree with n nodes") {
    // 0 node
    assert(completeBinaryTree(0, 'a) == End)
    // 1 nodes
    assert(completeBinaryTree(1, 'a) == Node('a))
    // 2 nodes
    //   'a
    //   /
    //  'a
    assert(completeBinaryTree(2, 'a) == Node('a, Node('a), End))
    // 3 nodes
    //   'a
    //  /  \\
    // 'a   'a
    assert(completeBinaryTree(3, 'a) == Node('a, Node('a), Node('a)))
    // 4 nodes
    //     'a
    //    /  \\
    //   'a   'a
    //   /
    //  'a
    assert(completeBinaryTree(4, 'a) == Node('a, Node('a, Node('a), End), Node('a, End, End)))
    // 5 nodes
    //     'a
    //    /  \\
    //   'a   'a
    //  /  \\
    // 'a   'a
    assert(completeBinaryTree(5, 'a) == Node('a, Node('a, Node('a), Node('a)), Node('a, End, End)))
    // 6 nodes
    //      'a
    //     /  \\
    //    'a   'a
    //   / \\   /
    //  'a 'a 'a
    assert(completeBinaryTree(6, 'a) == Node('a, Node('a, Node('a), Node('a)), Node('a, Node('a), End)))
    // 7 nodes
    //      'a
    //     /  \\
    //    'a   'a
    //   / \\  / \\
    //  'a 'a 'a 'a
    assert(completeBinaryTree(7, 'a) == Node('a, Node('a, Node('a), Node('a)), Node('a, Node('a), Node('a))))
    // 8 nodes
    //       'a
    //      /  \\
    //     'a   'a
    //    / \\  / \\
    //   'a 'a 'a 'a
    //   /
    //  'a
    assert(completeBinaryTree(8, 'a) == Node('a, Node('a, Node('a, Node('a), End), Node('a)), Node('a, Node('a), Node('a))))
  }
}
| GuillaumeDD/scala99problems | src/main/scala/binaryTree/P63/P63.scala | Scala | gpl-3.0 | 4,106 |
// Negative compiler test: the "// error" marker is checked by the test harness and
// must stay on the inline def line (an inline def presumably may not reference the
// private constructor here — confirmed only by the expected error itself).
class Foo private() {
  inline def foo = new Foo // error
}
| som-snytt/dotty | tests/neg/i2564b.scala | Scala | apache-2.0 | 60 |
#!/bin/sh
exec scala -nocompdaemon -savecompiled $0 $@
!#
import scala.io.Source.{fromFile, fromInputStream}
import scala.util.Sorting
/**
* Call with <column> <list-of-files> ('-' for stdin)
*
* Selects a single line for each set of aligned lines from a set of tsv files based on the smallest
* value in the given column of lines in a line set
*
* Files must be aligned line-wise. A commment ('#') line in any file causes the corresponding lines in other files
* to be ignored. Processing stops as soon as the first input source has been read completely.
*
* @author Stefan Plantikow <stefan.plantikow@googlemail.com>
*
*/
/**
 * Line-aligned merger: repeatedly takes one line from each input iterator and prints
 * the line whose numeric value in the given tab-separated column is smallest.
 * NOTE(review): written in a pre-2.8 Scala dialect (`elements`, custom Ordered
 * adapter for quickSort) — keep the old stdlib APIs in mind before touching.
 */
class Merger(iters: Array[Iterator[String]], index: Int) {

  // Sorts the candidate lines in place (ascending by key column) and returns the smallest.
  def findBest(cands: Array[String]): String = {
    Sorting.quickSort[String](cands)({ (l1: String) => new Ordered[String] {
      def compare(l2: String): Int = column(l1) - column(l2)
    }})
    cands(0)
  }

  // Numeric key: integer value of the index-th tab-separated field.
  def column(l: String): Int = Integer.parseInt((l.split('\t')(index)).trim)

  // A line is skippable if it is blank or starts with '#'.
  def isComment(s: String): Boolean = {
    val trimmed = s.trim
    trimmed.length == 0 || trimmed.startsWith("#")
  }

  // Main loop: print the best line of each aligned set until any input runs dry.
  def loop: Unit = doDropWhile match {
    case None => return
    case Some(result: Array[String]) => { Console.print(findBest(result)); loop }
  }

  // Reads one line from every iterator; skips the whole set if any line is a
  // comment; returns None as soon as any input is exhausted.
  def doDropWhile: Option[Array[String]] = {
    if (iters.elements.forall { iter => iter.hasNext }) {
      val elements = iters.map { iter => iter.next }
      if (elements.exists(isComment))
        doDropWhile
      else
        Some(elements)
    }
    else
      None
  }
}
// Script entry point: print usage unless a column index and at least two inputs
// are given; otherwise open each input ('-' = stdin) and run the merger.
if (args.length <= 2) {
  Console.println("Call with <column> <list-of-files> ('-' for stdin)")
  Console.println
  Console.println("Selects a single line for each set of aligned lines from a set of tsv files based on the smallest " +
                  "value in the given column of lines in a line set")
  Console.println
  Console.print("Files must be aligned line-wise. A commment ('#') line in any file causes the corresponding lines "
          + "in other files to be ignored. ")
  Console.println("Processing stops as soon as the first input source has been read completely.")
  exit(1)
}
else {
  // args(0) is the key column; the remaining args are the input files.
  val iters = args.subArray(1, args.length).map
    { fname => (if (fname.trim == "-") fromInputStream(System.in) else fromFile(fname)).getLines }
  val column = Integer.parseInt(args(0))
  new Merger(iters, column).loop
  exit(0)
}
| boggle/souffleuse | bin/merge-join-tsv.scala | Scala | mit | 2,398 |
package com.github.mdr.mash.compiler
import com.github.mdr.mash.parser.AbstractSyntax._
object AddHolesToHeadlessMembers {
def addHoles(program: Program): Program = program.transform {
case HeadlessMemberExpr(member, isSafe, sourceInfoOpt) ⇒ MemberExpr(Hole(1, None), member, isSafe, sourceInfoOpt)
}.asInstanceOf[Program]
} | mdr/mash | src/main/scala/com/github/mdr/mash/compiler/AddHolesToHeadlessMembers.scala | Scala | mit | 338 |
package org.bitcoins.core.serializers.blockchain
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.number.{UInt32, UInt64}
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.protocol.blockchain.MerkleBlock
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.core.util.BitcoinSUtil
import scodec.bits.{BitVector, ByteVector}
import scala.annotation.tailrec
/**
* Created by chris on 8/15/16.
* [[https://bitcoin.org/en/developer-reference#merkleblock]]
*/
sealed abstract class RawMerkleBlockSerializer
    extends RawBitcoinSerializer[MerkleBlock] {

  /**
   * Parses a merkleblock: 80-byte block header, 4-byte little-endian transaction
   * count, CompactSize-prefixed list of 32-byte hashes, then CompactSize-prefixed
   * flag bytes. Flag bits are stored least-significant-bit first within each byte,
   * hence the per-byte BitVector reversal.
   */
  def read(bytes: ByteVector): MerkleBlock = {
    val (headerBytes, afterHeader) = bytes.splitAt(80)
    val blockHeader = RawBlockHeaderSerializer.read(headerBytes)
    val (txCountBytes, afterTxCount) = afterHeader.splitAt(4)
    val transactionCount = UInt32.fromBytes(txCountBytes.reverse)
    val hashCount = CompactSizeUInt.parseCompactSizeUInt(afterTxCount)
    val (_, afterHashCountBytes) = afterTxCount.splitAt(hashCount.bytes.length)
    val (hashes, bytesAfterTxHashParsing) =
      parseTransactionHashes(afterHashCountBytes, hashCount)
    val flagCount =
      CompactSizeUInt.parseCompactSizeUInt(bytesAfterTxHashParsing)
    val (_, afterFlagCountBytes) =
      bytesAfterTxHashParsing.splitAt(flagCount.bytes.length)
    val flags = afterFlagCountBytes.take(flagCount.toInt)
    val matches = flags.toArray
      .map(BitVector(_).reverse)
      .foldLeft(BitVector.empty)(_ ++ _)
    MerkleBlock(blockHeader, transactionCount, hashes, matches)
  }

  /** Serializes a merkle block; the inverse of [[read]], including the bit reversal. */
  def write(merkleBlock: MerkleBlock): ByteVector = {
    val partialMerkleTree = merkleBlock.partialMerkleTree
    val bitVectors = partialMerkleTree.bits
    // re-pack the flag bits LSB-first per byte
    val byteVectors: ByteVector = {
      bitVectors.toByteArray
        .map(BitVector(_).reverse)
        .foldLeft(ByteVector.empty)(_ ++ _.bytes)
    }
    // number of flag BYTES = ceil(bit count / 8)
    val flagCount = CompactSizeUInt(
      UInt64(Math.ceil(partialMerkleTree.bits.size.toDouble / 8).toInt))
    val hashes: ByteVector = BitcoinSUtil.toByteVector(merkleBlock.hashes)
    merkleBlock.blockHeader.bytes ++
      merkleBlock.transactionCount.bytes.reverse ++
      CompactSizeUInt(UInt64(merkleBlock.hashes.size)).bytes ++
      hashes ++ flagCount.bytes ++ byteVectors
  }

  /**
   * Parses a sequence of transactions hashes from inside of a merkle block message
   * @param bytes the bytes from which the tx hashes are parsed from
   * @param hashCount the amount of tx hashes we need to parse from bytes
   * @return the sequence of tx hashes and the remaining bytes to be parsed into a MerkleBlockMessage
   */
  private def parseTransactionHashes(
      bytes: ByteVector,
      hashCount: CompactSizeUInt): (Seq[DoubleSha256Digest], ByteVector) = {
    // tail-recursive accumulation of 32-byte digests; accum is built in reverse
    @tailrec
    def loop(
        remainingHashes: Long,
        remainingBytes: ByteVector,
        accum: List[DoubleSha256Digest]): (Seq[DoubleSha256Digest], ByteVector) = {
      if (remainingHashes <= 0) (accum.reverse, remainingBytes)
      else {
        val (hashBytes, newRemainingBytes) = remainingBytes.splitAt(32)
        loop(remainingHashes - 1,
             newRemainingBytes,
             DoubleSha256Digest(hashBytes) :: accum)
      }
    }
    loop(hashCount.num.toInt, bytes, Nil)
  }
}
// Concrete singleton instance of the sealed serializer used by callers.
object RawMerkleBlockSerializer extends RawMerkleBlockSerializer
| bitcoin-s/bitcoin-s-core | core/src/main/scala/org/bitcoins/core/serializers/blockchain/RawMerkleBlockSerializer.scala | Scala | mit | 3,376 |
package com.nyavro.manythanks.ws.microservice
import javax.inject.Inject
import com.google.inject.{AbstractModule, Guice}
import org.scalatest.{Matchers, WordSpec}
class MicroserviceTest extends WordSpec with Matchers {

  /** Builds a Guice injector that binds [[ServiceAPI]] to the given implementation class. */
  private def serviceInjector(implementation: Class[_ <: ServiceAPI]) =
    Guice.createInjector(
      new AbstractModule() {
        protected def configure() = {
          bind(classOf[ServiceAPI]).to(implementation)
        }
      }
    )

  "Guice" should {
    "inject dependencies" in {
      // MockService receives the bound ServiceAPI via constructor injection.
      serviceInjector(classOf[ServiceImpl])
        .getInstance(classOf[MockService])
        .get() should === ("service-impl")
    }
    "dependencies inject" in {
      serviceInjector(classOf[ImplService])
        .getInstance(classOf[MockService])
        .get() should === ("impl-service")
    }
    "bind string" in {
      // A plain instance binding: String resolves to the fixed value below.
      val injector = Guice.createInjector(
        new AbstractModule() {
          protected def configure() = {
            bind(classOf[String]).toInstance("str-value")
          }
        }
      )
      injector.getInstance(classOf[String]) should === ("str-value")
    }
  }
}
/** Minimal service contract used to exercise Guice bindings in this spec. */
private trait ServiceAPI {
  def name():String
}
/** First test implementation; identifies itself as "service-impl". */
private class ServiceImpl extends ServiceAPI {
  override def name() = "service-impl"
}
/** Second test implementation; identifies itself as "impl-service". */
private class ImplService extends ServiceAPI {
  override def name() = "impl-service"
}
/** Consumer whose ServiceAPI dependency is supplied by Guice constructor injection. */
private class MockService @Inject() (val service:ServiceAPI) {
  def get() = service.name()
}
| nyavro/manythanks | webService/src/test/scala/com/nyavro/manythanks/ws/microservice/MicroserviceTest.scala | Scala | apache-2.0 | 1,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import breeze.linalg.{DenseVector => BDV}
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.mllib.linalg.{Vectors, Vector}
/**
 * Class used to solve an optimization problem using Gradient Descent.
 * Variant that runs several local SGD iterations on each partition per
 * outer (distributed) iteration, averaging the partition weights afterwards.
 * @param gradient Gradient function to be used.
 * @param updater Updater to be used to update weights after every iteration.
 */
class GradientDescentWithLargeMemory(var gradient: Gradient, var updater: Updater) extends GradientDescent(gradient, updater) with Logging
{
  // NOTE(review): these private fields shadow the superclass configuration;
  // the overridden setters below keep this local copy so optimize() reads
  // consistent values. Confirm the superclass fields are not read elsewhere.
  private var numLocalIterations: Int = 1
  private var stepSize: Double = 1.0
  private var numIterations: Int = 100
  private var regParam: Double = 0.0
  private var miniBatchFraction: Double = 1.0
  /**
   * Set the initial step size of SGD for the first step. Default 1.0.
   * In subsequent steps, the step size will decrease with stepSize/sqrt(t)
   */
  override def setStepSize(step: Double): this.type = {
    this.stepSize = step
    this
  }
  /**
   * Set fraction of data to be used for each SGD iteration.
   * Default 1.0 (corresponding to deterministic/classical gradient descent)
   */
  override def setMiniBatchFraction(fraction: Double): this.type = {
    this.miniBatchFraction = fraction
    this
  }
  /**
   * Set the number of iterations for SGD. Default 100.
   */
  override def setNumIterations(iters: Int): this.type = {
    this.numIterations = iters
    this
  }
  /**
   * Set the regularization parameter. Default 0.0.
   */
  override def setRegParam(regParam: Double): this.type = {
    this.regParam = regParam
    this
  }
  /**
   * Set the gradient function (of the loss function of one single data example)
   * to be used for SGD.
   */
  override def setGradient(gradient: Gradient): this.type = {
    this.gradient = gradient
    this
  }
  /**
   * Set the updater function to actually perform a gradient step in a given direction.
   * The updater is responsible to perform the update from the regularization term as well,
   * and therefore determines what kind or regularization is used, if any.
   */
  override def setUpdater(updater: Updater): this.type = {
    this.updater = updater
    this
  }
  /**
   * Set the number of local iterations. Default 1.
   * This is the number of SGD steps each partition performs per outer iteration.
   */
  def setNumLocalIterations(numLocalIter: Int): this.type = {
    this.numLocalIterations = numLocalIter
    this
  }
  /** Runs mini-batch SGD with local updates and returns the final weights. */
  override def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
    val (weights, _) = GradientDescentWithLargeMemory.runMiniBatchSGD(
      data,
      gradient,
      updater,
      stepSize,
      numIterations,
      numLocalIterations,
      regParam,
      miniBatchFraction,
      initialWeights)
    weights
  }
}
// Top-level method to run gradient descent.
object GradientDescentWithLargeMemory extends Logging {
  /**
   * Run BSP+ gradient descent in parallel using mini batches.
   *
   * Each outer iteration runs `numLocalIterations` SGD steps independently on
   * every partition (each partition mutates its own closure-local copy of the
   * weights), then the per-partition weights are collected and averaged.
   *
   * @param data - Input data for SGD. RDD of form (label, [feature values]).
   * @param gradient - Gradient object that will be used to compute the gradient.
   * @param updater - Updater object that will be used to update the model.
   * @param stepSize - stepSize to be used during update.
   * @param numIterations - number of outer iterations that SGD should be run.
   * @param numLocalIterations - number of inner iterations that SGD should be run.
   * @param regParam - regularization parameter
   * @param miniBatchFraction - fraction of the input data set that should be used for
   *                            one iteration of SGD. Default value 1.0.
   *
   * @return A tuple containing two elements. The first element is a column matrix containing
   *         weights for every feature, and the second element is an array containing the stochastic
   *         loss computed for every iteration.
   */
  def runMiniBatchSGD(
      data: RDD[(Double, Vector)],
      gradient: Gradient,
      updater: Updater,
      stepSize: Double,
      numIterations: Int,
      numLocalIterations: Int,
      regParam: Double,
      miniBatchFraction: Double,
      initialWeights: Vector) : (Vector, Array[Double]) = {
    val stochasticLossHistory = new ArrayBuffer[Double](numIterations)
    val numExamples: Long = data.count()
    val numPartition = data.partitions.length
    // Expected number of sampled examples per partition and local step.
    val miniBatchSize = numExamples * miniBatchFraction / numPartition
    // Initialize weights as a column vector
    var weights = Vectors.dense(initialWeights.toArray)
    /**
     * For the first iteration, the regVal will be initialized as sum of sqrt of
     * weights if it's L2 update; for L1 update; the same logic is followed.
     */
    var regVal = updater.compute(
      weights, Vectors.dense(new Array[Double](weights.size)), 0, 1, regParam)._2
    for (i <- 1 to numIterations) {
      val weightsAndLosses = data.mapPartitions { iter =>
        var iterReserved = iter
        val localLossHistory = new ArrayBuffer[Double](numLocalIterations)
        for (j <- 1 to numLocalIterations) {
          // Iterators are single-pass: duplicate so the next local iteration
          // can re-read the partition's data.
          val (iterCurrent, iterNext) = iterReserved.duplicate
          val rand = new Random(42 + i * numIterations + j)
          val sampled = iterCurrent.filter(x => rand.nextDouble() <= miniBatchFraction)
          val (gradientSum, lossSum) = sampled.aggregate((BDV.zeros[Double](weights.size), 0.0))(
            seqop = (c, v) => (c, v) match { case ((grad, loss), (label, features)) =>
              val l = gradient.compute(features, label, weights, Vectors.fromBreeze(grad))
              (grad, loss + l)
            },
            combop = (c1, c2) => (c1, c2) match { case ((grad1, loss1), (grad2, loss2)) =>
              (grad1 += grad2, loss1 + loss2)
            })
          localLossHistory += lossSum / miniBatchSize + regVal
          // BUGFIX: the global step index used for the step-size schedule must
          // count all local steps performed so far, i.e.
          // (i - 1) * numLocalIterations + j. The previous code passed
          // (i - 1) + numIterations + j, which is both additive and based on
          // the outer iteration total, distorting the learning-rate decay.
          val update = updater.compute(weights, Vectors.fromBreeze(gradientSum :/ miniBatchSize),
            stepSize, (i - 1) * numLocalIterations + j, regParam)
          weights = update._1
          regVal = update._2
          iterReserved = iterNext
        }
        List((weights.toBreeze, localLossHistory.toArray)).iterator
      }
      val c = weightsAndLosses.collect()
      val (ws, ls) = c.unzip
      // NOTE(review): only the first partition's local losses are averaged
      // into the history — presumably a representative sample; confirm intent.
      stochasticLossHistory.append(ls.head.reduce(_ + _) / ls.head.size)
      // Average the per-partition weight vectors into the new global weights.
      val weightsSum = ws.reduce(_ += _)
      weights = Vectors.fromBreeze(weightsSum :/ c.size.toDouble)
    }
    logInfo("GradientDescentWithLocalUpdate finished. Last 10 stochastic losses %s".format(
      stochasticLossHistory.mkString(", ")))
    (weights, stochasticLossHistory.toArray)
  }
}
| jz3707/gradient_descent_variants | src/main/scala/org.apache.spark.mllib.optimization/GradientDescentWithLargeMemory.scala | Scala | apache-2.0 | 7,431 |
package fabricator
import org.testng.annotations.{DataProvider, Test}
import scala.collection.mutable
/**
 * TestNG suite for the Words fabricator: word lists, sentences and paragraphs.
 *
 * Note: assertResult takes the EXPECTED value first and the actual value in
 * the second parameter list; several tests previously had the arguments
 * reversed, which made failure messages misleading. They are now consistent
 * with testWords.
 */
class WordsTestSuite extends BaseTestSuite {

  /** Languages for which a localized Words instance can be constructed. */
  @DataProvider(name = "languageDp")
  def languageDp(): Array[Array[Any]] = {
    Array(Array("nl"),
      Array("de")
    )
  }

  @Test(dataProvider = "languageDp")
  def testCustomConstructor(lang: String) {
    val customWords = Option(fabricator.Words(lang))
    assert(customWords.isDefined)
  }

  /** Word counts used to exercise words() across small and large sizes. */
  @DataProvider(name = "wordsCountDP")
  def wordsCountDP():Array[Array[String]]= {
    Array(Array("10"),
      Array("100"),
      Array("1000"),
      Array("4000"),
      Array("9500")
    )
  }

  @Test
  def testDefaultWords() {
    val wordsDefaultArray: Array[String] = words.words()
    if (debugEnabled) logger.debug("Getting words array generated with default length ")
    assert(wordsDefaultArray.length == 10)
    // All ten generated words must be distinct; a Set collapses duplicates.
    assertResult(10)(wordsDefaultArray.toSet.size)
  }

  @Test(expectedExceptions = Array(classOf[Exception]))
  def testWordsMaximumAmountException(): Unit = {
    // Requesting more than the supported maximum must raise.
    words.words(100001)
  }

  @Test(dataProvider = "wordsCountDP")
  def testWords(count: String) = {
    if (debugEnabled) logger.debug("Getting words array generated with length = " + count)
    assertResult(count.toInt)(words.words(count.toInt).length)
  }

  @Test
  def testSentenceDefault() = {
    val sentence = words.sentence
    if (debugEnabled) logger.debug("Testing sentence generation. Creating sentence with 10 words length \\n" + sentence)
    assertResult(10)(sentence.split(" ").length)
  }

  @Test
  def testSentenceCustomLength() = {
    val sentence = words.sentence(20)
    if (debugEnabled) logger.debug("Testing sentence generation. Creating sentence with 10 words length: \\n" + sentence)
    assertResult(20)(sentence.split(" ").length)
  }

  @Test
  def testTextDefaultValue() = {
    val paragraph = words.paragraph
    if (debugEnabled) logger.debug("Testing sentence generation. Creating text with 10 words length: \\n" + paragraph)
    assertResult(100)(paragraph.length)
  }

  @Test(dataProvider = "wordsCountDP")
  def testTextCustomValue(length: String) = {
    val paragraph = words.paragraph(length.toInt)
    if (debugEnabled) logger.debug("Testing sentence generation. Creating paragraph with chars length: " + length.toInt + "\\n" + paragraph)
    assertResult(length.toInt)(paragraph.length)
  }
}
| edombowsky/fabricator | src/test/scala/fabricator/WordsTestSuite.scala | Scala | apache-2.0 | 2,541 |
package class_instance_extractor
import java.io.BufferedWriter
import java.io.FileOutputStream
import java.io.OutputStreamWriter
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.mutable.Map
import com.hp.hpl.jena.rdf.model.ResourceFactory
import com.hp.hpl.jena.util.FileManager
/**
 * Extracts class-instance ("role") statements from the Japanese Wikipedia
 * ontology: for every statement whose object is an instance resource whose
 * class also appears in the class ontology, a tab-separated line
 * "subject predicate class" is written to the output file.
 */
object RoleStatementsExtractor {
  def main(args: Array[String]) {
    val inputInstances = "ontologies/wikipediaontology_instance_20101114ja.rdf"
    val inputOntology = "ontologies/wikipediaontology_class_20101114ja.rdf"
    val outputText = "inputs_and_outputs/tests/role_statements.txt"
    val writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputText), "UTF-8"))
    // Ensure the output file handle is released even if RDF loading or
    // statement processing throws (previously it leaked on failure).
    try {
      val model = FileManager.get().loadModel(inputInstances)
      val ontModel = FileManager.get().loadModel(inputOntology)
      for (stmt <- model.listStatements().toList()) {
        // isURIResource (rather than isResource) excludes anonymous blank
        // nodes, whose getURI() is null and would cause an NPE below.
        if (stmt.getObject().isURIResource() && stmt.getObject().asResource().getURI().split("instance/").size == 2) {
          val clsName = stmt.getObject().asResource().getURI().split("instance/")(1)
          val cls = ResourceFactory.createResource("http://www.yamaguti.comp.ae.keio.ac.jp/wikipedia_ontology/class/" + clsName)
          // Keep only instances whose class is actually defined in the class ontology.
          if (0 < ontModel.listStatements(cls, null, null).toList().size()) {
            val sub = stmt.getSubject().getURI().split("instance/")(1)
            val pre = stmt.getPredicate().getURI().split("property/")(1)
            val obj = clsName
            writer.write(sub + "\\t" + pre + "\\t" + obj)
            writer.newLine()
            println(sub + "\\t" + pre + "\\t" + obj)
          }
        }
      }
    } finally {
      writer.close()
    }
  }
}
} | t-morita/JWO_Refinement_Tools | src/main/scala/class_instance_extractor/RoleStatementsExtractor.scala | Scala | apache-2.0 | 1,683 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package databases.firebird
import java.net.URI
import akka.testkit.{ TestActorRef, TestFSMRef }
import com.wegtam.scalatest.tags.{ DbTest, DbTestFirebird }
import com.wegtam.tensei.adt._
import com.wegtam.tensei.agent.{ ActorSpec, DummyActor, TenseiAgent }
import org.scalatest.BeforeAndAfterEach
import scala.concurrent.duration._
import scala.math.BigDecimal.RoundingMode
import scala.sys.process.Process
/**
 * Integration spec verifying that foreign key values referencing auto-increment
 * columns are rewritten correctly when migrating between Firebird databases.
 *
 * The three test cases differ only in recipe mode (one-to-one vs all-to-all)
 * and mapping granularity (one mapping per column vs one bulk mapping);
 * the shared setup, transformation run and verification live in private
 * helpers below.
 */
class ForeignKeysTest extends ActorSpec with BeforeAndAfterEach {
  val databaseHost = testConfig.getString("firebird.host")
  val databasePort = testConfig.getInt("firebird.port")
  val databaseName = testConfig.getString("firebird.target-db.name")
  val databaseUser = testConfig.getString("firebird.target-db.user")
  val databasePass = testConfig.getString("firebird.target-db.pass")
  val databasePath = testConfig.getString("firebird.target-db.path")

  /** Opens a JDBC connection to the test Firebird database. */
  private def openConnection(): java.sql.Connection =
    java.sql.DriverManager.getConnection(
      s"jdbc:firebirdsql://$databaseHost:$databasePort//$databasePath/$databaseName",
      databaseUser,
      databasePass
    )

  /** Loads a DFASDL description from a classpath resource. */
  private def loadDfasdl(dfasdlId: String, resource: String): DFASDL =
    new DFASDL(
      id = dfasdlId,
      content = scala.io.Source
        .fromInputStream(getClass.getResourceAsStream(resource))
        .mkString
    )

  // Element ids of the source/target columns, in mapping order.
  private val employeeColumns = List(
    "employees_row_id",
    "employees_row_firstname",
    "employees_row_lastname",
    "employees_row_birthday"
  )
  private val salaryColumns = List("salary_row_employee_id", "salary_row_amount")

  /** One MappingTransformation per column ("single mappings"). */
  private def singleMappings(src: DFASDL,
                             dst: DFASDL,
                             columns: List[String]): List[MappingTransformation] =
    columns.map { column =>
      MappingTransformation(
        sources = List(ElementReference(dfasdlId = src.id, elementId = column)),
        targets = List(ElementReference(dfasdlId = dst.id, elementId = column))
      )
    }

  /** A single MappingTransformation covering all columns ("bulk mapping"). */
  private def bulkMappings(src: DFASDL,
                           dst: DFASDL,
                           columns: List[String]): List[MappingTransformation] =
    List(
      MappingTransformation(
        sources = columns.map(c => ElementReference(dfasdlId = src.id, elementId = c)),
        targets = columns.map(c => ElementReference(dfasdlId = dst.id, elementId = c))
      )
    )

  /**
   * Runs a transformation built from the given recipes and verifies that the
   * salary rows still join to the correct employees afterwards.
   *
   * @param recipes builds the cookbook recipes from the source and target DFASDLs
   */
  private def runAndVerify(recipes: (DFASDL, DFASDL) => List[Recipe]): Unit = {
    val connection = openConnection()
    val sourceDfasdl = loadDfasdl("SRC", "/databases/generic/ForeignKeys/source-dfasdl.xml")
    val targetDfasdl = loadDfasdl("DST", "/databases/generic/ForeignKeys/target-dfasdl.xml")
    val cookbook: Cookbook = Cookbook(
      id = "COOKBOOK",
      sources = List(sourceDfasdl),
      target = Option(targetDfasdl),
      recipes = recipes(sourceDfasdl, targetDfasdl)
    )
    val source = ConnectionInformation(
      uri = new URI(connection.getMetaData.getURL),
      dfasdlRef = Option(DFASDLReference(cookbookId = cookbook.id, dfasdlId = sourceDfasdl.id)),
      username = Option(databaseUser),
      password = Option(databasePass)
    )
    val target = ConnectionInformation(
      uri = new URI(connection.getMetaData.getURL),
      dfasdlRef = Option(DFASDLReference(cookbookId = cookbook.id, dfasdlId = targetDfasdl.id)),
      username = Option(databaseUser),
      password = Option(databasePass)
    )
    val dummy = TestActorRef(DummyActor.props())
    val client = system.actorSelection(dummy.path)
    val agent = TestFSMRef(new TenseiAgent("TEST-AGENT", client))
    val msg = AgentStartTransformationMessage(
      sources = List(source),
      target = target,
      cookbook = cookbook,
      uniqueIdentifier = Option("FOREIGN-KEY-TEST-OneToOne")
    )
    agent ! msg
    expectMsgType[GlobalMessages.TransformationStarted](FiniteDuration(5, SECONDS))
    expectMsgType[GlobalMessages.TransformationCompleted](FiniteDuration(7, SECONDS))
    val s = connection.createStatement()
    withClue("Written data should be correct!") {
      // Salaries keyed by the employee's last name; the join below must still
      // produce these pairs after the ids were re-generated on the target.
      val expectedData = Map(
        "Einstein" -> new java.math.BigDecimal("3.14"),
        "Riemann" -> new java.math.BigDecimal("6.28"),
        "Gauß" -> new java.math.BigDecimal("12.56"),
        "Listing" -> new java.math.BigDecimal("25.12"),
        "Leibnitz" -> new java.math.BigDecimal("50.24")
      )
      val r = s.executeQuery(
        "SELECT t_employees.id AS id, t_employees.lastname AS name, t_salary.amount AS amount FROM t_employees JOIN t_salary ON t_employees.id = t_salary.employee_id"
      )
      if (r.next()) {
        r.getBigDecimal("amount")
          .setScale(2, RoundingMode.DOWN)
          .compare(expectedData(r.getString("name"))) should be(0)
        while (r.next()) {
          r.getBigDecimal("amount")
            .setScale(2, RoundingMode.DOWN)
            .compare(expectedData(r.getString("name"))) should be(0)
        }
      } else
        fail("No results found in database!")
    }
    connection.close()
  }

  /** Re-creates the database and the source/target tables before every test. */
  override protected def beforeEach(): Unit = {
    // The database connection.
    val sqlScript =
      getClass.getResource("/databases/generic/SqlScripts/Firebird/Database_before_each.sql")
    Process(s"isql-fb -user $databaseUser -p $databasePass -i ${sqlScript.getPath}")
      .run()
      .exitValue() should be(0)
    Thread.sleep(2000)
    createSourceData()
    createTargetStructure()
    super.beforeEach()
  }

  /**
   * Remove the test database.
   */
  override protected def afterEach(): Unit = {
    val sqlScript =
      getClass.getResource("/databases/generic/SqlScripts/Firebird/Database_after_each.sql")
    Process(s"isql-fb -user $databaseUser -p $databasePass -i ${sqlScript.getPath}")
      .run()
      .exitValue() should be(0)
    Thread.sleep(2000)
    super.afterEach()
  }

  /** Creates and populates the source tables (employees plus salary with a FK). */
  private def createSourceData(): Unit = {
    val c = openConnection()
    val s = c.createStatement()
    s.execute("""
                |CREATE TABLE employees (
                |  id BIGINT PRIMARY KEY,
                |  firstname VARCHAR(254),
                |  lastname VARCHAR(254),
                |  birthday DATE
                |)
              """.stripMargin)
    s.execute(
      """
        |CREATE TABLE salary (
        |  employee_id BIGINT,
        |  amount DECIMAL(10,2),
        |  CONSTRAINT fk_salary_to_employees FOREIGN KEY (employee_id) REFERENCES employees (id)
        |)
      """.stripMargin
    )
    s.execute(
      """INSERT INTO employees (id, firstname, lastname, birthday) VALUES(123, 'Albert', 'Einstein', '1879-03-14')"""
    )
    s.execute("""INSERT INTO salary (employee_id, amount) VALUES(123, 3.14)""")
    s.execute(
      """INSERT INTO employees (id, firstname, lastname, birthday) VALUES(456, 'Bernhard', 'Riemann', '1826-09-17')"""
    )
    s.execute("""INSERT INTO salary (employee_id, amount) VALUES(456, 6.28)""")
    s.execute(
      """INSERT INTO employees (id, firstname, lastname, birthday) VALUES(789, 'Johann Carl Friedrich', 'Gauß', '1777-04-30')"""
    )
    s.execute("""INSERT INTO salary (employee_id, amount) VALUES(789, 12.56)""")
    s.execute(
      """INSERT INTO employees (id, firstname, lastname, birthday) VALUES(5, 'Johann Benedict', 'Listing', '1808-07-25')"""
    )
    s.execute("""INSERT INTO salary (employee_id, amount) VALUES(5, 25.12)""")
    s.execute(
      """INSERT INTO employees (id, firstname, lastname, birthday) VALUES(8, 'Gottfried Wilhelm', 'Leibnitz', '1646-07-01')"""
    )
    s.execute("""INSERT INTO salary (employee_id, amount) VALUES(8, 50.24)""")
    s.close()
    c.close()
  }

  /** Creates the target employees table with an auto-increment trigger/generator. */
  private def createTargetStructure(): Unit = {
    val c = openConnection()
    val s = c.createStatement()
    s.execute("""
                |CREATE TABLE t_employees (
                |  id BIGINT PRIMARY KEY,
                |  firstname VARCHAR(254),
                |  lastname VARCHAR(254),
                |  birthday DATE
                |)
              """.stripMargin)
    s.execute("""
                |CREATE GENERATOR GEN_T_EMPLOYEES_ID;
              """.stripMargin)
    s.execute("""
                |SET GENERATOR GEN_T_EMPLOYEES_ID TO 0;
              """.stripMargin)
    // Firebird emulates auto-increment via a BEFORE INSERT trigger that pulls
    // the next value from the generator when no id is supplied.
    s.execute("""
                |CREATE TRIGGER T_EMPLOYEES_TRIGGER FOR T_EMPLOYEES
                |ACTIVE BEFORE INSERT POSITION 0
                |AS
                |BEGIN
                |if (NEW.ID is NULL) then NEW.ID = GEN_ID(GEN_T_EMPLOYEES_ID, 1);
                |END
              """.stripMargin)
    s.close()
    c.close()
  }

  describe("Foreign keys") {
    describe("using firebird") {
      describe("using one to one mappings") {
        describe("with single mappings") {
          it("should replace changed auto-increment values", DbTest, DbTestFirebird) {
            runAndVerify { (src, dst) =>
              List(
                Recipe(id = "CopyEmployees",
                       mode = Recipe.MapOneToOne,
                       mappings = singleMappings(src, dst, employeeColumns)),
                Recipe(id = "CopySalaries",
                       mode = Recipe.MapOneToOne,
                       mappings = singleMappings(src, dst, salaryColumns))
              )
            }
          }
        }
        describe("with bulk mappings") {
          it("should replace changed auto-increment values", DbTest, DbTestFirebird) {
            runAndVerify { (src, dst) =>
              List(
                Recipe(id = "CopyEmployees",
                       mode = Recipe.MapOneToOne,
                       mappings = bulkMappings(src, dst, employeeColumns)),
                Recipe(id = "CopySalaries",
                       mode = Recipe.MapOneToOne,
                       mappings = bulkMappings(src, dst, salaryColumns))
              )
            }
          }
        }
      }
      describe("using all to all mappings") {
        it("should replace changed auto-increment values", DbTest, DbTestFirebird) {
          runAndVerify { (src, dst) =>
            List(
              Recipe(id = "CopyEmployees",
                     mode = Recipe.MapAllToAll,
                     mappings = singleMappings(src, dst, employeeColumns)),
              Recipe(id = "CopySalaries",
                     mode = Recipe.MapAllToAll,
                     mappings = singleMappings(src, dst, salaryColumns))
            )
          }
        }
      }
    }
  }
}
| Tensei-Data/tensei-agent | src/it/scala/databases/firebird/ForeignKeysTest.scala | Scala | agpl-3.0 | 25,251 |
package ulang.syntax
import arse.Fixity
import arse._
/**
 * Expressions in a locally nameless representation: binder-bound variables
 * are de Bruijn indices ([[BoundVar]]), named variables are [[FreeVar]]s.
 */
sealed trait Expr {
  /** Turns occurrences of the free variable x into the bound index `index`. */
  def abs(x: FreeVar, index: Int): Expr
  /** Binds free variables to their position in `stack` (innermost binder first). */
  def bind(stack: List[FreeVar]): Expr
  /** The free variables of this expression. */
  def free: Set[FreeVar]
  /** All variables occurring in this expression, including binding occurrences. */
  def vars: Set[FreeVar]
  /** Applies f to every free variable occurrence. */
  def mapFree(f: FreeVar => Expr): Expr
  /** Binder-name-insensitive equality; refined in [[App]] and [[Lambda]]. */
  def ===(that: Expr): Boolean = (this == that)
  /** Replaces the subterm e1 by e2; refuses to descend under binders. */
  def replace(e1: Expr, e2: Expr): Expr = this match {
    case `e1` => e2
    case App(fun, arg) => App(fun replace (e1, e2), arg replace (e1, e2))
    case Lambda(bound, body) => fatal("in replace: cannot replace in " + this)
    case _ => this
  }
}
/** A named, typed operator (constant); also usable as semantic data. */
case class Op(name: String, typ: Type) extends Expr with ulang.semantics.Data {
  override def toString = name // + ":" + typ
  // Operators contain no variables, so all variable operations are identities.
  def abs(x: FreeVar, index: Int) = this
  def bind(stack: List[FreeVar]) = this
  def free = Set.empty[FreeVar]
  def vars = Set.empty[FreeVar]
  def mapFree(f: FreeVar => Expr) = this
}
/** A de Bruijn index referring to the `index`-th enclosing binder. */
case class BoundVar(index: Int) extends Expr {
  override def toString = "@" + index
  // Already bound: unaffected by abstraction, binding, or free-variable maps.
  def abs(x: FreeVar, index: Int) = this
  def bind(stack: List[FreeVar]) = this
  def free = Set.empty[FreeVar]
  def vars = Set.empty[FreeVar]
  def mapFree(f: FreeVar => Expr) = this
}
/** A named free variable with its type. */
case class FreeVar(name: String, typ: Type) extends Expr {
  override def toString = "$" + name // + ":" + typ
  /** Becomes the given bound index when abstracting exactly this variable. */
  def abs(x: FreeVar, index: Int) = if (x == this) BoundVar(index) else this
  /** Resolves to a de Bruijn index when this variable occurs in the binder stack. */
  def bind(stack: List[FreeVar]) = stack.indexOf(this) match {
    case -1       => this
    case position => BoundVar(position)
  }
  def free = Set(this)
  def vars = Set(this)
  def mapFree(f: FreeVar => Expr) = f(this)
}
/** Application of a function expression to a single argument (curried). */
case class App(fun: Expr, arg: Expr) extends Expr {
  // Prints nested applications flat as (op arg1 arg2 ...) via the FlatApp extractor.
  override def toString = this match {
    case FlatApp(op, args) =>
      "(" + op + " " + args.mkString(" ") + ")"
  }
  // All variable operations distribute over function and argument.
  def abs(x: FreeVar, index: Int) = App(fun abs (x, index), arg abs (x, index))
  def bind(stack: List[FreeVar]) = App(fun bind stack, arg bind stack)
  def free = fun.free ++ arg.free
  def vars = fun.vars ++ arg.vars
  def mapFree(f: FreeVar => Expr) = App(fun mapFree f, arg mapFree f)
  /** Structural equality, recursing with === on both components. */
  override def ===(that: Expr): Boolean = that match {
    case that: App =>
      this.fun === that.fun && this.arg === that.arg
    case _ => false
  }
}
/** Lambda abstraction; `bound` names the binder, the body references it directly until bind. */
case class Lambda(bound: FreeVar, body: Expr) extends Expr {
  override def toString = "(λ " + bound + ". " + body + ")"
  // Abstracting under a binder shifts the target index by one.
  def abs(x: FreeVar, index: Int) = Lambda(bound, body abs (x, index + 1))
  // Binding pushes this binder's variable onto the stack for the body.
  def bind(stack: List[FreeVar]) = Lambda(bound, body bind (bound :: stack))
  def free = body.free - bound
  // NOTE(review): vars adds `bound` to body.free (not body.vars) — confirm intended.
  def vars = body.free + bound
  def mapFree(f: FreeVar => Expr) = Lambda(bound, body mapFree f)
  /** Equality that ignores the binder name, comparing bodies only. */
  override def ===(that: Expr): Boolean = that match {
    case that: Lambda =>
      this.body === that.body
    case _ => false
  }
}
/** One match clause: a list of argument patterns and a right-hand side. */
case class Case(args: List[Expr], body: Expr)
/** A pattern-matching lambda; currently print-only, all term operations unimplemented. */
case class Match(cases: List[Case]) extends Expr {
  override def toString = {
    val ss = cases.map { case Case(args, rhs) => args.mkString(" ") + ". " + rhs }
    ss.mkString("(λ ", " | ", ")")
  }
  // Not yet implemented: calling any of these throws NotImplementedError.
  def abs(x: FreeVar, index: Int) = ???
  def bind(stack: List[FreeVar]) = ???
  def free = ???
  def vars = ???
  def mapFree(f: FreeVar => Expr) = ???
}
/** Built-in operators with their polymorphic types. */
object Op {
  import Type._
  // NOTE(review): a parameterless `def equals` overloads (does not override)
  // Any.equals(Any); intentional, but easy to confuse — consider renaming.
  def equals = Op("=", alpha → (alpha → bool))
  val if_then_else = Op("if_then_else", bool → (alpha → (alpha → alpha)))
}
/** Builds a curried, left-nested application of `fun` to all of `args`. */
object App {
  def apply(fun: Expr, args: List[Expr]): Expr =
    args.foldLeft(fun) { (applied, next) => App(applied, next) }
}
/** Builds nested single-binder lambdas; the last element of `bound` binds innermost. */
object Lambda {
  def apply(bound: List[FreeVar], body: Expr): Expr = {
    bound.foldRight(body)(Lambda(_, _))
  }
} | gernst/ulang | src/ulang/syntax/Expr.scala | Scala | mit | 3,497 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.sortedk
import org.trustedanalytics.sparktk.frame.internal.rdd.{ FrameOrderingUtils, MultiColumnKeyOrdering, FrameRdd }
/**
* Object with methods for sorting the top-k rows in a frame
*/
object SortedKFunctions extends Serializable {
  /**
   * Return the top-K rows in a frame ordered by column(s).
   *
   * @param frameRdd Frame to sort
   * @param k Number of sorted records to return
   * @param columnNamesAndAscending Column names to sort by, true for ascending, false for descending
   * @param reduceTreeDepth Depth of reduce tree (governs number of rounds of reduce tasks)
   * @return New frame with top-K rows
   */
  def takeOrdered(frameRdd: FrameRdd,
                  k: Int,
                  columnNamesAndAscending: List[(String, Boolean)],
                  reduceTreeDepth: Int): FrameRdd = {
    require(k > 0, "k should be greater than zero") //TODO: Should we add an upper bound for K
    require(columnNamesAndAscending != null && columnNamesAndAscending.nonEmpty, "one or more columnNames is required")

    val columnNames = columnNamesAndAscending.map(_._1)
    val ascendingPerColumn = columnNamesAndAscending.map(_._2)
    // Hoisted out of the row closure: previously columnNames.toVector was
    // re-evaluated for every row of the RDD.
    val columnNameVector = columnNames.toVector

    // Key each row by the values of the sort columns.
    val pairRdd = frameRdd.mapRows(row => (row.values(columnNameVector).toList, row.data))
    implicit val keyOrdering = new MultiColumnKeyOrdering(ascendingPerColumn)
    val topRows = FrameOrderingUtils.takeOrderedTree(pairRdd, k, reduceTreeDepth)

    // ascending is always true here because we control it in the ordering
    val topRowsRdd = frameRdd.sparkContext.parallelize(topRows).map(_._2)
    new FrameRdd(frameRdd.frameSchema, topRowsRdd)
  }
}
| ashaarunkumar/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/sortedk/SortedKFunctions.scala | Scala | apache-2.0 | 2,388 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package frontends.scalac
import scala.tools.nsc.Settings
import scala.tools.nsc.reporters.AbstractReporter
import scala.reflect.internal.util.{Position, NoPosition, FakePos, StringOps}
import utils.{Position => LeonPosition, NoPosition => LeonNoPosition, OffsetPosition => LeonOffsetPosition}
/** This implements a reporter that calls the callback with every line that a
regular ConsoleReporter would display. */
class SimpleReporter(val settings: Settings, reporter: leon.Reporter) extends AbstractReporter {
  // Errors beyond this count are still tallied but no longer displayed.
  final val ERROR_LIMIT = 5

  // Label for a severity; INFO maps to null so clabel yields no prefix.
  private def label(severity: Severity): String = severity match {
    case ERROR => "error"
    case WARNING => "warning"
    case INFO => null
  }

  // "error: " / "warning: " prefix, or "" for INFO.
  private def clabel(severity: Severity): String = {
    val label0 = label(severity)
    if (label0 eq null) "" else label0 + ": "
  }

  // e.g. "two errors". NOTE(review): not referenced within this class.
  private def getCountString(severity: Severity): String =
    StringOps.countElementsAsString(severity.count, label(severity))

  /** Prints the message, forwarding to the Leon reporter at the matching level. */
  def printMessage(msg: String, pos: LeonPosition, severity: Severity) {
    severity match {
      case ERROR =>
        reporter.error(pos, msg)
      case WARNING =>
        reporter.warning(pos, msg)
      case INFO =>
        reporter.info(pos, msg)
    }
  }

  /** Prints the message with the given position indication, translating
    * scalac positions into Leon positions first. */
  def printMessage(posIn: Position, msg: String, severity: Severity) {
    val pos = if (posIn eq null) NoPosition
              else if (posIn.isDefined) posIn.finalPosition
              else posIn
    pos match {
      // Fake positions carry their own message fragment; prepend it.
      case FakePos(fmsg) =>
        printMessage(fmsg+" "+msg, LeonNoPosition, severity)
      case NoPosition =>
        printMessage(msg, LeonNoPosition, severity)
      case _ =>
        val lpos = LeonOffsetPosition(pos.line, pos.column, pos.point, pos.source.file.file)
        printMessage(msg, lpos, severity)
    }
  }

  // Prefix the message with its severity label before printing.
  def print(pos: Position, msg: String, severity: Severity) {
    printMessage(pos, clabel(severity) + msg, severity)
  }

  // AbstractReporter callback: count every message, but suppress error output
  // once ERROR_LIMIT has been exceeded.
  def display(pos: Position, msg: String, severity: Severity) {
    severity.count += 1
    if (severity != ERROR || severity.count <= ERROR_LIMIT)
      print(pos, msg, severity)
  }

  // Interactive prompt is a no-op for this reporter.
  def displayPrompt(): Unit = {}
}
| epfl-lara/leon | src/main/scala/leon/frontends/scalac/SimpleReporter.scala | Scala | gpl-3.0 | 2,264 |
package controllers
import anorm.Row
import play.api.libs.json.{JsNumber, JsObject, JsValue}
/** REST resource backed by the "months" table. */
object Month extends REST {
  val tableName: String = "months"

  // Request-parameter validators keyed by field name.
  val parameters: Map[String, (String, String) => ValidationResult] = Map(
    "number" -> validateInt,
    "year_id" -> validateInt
  )

  /** Serialize one database row as a JSON object.
    * NOTE(review): matches only rows of exactly (Long, Long, Long); any other
    * column types/arity fall through to IllegalArgumentException — confirm the
    * anorm column mapping for this table always yields Longs.
    */
  protected def single(row: Row): JsValue = {
    row match {
      case Row(id: Long, number: Long, year_id: Long) => {
        JsObject(
          "id" -> JsNumber(id) ::
            "number" -> JsNumber(number) ::
            "year_id" -> JsNumber(year_id) ::
            Nil
        )
      }
      case _ => throw new IllegalArgumentException("Row provided is invalid!")
    }
  }
} | ishakir/PokeStat | app/controllers/Month.scala | Scala | mit | 690 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.http.router
import java.lang.reflect.InvocationTargetException
import wvlet.airframe.codec.{MessageCodec, MessageCodecFactory}
import wvlet.airframe.http.{HttpBackend, HttpContext, HttpRequestAdapter}
import wvlet.log.LogSupport
import scala.concurrent.ExecutionContext
import scala.language.higherKinds
/**
* Create the terminal request handler for processing a method with @EndPoint annotation.
*
* This handler will call a controller method with the request parameters build from the method arguments.
*/
class HttpEndpointExecutionContext[Req: HttpRequestAdapter, Resp, F[_]](
    protected val backend: HttpBackend[Req, Resp, F],
    routeMatch: RouteMatch,
    responseHandler: ResponseHandler[Req, Resp],
    controller: Any,
    codecFactory: MessageCodecFactory
) extends HttpContext[Req, Resp, F]
    with LogSupport {

  /** Invoke the matched controller method and adapt its return value into the
    * backend's response effect F[Resp]. */
  override def apply(request: Req): F[Resp] = {
    val route = routeMatch.route
    val result = {
      // Call the method in this controller
      try {
        route.call(controller, request, routeMatch.params, this, codecFactory)
      } catch {
        case e: InvocationTargetException =>
          // Return the exception from the target method
          throw e.getTargetException
      }
    }
    // Dispatch on the declared (reflected) return type of the route method.
    route.returnTypeSurface.rawType match {
      // When a return type is Future[X] (backend-native future)
      case cl: Class[_] if backend.isFutureType(cl) =>
        // Check the type of X
        val futureValueSurface = route.returnTypeSurface.typeArgs(0)
        futureValueSurface.rawType match {
          // If X is the backend Response type, return as is:
          case valueCls if backend.isRawResponseType(valueCls) =>
            // Use Finagle Future
            result.asInstanceOf[F[Resp]]
          case other =>
            // If X is other type, convert X into an HttpResponse
            backend.mapF(
              result.asInstanceOf[F[_]],
              { (x: Any) => responseHandler.toHttpResponse(route, request, futureValueSurface, x) }
            )
        }
      // When a return type is scala.concurrent.Future[X]
      case cl: Class[_] if backend.isScalaFutureType(cl) =>
        // Check the type of X
        val futureValueSurface = route.returnTypeSurface.typeArgs(0)
        // TODO: Is using global execution a right choice?
        val ex = ExecutionContext.global
        futureValueSurface.rawType match {
          // If X is the backend Response type, return as is:
          case valueCls if backend.isRawResponseType(valueCls) =>
            // Convert Scala Future to Finagle Future
            backend.toFuture(result.asInstanceOf[scala.concurrent.Future[Resp]], ex)
          case other =>
            // If X is other type, convert X into an HttpResponse
            val scalaFuture = result
              .asInstanceOf[scala.concurrent.Future[_]]
              .map { x => responseHandler.toHttpResponse(route, request, futureValueSurface, x) }(ex)
            backend.toFuture(scalaFuture, ex)
        }
      case _ =>
        // If the route returns non future value, convert it into Future response
        backend.toFuture(responseHandler.toHttpResponse(route, request, route.returnTypeSurface, result))
    }
  }
}
| wvlet/airframe | airframe-http-router/src/main/scala/wvlet/airframe/http/router/HttpEndpointExecutionContext.scala | Scala | apache-2.0 | 3,726 |
package metaconfig.docs
import metaconfig.ConfEncoder
import scalatags.Text.all._
import metaconfig.generic.Setting
import metaconfig.generic.Settings
/** Renders metaconfig settings as an HTML reference table. */
object Docs {

  /** One table row: name, type, description, and default of a single setting. */
  def htmlSetting(setting: Setting, defaultValue: Any) = tr(
    td(code(setting.name)),
    td(code(setting.field.tpe)),
    td(setting.description),
    td(defaultValue.toString)
  )

  /** Full HTML table documenting every flattened setting of T; the
    * default-value column comes from encoding the given default config. */
  def html[T](
      default: T
  )(implicit settings: Settings[T], ev: ConfEncoder[T]): String = {
    val encodedDefaults = ConfEncoder[T].writeObj(default)
    val rows = settings.flat(encodedDefaults).map((htmlSetting _).tupled)
    val header = thead(
      tr(
        th("Name"),
        th("Type"),
        th("Description"),
        th("Default value")
      )
    )
    table(header, tbody(rows)).toString()
  }
}
| olafurpg/metaconfig | metaconfig-docs/src/main/scala/metaconfig/docs/Docs.scala | Scala | apache-2.0 | 812 |
package com.weather.scalacass.util
/** Shared test-fixture constants for the cassandra-unit setup. */
object CassandraUnitInfo {
  /** YAML config selecting a random port (the "with fix" cassandra-unit variant). */
  val cassYaml: String = "cu-cassandra-rndport-with-fix-3.yaml"
}
| thurstonsand/fast-cass | src/test/scala_cass3/com/weather/scalacass/util/CassandraUnitInfo.scala | Scala | mit | 121 |
package com.github.vitalsoftware.scalaredox
import com.github.vitalsoftware.scalaredox.models._
import org.specs2.mutable.Specification
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by apatzer on 3/23/17.
*/
/** Exercises the Redox PatientSearch query data model against the test endpoint. */
class PatientSearchTest extends Specification with RedoxTest {
  "query PatientSearch" should {
    // A query carrying only Meta (no demographics) should match no patient.
    "not find anyone" in {
      val json =
        """
          |{
          | "Meta": {
          | "DataModel": "PatientSearch",
          | "EventType": "Query",
          | "EventDateTime": "2017-03-14T19:35:06.047Z",
          | "Test": true,
          | "Destinations": [
          | {
          | "ID": "0f4bd1d1-451d-4351-8cfd-b767d1b488d6",
          | "Name": "Patient Search Endpoint"
          | }
          | ]
          | }
          |}
        """.stripMargin
      val query = validateJsonInput[PatientSearch](json)
      val fut = client.get[PatientSearch, PatientSearch](query)
      val resp = Await.result(fut, timeout)
      resp.isSuccess must beTrue
      resp.asOpt.map { searchResult =>
        searchResult.Patient must beNone
      }.get
    }

    // Searching with known demographics should return a matching patient
    // with identifiers and demographics populated.
    "find someone" in {
      val json =
        """
          |{
          | "Meta": {
          | "DataModel": "PatientSearch",
          | "EventType": "Query",
          | "EventDateTime": "2017-03-14T19:35:06.047Z",
          | "Test": true,
          | "Destinations": [
          | {
          | "ID": "0f4bd1d1-451d-4351-8cfd-b767d1b488d6",
          | "Name": "Patient Search Endpoint"
          | }
          | ]
          | },
          | "Patient": {
          | "Demographics": {
          | "FirstName": "Timothy",
          | "MiddleName": "Paul",
          | "LastName": "Bixby",
          | "DOB": "2008-01-06",
          | "Sex": "Male"
          | },
          | "Notes": []
          | }
          |}
        """.stripMargin
      val query = validateJsonInput[PatientSearch](json)
      val fut = client.get[PatientSearch, PatientSearch](query)
      val resp = Await.result(fut, timeout)
      val maybe = handleResponse(fut)
      maybe must beSome
      maybe.map { searchResult =>
        searchResult.Patient must beSome
        searchResult.Patient.get.Identifiers must not be empty
        searchResult.Patient.get.Demographics must beSome
      }.get
    }
  }
}
| vital-software/scala-redox | src/test/scala/com/github/vitalsoftware/scalaredox/PatientSearchTest.scala | Scala | mit | 2,355 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.run
import quasar._
import quasar.common.PhaseResultTell
import quasar.contrib.iota._
import quasar.qscript._
import matryoshka.{Hole => _, _}
import matryoshka.implicits._
import cats.Monad
import cats.syntax.applicative._
/** QScript evaluator used by regression tests: performs no real optimization,
  * only injecting the normalized plan into the counting evaluator's QSM shape. */
final class RegressionQScriptEvaluator[
    T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT,
    F[_]: Monad: MonadPlannerErr: PhaseResultTell]
    extends CountingQScriptEvaluator[T, F] {

  // Catamorphism rewrites each layer via QSNormToQSM; lifting into F is pure.
  def optimize(norm: T[QScriptNormalized[T, ?]]): F[T[QSM]] =
    norm.transCata[T[QSM]](QSNormToQSM.inject(_)).pure[F]
}
| slamdata/quasar | run/src/test/scala/quasar/RegressionQScriptEvaluator.scala | Scala | apache-2.0 | 1,162 |
package model
/** Transmittal sheet record.
  *
  * The record identifier of a transmittal sheet is always 1; any other value
  * is rejected at construction time.
  */
case class TransmittalSheet(id: Int) {
  require(id == 1)
}
| jmarin/pilotazo | src/main/scala/model/TransmittalSheet.scala | Scala | cc0-1.0 | 80 |
// Negative compilation test (dotty tests/neg): the inline `// error` markers
// flag the lines the compiler is expected to reject. Without those rejections
// this program would fail at runtime with the ClassCastException noted below.
@main def test: Unit = {
  class Foo
  class Bar extends Foo
  trait S[-A] {
    type T >: A
  }
  trait PFoo extends S[Foo]
  trait PBar extends S[Bar] {
    override type T = Bar
  }
  class PFooBar extends PBar with PFoo { // error
    override type T >: Bar // error
  }
  def patmat[A](s: S[A]): s.T = s match {
    case p: (PFoo & s.type) => (new Foo): p.T
  }
  // ClassCastException: Foo cannot be cast to class Bar
  val x: Bar = patmat(new PFooBar: PBar)
} | lampepfl/dotty | tests/neg/i11130.scala | Scala | apache-2.0 | 470 |
/*
* Copyright (c) 2012 Twitter, Inc.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package test
// Specs2
import org.specs2.mutable.Specification
import java.io.File
import org.apache.avro.generic._
import org.apache.avro.specific._
import org.apache.avro.Schema
import org.apache.avro.Schema.{Type => AvroType}
import org.apache.avro.file._
// Round-trips a record containing another record from a pre-generated Avro file.
class AvroTypeProvider58Test extends Specification {
  "A case class with another record as a field" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest58(AvroTypeProviderTest00(1))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest58.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest58](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest58](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): the spec string says "`Float` field" but the test exercises a
// doubly-nested record — description looks copy-pasted from another test.
class AvroTypeProvider59Test extends Specification {
  "A case class with an `Float` field" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest59(AvroTypeProviderTest58(AvroTypeProviderTest00(1)))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest59.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest59](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest59](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): the spec string says "`Long` field" but the test exercises two
// record-typed fields — description looks copy-pasted from another test.
class AvroTypeProvider60Test extends Specification {
  "A case class with an `Long` field" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest60(AvroTypeProviderTest00(1), AvroTypeProviderTest58(AvroTypeProviderTest00(2)))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest60.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest60](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest60](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// Round-trips a record whose field is a list of user-defined records.
class AvroTypeProvider61Test extends Specification {
  "A case class with a field that is list of a user-defined type" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest61(List(AvroTypeProviderTest00(1), AvroTypeProviderTest00(2)))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest61.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest61](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest61](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// Round-trips a record whose field is a list of nested user-defined records.
class AvroTypeProvider62Test extends Specification {
  "A case class with a field that is list of a nested user-defined type" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest62(List(AvroTypeProviderTest58(AvroTypeProviderTest00(1)), AvroTypeProviderTest58(AvroTypeProviderTest00(2))))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest62.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest62](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest62](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
/* //TODO make readable file for this class - not very urgent since this field type is tested in other contexts also
class AvroTypeProvider63Test extends Specification {
"A case class with a field that is list of a nested user-defined type in the second position" should {
"serialize and deserialize correctly" in {
val record = AvroTypeProviderTest63(List(AvroTypeProviderTest00(1), AvroTypeProviderTest00(2)), List(AvroTypeProviderTest60(AvroTypeProviderTest00(3), AvroTypeProviderTest58(AvroTypeProviderTest00(2)))))
val file = new File("tests/src/test/resources/AvroTypeProviderTest63.avro")
val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest63](schema)
val dataFileReader = new DataFileReader[AvroTypeProviderTest63](file, userDatumReader)
val sameRecord = dataFileReader.next()
sameRecord must ===(record)
}
}
}
*/
// NOTE(review): the spec string mentions a "list of a nested user-defined type"
// but the test actually exercises an Option[record] field — copy-pasted description.
class AvroTypeProvider64Test extends Specification {
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest64(Some(AvroTypeProviderTest00(1)))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest64.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest64](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest64](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): description copy-pasted; this test exercises a None-valued Option field.
class AvroTypeProvider65Test extends Specification {
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest65(None)
      val file = new File("tests/src/test/resources/AvroTypeProviderTest65.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest65](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest65](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): description copy-pasted; this test exercises an Option of a nested record.
class AvroTypeProvider66Test extends Specification {
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest66(Some(AvroTypeProviderTest58(AvroTypeProviderTest00(1))))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest66.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest66](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest66](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): description copy-pasted; this test exercises two Option-of-record fields.
class AvroTypeProvider67Test extends Specification {
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest67(Some(AvroTypeProviderTest00(1)), Some(AvroTypeProviderTest60(AvroTypeProviderTest00(4), AvroTypeProviderTest58(AvroTypeProviderTest00(1)))))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest67.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest67](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest67](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
// NOTE(review): description copy-pasted; this test exercises nested
// Option/List combinations of records.
class AvroTypeProvider68Test extends Specification {
  "A case class with a field that is list of a nested user-defined type in the second position" should {
    "serialize and deserialize correctly" in {
      val record = AvroTypeProviderTest68(Some(List(Some(AvroTypeProviderTest00(1)), None)), List(None, Some(List(AvroTypeProviderTest01(1F), AvroTypeProviderTest01(2F)))))
      val file = new File("tests/src/test/resources/AvroTypeProviderTest68.avro")
      val schema = new DataFileReader(file, new GenericDatumReader[GenericRecord]).getSchema
      val userDatumReader = new SpecificDatumReader[AvroTypeProviderTest68](schema)
      val dataFileReader = new DataFileReader[AvroTypeProviderTest68](file, userDatumReader)
      val sameRecord = dataFileReader.next()
      sameRecord must ===(record)
    }
  }
}
| iulianu/avro-scala-macro-annotations | tests/src/test/scala/AvroTypeProviderTests/datatypetests/AvroTypeProviderUserDefinedTypesTest.scala | Scala | apache-2.0 | 9,372 |
package ml.combust.mleap.avro
import java.nio.charset.Charset
import ml.combust.mleap.core.types._
import ml.combust.mleap.runtime.MleapContext
import ml.combust.mleap.runtime.types._
import ml.combust.mleap.tensor.{ByteString, Tensor}
import org.apache.avro.Schema
import scala.language.implicitConversions
import scala.collection.JavaConverters._
import scala.reflect.{ClassTag, classTag}
import scala.util.Try
/**
* Created by hollinwilkins on 10/31/16.
*/
/** Bidirectional conversion between MLeap schemas/types and Avro schemas. */
object SchemaConverter {

  /** Avro record schema for a Tensor with element type T.
    * Fields: dimensions (array[int]); values (element-specific array, or raw
    * bytes for Byte tensors); indices (nullable array[array[int]], used for
    * sparse tensors).
    */
  def tensorSchema[T: ClassTag] = {
    val r = Try {
      val (name, valuesSchema) = classTag[T].runtimeClass match {
        case Tensor.BooleanClass =>
          ("Boolean", Schema.createArray(Schema.create(Schema.Type.BOOLEAN)))
        case Tensor.ByteClass =>
          ("Byte", Schema.create(Schema.Type.BYTES))
        case Tensor.ShortClass =>
          ("Short", Schema.createArray(Schema.create(Schema.Type.INT)))
        case Tensor.IntClass =>
          ("Int", Schema.createArray(Schema.create(Schema.Type.INT)))
        case Tensor.LongClass =>
          ("Long", Schema.createArray(Schema.create(Schema.Type.LONG)))
        case Tensor.FloatClass =>
          ("Float", Schema.createArray(Schema.create(Schema.Type.FLOAT)))
        case Tensor.DoubleClass =>
          ("Double", Schema.createArray(Schema.create(Schema.Type.DOUBLE)))
        case Tensor.StringClass =>
          ("String", Schema.createArray(Schema.create(Schema.Type.STRING)))
        case Tensor.ByteStringClass =>
          ("ByteString", Schema.createArray(Schema.create(Schema.Type.BYTES)))
        case _ => throw new IllegalArgumentException(s"invalid base ${classTag[T].runtimeClass.getName}")
      }
      val indicesSchema = Schema.createUnion(Schema.createArray(Schema.createArray(Schema.create(Schema.Type.INT))),
        Schema.create(Schema.Type.NULL))
      Schema.createRecord(s"${name}Tensor", "", "ml.combust.mleap.avro", false,
        Seq(new Schema.Field("dimensions", Schema.createArray(Schema.create(Schema.Type.INT)), "", null: AnyRef),
          new Schema.Field("values", valuesSchema, "", null: AnyRef),
          new Schema.Field("indices", indicesSchema, "", null: AnyRef)).asJava)
    }
    // Re-throw immediately: unsupported element types surface as the exception above.
    r.get
  }

  // Pre-built tensor schemas, one per supported element type.
  private val booleanTensorSchema = tensorSchema[Boolean]
  private val byteTensorSchema = tensorSchema[Byte]
  private val shortTensorSchema = tensorSchema[Short]
  private val integerTensorSchema = tensorSchema[Int]
  private val longTensorSchema = tensorSchema[Long]
  private val floatTensorSchema = tensorSchema[Float]
  private val doubleTensorSchema = tensorSchema[Double]
  private val stringTensorSchema = tensorSchema[String]
  private val byteStringTensorSchema = tensorSchema[ByteString]

  // Charset used by callers when encoding string bytes.
  val bytesCharset = Charset.forName("UTF-8")

  // Field positions inside the tensor record schema defined above.
  val tensorSchemaDimensionsIndex = 0
  val tensorSchemaValuesIndex = 1
  val tensorSchemaIndicesIndex = 2
  val customSchemaIndex = 0

  /** MLeap StructType -> Avro record schema named "LeapFrame". */
  implicit def mleapToAvro(schema: StructType): Schema = {
    val fields = schema.fields.map(mleapToAvroField).asJava
    Schema.createRecord("LeapFrame", "", "ml.combust.mleap.avro", false, fields)
  }

  implicit def mleapToAvroField(field: StructField): Schema.Field = new Schema.Field(field.name, mleapToAvroType(field.dataType), "", null: AnyRef)

  /** Wrap `base` in a union with NULL when the MLeap type is nullable. */
  def maybeNullableAvroType(base: Schema, isNullable: Boolean): Schema = {
    if(isNullable) {
      Schema.createUnion(base, Schema.create(Schema.Type.NULL))
    } else { base }
  }

  // NOTE(review): Byte and Short widen to Avro INT here, and the reverse
  // mapping (avroToMleapBasicType) reads INT back as Int — so Byte/Short
  // scalars do not round-trip to their original MLeap type. Confirm intended.
  implicit def mleapBasicToAvroType(basicType: BasicType): Schema = basicType match {
    case BasicType.Boolean => Schema.create(Schema.Type.BOOLEAN)
    case BasicType.Byte => Schema.create(Schema.Type.INT)
    case BasicType.Short => Schema.create(Schema.Type.INT)
    case BasicType.Int => Schema.create(Schema.Type.INT)
    case BasicType.Long => Schema.create(Schema.Type.LONG)
    case BasicType.Float => Schema.create(Schema.Type.FLOAT)
    case BasicType.Double => Schema.create(Schema.Type.DOUBLE)
    case BasicType.String => Schema.create(Schema.Type.STRING)
    case BasicType.ByteString => Schema.create(Schema.Type.BYTES)
  }

  /** MLeap DataType -> Avro schema (scalars, lists, tensors). */
  implicit def mleapToAvroType(dataType: DataType): Schema = dataType match {
    case st: ScalarType => maybeNullableAvroType(mleapBasicToAvroType(st.base), st.isNullable)
    case lt: ListType => maybeNullableAvroType(Schema.createArray(mleapBasicToAvroType(lt.base)), lt.isNullable)
    case tt: TensorType =>
      tt.base match {
        case BasicType.Boolean => booleanTensorSchema
        case BasicType.Byte => byteTensorSchema
        case BasicType.Short => shortTensorSchema
        case BasicType.Int => integerTensorSchema
        case BasicType.Long => longTensorSchema
        case BasicType.Float => floatTensorSchema
        case BasicType.Double => doubleTensorSchema
        case BasicType.String => stringTensorSchema
        case BasicType.ByteString => byteStringTensorSchema
        case _ => throw new IllegalArgumentException(s"invalid type ${tt.base}")
      }
    case _ => throw new IllegalArgumentException(s"invalid data type: $dataType")
  }

  /** Avro schema -> MLeap StructType; the top level must be a RECORD. */
  implicit def avroToMleap(schema: Schema)
                          (implicit context: MleapContext): StructType = schema.getType match {
    case Schema.Type.RECORD =>
      val fields = schema.getFields.asScala.map(avroToMleapField)
      StructType(fields).get
    case _ => throw new IllegalArgumentException("invalid avro record type")
  }

  implicit def avroToMleapField(field: Schema.Field)
                               (implicit context: MleapContext): StructField = StructField(field.name(), avroToMleapType(field.schema()))

  /** Unwrap a two-branch union containing NULL into the nullable MLeap type. */
  def maybeNullableMleapType(schema: Schema): DataType = {
    val types = schema.getTypes.asScala
    assert(types.size == 2, "only nullable unions supported (2 type unions)")

    types.find(_.getType == Schema.Type.NULL).flatMap {
      _ => types.find(_.getType != Schema.Type.NULL)
    }.map(avroToMleapType).getOrElse {
      throw new IllegalArgumentException(s"unsupported schema: $schema")
    }.asNullable
  }

  def avroToMleapBasicType(base: Schema.Type): BasicType = base match {
    case Schema.Type.BOOLEAN => BasicType.Boolean
    case Schema.Type.INT => BasicType.Int
    case Schema.Type.LONG => BasicType.Long
    case Schema.Type.FLOAT => BasicType.Float
    case Schema.Type.DOUBLE => BasicType.Double
    case Schema.Type.STRING => BasicType.String
    case Schema.Type.BYTES => BasicType.ByteString
    case _ => throw new IllegalArgumentException("invalid basic type")
  }

  /** Avro schema -> MLeap DataType; tensor records are recognized by name. */
  implicit def avroToMleapType(schema: Schema)
                              (implicit context: MleapContext): DataType = schema.getType match {
    case Schema.Type.BOOLEAN => ScalarType.Boolean
    case Schema.Type.INT => ScalarType.Int
    case Schema.Type.LONG => ScalarType.Long
    case Schema.Type.FLOAT => ScalarType.Float
    case Schema.Type.DOUBLE => ScalarType.Double
    case Schema.Type.STRING => ScalarType.String
    case Schema.Type.BYTES => ScalarType.ByteString
    case Schema.Type.ARRAY => ListType(avroToMleapBasicType(schema.getElementType.getType))
    case Schema.Type.UNION => maybeNullableMleapType(schema)
    case Schema.Type.RECORD =>
      schema.getName match {
        case "BooleanTensor" => TensorType(BasicType.Boolean)
        case "ByteTensor" => TensorType(BasicType.Byte)
        case "ShortTensor" => TensorType(BasicType.Short)
        case "IntTensor" => TensorType(BasicType.Int)
        case "LongTensor" => TensorType(BasicType.Long)
        case "FloatTensor" => TensorType(BasicType.Float)
        case "DoubleTensor" => TensorType(BasicType.Double)
        case "StringTensor" => TensorType(BasicType.String)
        case "ByteStringTensor" => TensorType(BasicType.ByteString)
        case _ => throw new IllegalArgumentException("invalid avro record")
      }
    case _ => throw new IllegalArgumentException("invalid avro record")
  }
}
| combust-ml/mleap | mleap-avro/src/main/scala/ml/combust/mleap/avro/SchemaConverter.scala | Scala | apache-2.0 | 7,878 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.protocol.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
// NOTE(review): sbt-contraband generated code (see file header). Change the
// contraband schema and regenerate rather than editing this trait by hand.
trait ExecutionEventFormats { self: sjsonnew.BasicJsonProtocol =>
implicit lazy val ExecutionEventFormat: JsonFormat[sbt.protocol.ExecutionEvent] = new JsonFormat[sbt.protocol.ExecutionEvent] {
  // Reads { "success": ..., "commandLine": ... }; absent JSON is an error.
  override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.protocol.ExecutionEvent = {
    __jsOpt match {
      case Some(__js) =>
        unbuilder.beginObject(__js)
        val success = unbuilder.readField[String]("success")
        val commandLine = unbuilder.readField[String]("commandLine")
        unbuilder.endObject()
        sbt.protocol.ExecutionEvent(success, commandLine)
      case None =>
        deserializationError("Expected JsObject but found None")
    }
  }
  // Mirrors read: writes the two fields as a flat JSON object.
  override def write[J](obj: sbt.protocol.ExecutionEvent, builder: Builder[J]): Unit = {
    builder.beginObject()
    builder.addField("success", obj.success)
    builder.addField("commandLine", obj.commandLine)
    builder.endObject()
  }
}
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/protocol/codec/ExecutionEventFormats.scala | Scala | apache-2.0 | 1,151 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.factories.utils
import java.util
import java.util.Collections
import org.apache.flink.table.catalog.ExternalCatalog
import org.apache.flink.table.descriptors.DescriptorProperties
import org.apache.flink.table.descriptors.ExternalCatalogDescriptorValidator.{CATALOG_PROPERTY_VERSION, CATALOG_TYPE}
import org.apache.flink.table.factories.utils.TestExternalCatalogFactory._
import org.apache.flink.table.factories.ExternalCatalogFactory
import org.apache.flink.table.runtime.utils.CommonTestData
/**
* External catalog factory for testing.
*
* This factory provides the in-memory catalog from [[CommonTestData]] as a
* catalog of type "test".
*
* The catalog produces tables intended for either a streaming or batch environment,
* based on the descriptor property {{{ is-streaming }}}.
*/
class TestExternalCatalogFactory extends ExternalCatalogFactory {

  /** Matched by the factory discovery service: catalog type "test", property version 1. */
  override def requiredContext: util.Map[String, String] = {
    val ctx = new util.HashMap[String, String]
    ctx.put(CATALOG_TYPE, CATALOG_TYPE_VALUE_TEST)
    ctx.put(CATALOG_PROPERTY_VERSION, "1")
    ctx
  }

  /** The only supported descriptor property is the streaming/batch switch. */
  override def supportedProperties: util.List[String] =
    Collections.singletonList(CATALOG_IS_STREAMING)

  /**
   * Creates the in-memory test catalog. Produces a batch catalog when the
   * "is-streaming" property is absent.
   */
  override def createExternalCatalog(properties: util.Map[String, String]): ExternalCatalog = {
    val descriptorProps = new DescriptorProperties()
    descriptorProps.putProperties(properties)
    val streaming = descriptorProps.getOptionalBoolean(CATALOG_IS_STREAMING).orElse(false)
    CommonTestData.getInMemoryTestCatalog(isStreaming = streaming)
  }
}
object TestExternalCatalogFactory {
  // Value of the catalog "type" property that selects this factory.
  val CATALOG_TYPE_VALUE_TEST = "test"
  // Descriptor property key: "true" yields a streaming catalog, otherwise batch.
  val CATALOG_IS_STREAMING = "is-streaming"
}
| shaoxuan-wang/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/factories/utils/TestExternalCatalogFactory.scala | Scala | apache-2.0 | 2,480 |
package notebook.kernel
import java.io.{StringWriter, PrintWriter}
import java.net.URLDecoder
import java.util.ArrayList
import notebook.kernel.repl.common._
import scala.collection.JavaConversions._
import scala.xml.{NodeSeq, Text}
import scala.util.control.NonFatal
import tools.nsc.Settings
import tools.nsc.interpreter._
import tools.nsc.interpreter.Completion.{Candidates, ScalaCompleter}
import tools.nsc.interpreter.Results.{Incomplete => ReplIncomplete, Success => ReplSuccess, Error}
import jline.console.completer.{ArgumentCompleter, Completer}
import org.apache.spark.repl._
import notebook.front.Widget
import notebook.util.Match
/**
 * Wrapper around the embedded Spark Scala interpreter (SparkILoop/SparkIMain)
 * used by the notebook kernel: evaluates cell code, captures stdout, renders
 * results as HTML widgets, and provides tab-completion.
 *
 * Not thread-safe (the underlying IMain is not designed for concurrent use).
 *
 * @param compilerOpts extra scalac options handed to the embedded compiler
 * @param jars         extra jars appended to the interpreter classpath
 */
class Repl(val compilerOpts: List[String], val jars:List[String]=Nil) extends ReplT {
  val LOG = org.slf4j.LoggerFactory.getLogger(classOf[Repl])

  def this() = this(Nil)

  // Sink receiving everything interpreted code prints; `stdout` wraps it so the
  // interpreter's own writer feeds the same buffer.
  private lazy val stdoutBytes = new ReplOutputStream
  private lazy val stdout = new PrintWriter(stdoutBytes)

  private var loop:HackSparkILoop = _
  // URI of the REPL class server from which executors fetch cell-compiled classes.
  private var _classServerUri:Option[String] = None
  // Set once kernel bootstrap evaluations are done; bootstrap cells are excluded
  // from the replay performed by `addCp` (counted via `_evalsUntilInitFinished`).
  private var _initFinished: Boolean = false
  private var _evalsUntilInitFinished: Int = 0

  /** Marks the end of kernel bootstrap; later evaluations are user cells. */
  def setInitFinished(): Unit = {
    _initFinished = true
  }

  def classServerUri: Option[String] = {
    _classServerUri
  }

  // Builds the interpreter eagerly: assembles a classpath by walking the
  // class-loader chain, configures compiler settings, starts a HackSparkILoop,
  // records the class-server URI and keeps the underlying SparkIMain.
  val interp = {
    val settings = new Settings
    settings.embeddedDefaults[Repl]

    if (!compilerOpts.isEmpty) settings.processArguments(compilerOpts, false)

    // fix for #52
    settings.usejavacp.value = false
    // fix for #52

    // Collect every URL from each URLClassLoader up the parent chain.
    val urls: IndexedSeq[String] = {
      import java.net.URLClassLoader
      import java.io.File
      def urls(cl:ClassLoader, acc:IndexedSeq[String]=IndexedSeq.empty):IndexedSeq[String] = {
        if (cl != null) {
          val us = if (!cl.isInstanceOf[URLClassLoader]) {
            acc
          } else {
            acc ++ (cl.asInstanceOf[URLClassLoader].getURLs map { u =>
              val f = new File(u.getFile)
              URLDecoder.decode(f.getAbsolutePath, "UTF8")
            })
          }
          urls(cl.getParent, us)
        } else {
          acc
        }
      }
      val loader = getClass.getClassLoader
      val gurls = urls(loader).distinct//.filter(!_.contains("logback-classic"))//.filter(!_.contains("sbt/"))
      gurls
    }

    val classpath = urls// map {_.toString}
    settings.classpath.value = classpath.distinct.mkString(java.io.File.pathSeparator)

    //bootclasspath → settings.classpath.isDefault = false → settings.classpath is used
    settings.bootclasspath.value += scala.tools.util.PathResolver.Environment.javaBootClassPath
    settings.bootclasspath.value += java.io.File.pathSeparator + settings.classpath.value

    // LOG the classpath
    // debug the classpath → settings.Ylogcp.value = true

    //val i = new HackIMain(settings, stdout)
    loop = new HackSparkILoop(stdout)

    // Extra jars are appended to the ILoop's own classpath before processing.
    jars.foreach { jar =>
      import scala.tools.nsc.util.ClassPath
      val f = scala.tools.nsc.io.File(jar).normalize
      loop.addedClasspath = ClassPath.join(loop.addedClasspath, f.path)
    }
    loop.process(settings)
    val i = loop.intp
    ////i.initializeSynchronous()
    //classServerUri = Some(i.classServer.uri)
    _classServerUri = Some(loop.classServer.uri)
    i.asInstanceOf[scala.tools.nsc.interpreter.SparkIMain]
  }

  private lazy val completion = {
    //new JLineCompletion(interp)
    new SparkJLineCompletion(interp)
  }

  // Adapts a Scala completer to the jline Completer interface.
  private def scalaToJline(tc: ScalaCompleter): Completer = new Completer {
    def complete(_buf: String, cursor: Int, candidates: JList[CharSequence]): Int = {
      val buf = if (_buf == null) "" else _buf
      val Candidates(newCursor, newCandidates) = tc.complete(buf, cursor)
      newCandidates foreach (candidates add _)
      newCursor
    }
  }

  private lazy val argCompletor = {
    val arg = new ArgumentCompleter(new JLineDelimiter, scalaToJline(completion.completer()))
    // turns out this is super important a line
    arg.setStrict(false)
    arg
  }

  private lazy val stringCompletor = StringCompletorResolver.completor

  // Runs the completer and collects the candidate strings.
  private def getCompletions(line: String, cursorPosition: Int) = {
    val candidates = new ArrayList[CharSequence]()
    argCompletor.complete(line, cursorPosition, candidates)
    candidates map { _.toString } toList
  }

  // Extracts the terms/types defined by an interpreter request, together with
  // the names each definition references. Term definitions win over type
  // definitions when a handler declares both.
  private def listDefinedTerms(request: interp.Request): List[NameDefinition] = {
    request.handlers.flatMap { h =>
      val maybeTerm = h.definesTerm.map(_.encoded)
      val maybeType = h.definesType.map(_.encoded)
      val references = h.referencedNames.toList.map(_.encoded)
      (maybeTerm, maybeType) match {
        case (Some(term), _) =>
          val termType = getTypeNameOfTerm(term).getOrElse("<unknown>")
          Some(TermDefinition(term, termType, references))
        case (_, Some(tpe)) =>
          Some(TypeDefinition(tpe, "type", references))
        case _ => None
      }
    }
  }

  /**
   * Best-effort type name for an interpreted term; falls back to the runtime
   * class name when the compiler reports no type.
   */
  def getTypeNameOfTerm(termName: String): Option[String] = {
    val tpe = try {
      interp.typeOfTerm(termName).toString
    } catch {
      // NOTE(review): `println` here gets an auto-tupled ("message", exc) argument.
      case exc: RuntimeException => println("Unable to get symbol type", exc); "<notype>"
    }
    tpe match {
      case "<notype>" => // "<notype>" can be also returned by typeOfTerm
        interp.classOfTerm(termName).map(_.getName)
      case _ =>
        // remove some crap
        Some(
          tpe
            .replace("iwC$", "")
            .replaceAll("^\\(\\)" , "") // 2.11 return types prefixed, like `()Person`
        )
    }
  }

  /**
   * Evaluates the given code. Swaps out the `println` OutputStream with a version that
   * invokes the given `onPrintln` callback everytime the given code somehow invokes a
   * `println`.
   *
   * Uses compile-time implicits to choose a renderer. If a renderer cannot be found,
   * then just uses `toString` on result.
   *
   * I don't think this is thread-safe (largely because I don't think the underlying
   * IMain is thread-safe), it certainly isn't designed that way.
   *
   * @param code            cell source to interpret
   * @param onPrintln       callback receiving each chunk printed to stdout
   * @param onNameDefinion  callback invoked for every term/type the cell defines
   * @return result and a copy of the stdout buffer during the duration of the execution
   */
  def evaluate(code: String,
               onPrintln: String => Unit = _ => (),
               onNameDefinion: NameDefinition => Unit = _ => ()
                ): (EvaluationResult, String) = {
    stdout.flush()
    stdoutBytes.reset()

    // capture stdout if the code the user wrote was a println, for example
    stdoutBytes.aop = onPrintln
    val res = Console.withOut(stdoutBytes) {
      interp.interpret(code)
    }
    stdout.flush()
    stdoutBytes.aop = _ => ()

    val result = res match {
      case ReplSuccess =>
        val request = interp.prevRequestList.last
        val lastHandler: interp.memberHandlers.MemberHandler = request.handlers.last
        listDefinedTerms(request).foreach(onNameDefinion)

        try {
          val evalValue = if (lastHandler.definesValue) { // This is true for def's with no parameters, not sure that executing/outputting this is desirable
            // CY: So for whatever reason, line.evalValue attemps to call the $eval method
            // on the class...a method that does not exist. Not sure if this is a bug in the
            // REPL or some artifact of how we are calling it.
            // RH: The above comment may be going stale given the shenanigans I'm pulling below.
            val line = request.lineRep
            // Compile a synthetic object whose `rendered` val applies the implicit
            // widget renderer to the cell's last value.
            val renderObjectCode =
              """object $rendered {
                | %s
                | val rendered: _root_.notebook.front.Widget = { %s }
                | %s
                |}""".stripMargin.format(
                request.importsPreamble,
                request.fullPath(lastHandler.definesTerm.get.toString),
                request.importsTrailer
              )
            if (line.compile(renderObjectCode)) {
              try {
                val renderedClass2 = Class.forName(
                  line.pathTo("$rendered")+"$", true, interp.classLoader
                )
                // Static module instance of $rendered; `get()` relies on Scala's
                // argument adaptation (the Object parameter is ignored for statics).
                val o = renderedClass2.getDeclaredField(interp.global.nme.MODULE_INSTANCE_FIELD.toString).get()

                // Walk nested $iw import wrappers until reaching the object that
                // actually declares `rendered`, then emit its HTML.
                def iws(o:Any):NodeSeq = {
                  val iw = o.getClass.getMethods.find(_.getName == "$iw")
                  val o2 = iw map { m =>
                    m.invoke(o)
                  }
                  o2 match {
                    case Some(o3) =>
                      iws(o3)
                    case None =>
                      val r = o.getClass.getDeclaredMethod("rendered").invoke(o)
                      val h = r.asInstanceOf[Widget].toHtml
                      h
                  }
                }
                iws(o)
              } catch {
                // NOTE(review): bare `case e =>` catches Throwable — apparently deliberate
                // so that any rendering failure is shown inside the cell output.
                case e =>
                  e.printStackTrace
                  LOG.error("Ooops, exception in the cell", e)
                  <span style="color:red;">Ooops, exception in the cell: {e.getMessage}</span>
              }
            } else {
              // a line like println(...) is technically a val, but returns null for some reason
              // so wrap it in an option in case that happens...
              Option(line.call("$result")) map { result => Text(try { result.toString } catch { case e => "Fail to `toString` the result: " + e.getMessage }) } getOrElse NodeSeq.Empty
            }
          } else {
            NodeSeq.Empty
          }
          Success(evalValue)
        }
        catch {
          case NonFatal(e) =>
            val ex = new StringWriter()
            e.printStackTrace(new PrintWriter(ex))
            Failure(ex.toString)
        }
      case ReplIncomplete => Incomplete
      case Error => Failure(stdoutBytes.toString)
    }

    // Bootstrap cells (evaluated before setInitFinished) are counted so that
    // addCp can skip replaying them into a fresh Repl.
    if ( !_initFinished ) {
      _evalsUntilInitFinished = _evalsUntilInitFinished + 1
    }

    (result, stdoutBytes.toString)
  }

  /**
   * Builds a fresh Repl with extra jars on the classpath. Returns the new Repl
   * plus a thunk that replays every post-bootstrap cell into it.
   */
  def addCp(newJars:List[String]) = {
    val prevCode = interp.prevRequestList.map(_.originalLine).drop( _evalsUntilInitFinished )
    interp.close() // this will close the repl class server, which is needed in order to reuse `-Dspark.replClassServer.port`!
    val r = new Repl(compilerOpts, newJars:::jars)
    (r, () => prevCode foreach (c => r.evaluate(c, _ => ())))
  }

  /**
   * Tab-completion: returns the matched text fragment and candidate matches.
   * String literals get a dedicated completor.
   */
  def complete(line: String, cursorPosition: Int): (String, Seq[Match]) = {
    // Detects a cursor position inside a double-quoted literal.
    def literalCompletion(arg: String) = {
      val LiteralReg = """.*"([\w/]+)""".r
      arg match {
        case LiteralReg(literal) => Some(literal)
        case _ => None
      }
    }

    // CY: Don't ask to explain why this works. Look at JLineCompletion.JLineTabCompletion.complete.mkDotted
    // The "regularCompletion" path is the only path that is (likely) to succeed
    // so we want access to that parsed version to pull out the part that was "matched"...
    // ...just...trust me.
    val delim = argCompletor.getDelimiter
    val list = delim.delimit(line, cursorPosition)
    val bufferPassedToCompletion = list.getCursorArgument
    val actCursorPosition = list.getArgumentPosition
    val parsed = Parsed.dotted(bufferPassedToCompletion, actCursorPosition) // withVerbosity verbosity
    val matchedText = bufferPassedToCompletion.takeRight(actCursorPosition - parsed.position)

    literalCompletion(bufferPassedToCompletion) match {
      case Some(literal) =>
        // strip any leading quotes
        stringCompletor.complete(literal)
      case None =>
        val candidates = getCompletions(line, cursorPosition)
        (matchedText, if (candidates.size > 0 && candidates.head.isEmpty) {
          List()
        } else {
          candidates.map(Match(_))
        })
    }
  }

  def objectInfo(line: String, position:Int): Seq[String] = {
    // CY: The REPL is stateful -- it isn't until you ask to complete
    // the thing twice does it give you the method signature (i.e. you
    // hit tab twice). So we simulate that here... (nutty, I know)
    getCompletions(line, position)
    getCompletions(line, position)
  }

  def sparkContextAvailable: Boolean = {
    interp.allImportedNames.exists(_.toString == "sparkContext")
  }

  def stop(): Unit = {
    interp.close()
  }
}
package models
/**
 * A stored permission entry.
 *
 * NOTE(review): the semantics of `permissionType` vs. `permission` are not
 * visible here — see AccessRule / BaseModel for the full contract.
 */
case class Permission(_id: String,
    permissionType: String,
    permission: String,
    accessRule: AccessRule) extends BaseModel[Permission] {

  /** Returns a copy of this entry carrying the given id (BaseModel contract). */
  override def withId(_id: String): Permission = copy(_id = _id)
}
| metaxmx/FridayNightBeer | modules/datamodel/src/main/scala/models/Permission.scala | Scala | apache-2.0 | 296 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.{ByteArrayInputStream, File, FileInputStream, FileOutputStream}
import java.net.{HttpURLConnection, URI, URL}
import java.nio.charset.StandardCharsets
import java.security.SecureRandom
import java.security.cert.X509Certificate
import java.util.{Arrays, Properties}
import java.util.concurrent.{TimeoutException, TimeUnit}
import java.util.jar.{JarEntry, JarOutputStream}
import javax.net.ssl._
import javax.tools.{JavaFileObject, SimpleJavaFileObject, ToolProvider}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.sys.process.{Process, ProcessLogger}
import scala.util.Try
import com.google.common.io.{ByteStreams, Files}
import org.apache.log4j.PropertyConfigurator
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.config._
import org.apache.spark.scheduler._
import org.apache.spark.util.Utils
/**
* Utilities for tests. Included in main codebase since it's used by multiple
* projects.
*
* TODO: See if we can move this to the test codebase by specifying
* test dependencies between projects.
*/
private[spark] object TestUtils {

  /**
   * Create a jar that defines classes with the given names.
   *
   * Note: if this is used during class loader tests, class names should be unique
   * in order to avoid interference between tests.
   */
  def createJarWithClasses(
      classNames: Seq[String],
      toStringValue: String = "",
      classNamesWithBase: Seq[(String, String)] = Seq.empty,
      classpathUrls: Seq[URL] = Seq.empty): URL = {
    val tempDir = Utils.createTempDir()
    val files1 = for (name <- classNames) yield {
      createCompiledClass(name, tempDir, toStringValue, classpathUrls = classpathUrls)
    }
    val files2 = for ((childName, baseName) <- classNamesWithBase) yield {
      createCompiledClass(childName, tempDir, toStringValue, baseName, classpathUrls)
    }
    // Timestamped name keeps repeated calls from clobbering one another.
    val jarFile = new File(tempDir, "testJar-%s.jar".format(System.currentTimeMillis()))
    createJar(files1 ++ files2, jarFile)
  }

  /**
   * Create a jar file containing multiple files. The `files` map contains a mapping of
   * file names in the jar file to their contents.
   */
  def createJarWithFiles(files: Map[String, String], dir: File = null): URL = {
    val tempDir = Option(dir).getOrElse(Utils.createTempDir())
    val jarFile = File.createTempFile("testJar", ".jar", tempDir)
    val jarStream = new JarOutputStream(new FileOutputStream(jarFile))
    files.foreach { case (k, v) =>
      val entry = new JarEntry(k)
      jarStream.putNextEntry(entry)
      ByteStreams.copy(new ByteArrayInputStream(v.getBytes(StandardCharsets.UTF_8)), jarStream)
    }
    jarStream.close()
    jarFile.toURI.toURL
  }

  /**
   * Create a jar file that contains this set of files. All files will be located in the specified
   * directory or at the root of the jar.
   */
  def createJar(files: Seq[File], jarFile: File, directoryPrefix: Option[String] = None): URL = {
    val jarFileStream = new FileOutputStream(jarFile)
    val jarStream = new JarOutputStream(jarFileStream, new java.util.jar.Manifest())

    for (file <- files) {
      // The `name` for the argument in `JarEntry` should use / for its separator. This is
      // ZIP specification.
      val prefix = directoryPrefix.map(d => s"$d/").getOrElse("")
      val jarEntry = new JarEntry(prefix + file.getName)
      jarStream.putNextEntry(jarEntry)

      val in = new FileInputStream(file)
      ByteStreams.copy(in, jarStream)
      in.close()
    }
    jarStream.close()
    jarFileStream.close()

    jarFile.toURI.toURL
  }

  // Adapted from the JavaCompiler.java doc examples
  private val SOURCE = JavaFileObject.Kind.SOURCE

  // Synthetic "string:///" URI identifying an in-memory Java source unit.
  private def createURI(name: String) = {
    URI.create(s"string:///${name.replace(".", "/")}${SOURCE.extension}")
  }

  // In-memory Java source file handed to the system Java compiler.
  private[spark] class JavaSourceFromString(val name: String, val code: String)
    extends SimpleJavaFileObject(createURI(name), SOURCE) {
    override def getCharContent(ignoreEncodingErrors: Boolean): String = code
  }

  /** Creates a compiled class with the source file. Class file will be placed in destDir. */
  def createCompiledClass(
      className: String,
      destDir: File,
      sourceFile: JavaSourceFromString,
      classpathUrls: Seq[URL]): File = {
    val compiler = ToolProvider.getSystemJavaCompiler

    // Calling this outputs a class file in pwd. It's easier to just rename the files than
    // build a custom FileManager that controls the output location.
    val options = if (classpathUrls.nonEmpty) {
      Seq("-classpath", classpathUrls.map { _.getFile }.mkString(File.pathSeparator))
    } else {
      Seq.empty
    }
    // NOTE(review): the Boolean result of call() (compilation success) is ignored;
    // a failed compile surfaces as the assert below instead.
    compiler.getTask(null, null, null, options.asJava, null, Arrays.asList(sourceFile)).call()

    val fileName = className + ".class"
    val result = new File(fileName)
    assert(result.exists(), "Compiled file not found: " + result.getAbsolutePath())
    val out = new File(destDir, fileName)

    // renameTo cannot handle in and out files in different filesystems
    // use google's Files.move instead
    Files.move(result, out)

    assert(out.exists(), "Destination file not moved: " + out.getAbsolutePath())
    out
  }

  /** Creates a compiled class with the given name. Class file will be placed in destDir. */
  def createCompiledClass(
      className: String,
      destDir: File,
      toStringValue: String = "",
      baseClass: String = null,
      classpathUrls: Seq[URL] = Seq.empty): File = {
    val extendsText = Option(baseClass).map { c => s" extends ${c}" }.getOrElse("")
    val sourceFile = new JavaSourceFromString(className,
      "public class " + className + extendsText + " implements java.io.Serializable {" +
      " @Override public String toString() { return \"" + toStringValue + "\"; }}")
    createCompiledClass(className, destDir, sourceFile, classpathUrls)
  }

  /**
   * Run some code involving jobs submitted to the given context and assert that the jobs spilled.
   */
  def assertSpilled(sc: SparkContext, identifier: String)(body: => Unit): Unit = {
    val listener = new SpillListener
    withListener(sc, listener) { _ =>
      body
    }
    assert(listener.numSpilledStages > 0, s"expected $identifier to spill, but did not")
  }

  /**
   * Run some code involving jobs submitted to the given context and assert that the jobs
   * did not spill.
   */
  def assertNotSpilled(sc: SparkContext, identifier: String)(body: => Unit): Unit = {
    val listener = new SpillListener
    withListener(sc, listener) { _ =>
      body
    }
    assert(listener.numSpilledStages == 0, s"expected $identifier to not spill, but did")
  }

  /**
   * Asserts that exception message contains the message. Please note this checks all
   * exceptions in the tree.
   */
  def assertExceptionMsg(exception: Throwable, msg: String): Unit = {
    // Walk the cause chain until a message contains `msg` or the chain ends.
    // NOTE(review): assumes every exception in the chain has a non-null message.
    var e = exception
    var contains = e.getMessage.contains(msg)
    while (e.getCause != null && !contains) {
      e = e.getCause
      contains = e.getMessage.contains(msg)
    }
    assert(contains, s"Exception tree doesn't contain the expected message: $msg")
  }

  /**
   * Test if a command is available.
   */
  def testCommandAvailable(command: String): Boolean = {
    // Swallow the command's output; only the exit status matters.
    val attempt = Try(Process(command).run(ProcessLogger(_ => ())).exitValue())
    attempt.isSuccess && attempt.get == 0
  }

  /**
   * Returns the response code from an HTTP(S) URL.
   */
  def httpResponseCode(
      url: URL,
      method: String = "GET",
      headers: Seq[(String, String)] = Nil): Int = {
    val connection = url.openConnection().asInstanceOf[HttpURLConnection]
    connection.setRequestMethod(method)
    headers.foreach { case (k, v) => connection.setRequestProperty(k, v) }

    // Disable cert and host name validation for HTTPS tests.
    if (connection.isInstanceOf[HttpsURLConnection]) {
      val sslCtx = SSLContext.getInstance("SSL")
      // Trust-everything manager: acceptable only because this is test code.
      val trustManager = new X509TrustManager {
        override def getAcceptedIssuers(): Array[X509Certificate] = null
        override def checkClientTrusted(x509Certificates: Array[X509Certificate], s: String) {}
        override def checkServerTrusted(x509Certificates: Array[X509Certificate], s: String) {}
      }
      val verifier = new HostnameVerifier() {
        override def verify(hostname: String, session: SSLSession): Boolean = true
      }
      sslCtx.init(null, Array(trustManager), new SecureRandom())
      connection.asInstanceOf[HttpsURLConnection].setSSLSocketFactory(sslCtx.getSocketFactory())
      connection.asInstanceOf[HttpsURLConnection].setHostnameVerifier(verifier)
    }

    try {
      connection.connect()
      connection.getResponseCode()
    } finally {
      connection.disconnect()
    }
  }

  /**
   * Runs some code with the given listener installed in the SparkContext. After the code runs,
   * this method will wait until all events posted to the listener bus are processed, and then
   * remove the listener from the bus.
   */
  def withListener[L <: SparkListener](sc: SparkContext, listener: L) (body: L => Unit): Unit = {
    sc.addSparkListener(listener)
    try {
      body(listener)
    } finally {
      sc.listenerBus.waitUntilEmpty(TimeUnit.SECONDS.toMillis(10))
      sc.listenerBus.removeListener(listener)
    }
  }

  /**
   * Wait until at least `numExecutors` executors are up, or throw `TimeoutException` if the waiting
   * time elapsed before `numExecutors` executors up. Exposed for testing.
   *
   * @param numExecutors the number of executors to wait at least
   * @param timeout time to wait in milliseconds
   */
  private[spark] def waitUntilExecutorsUp(
      sc: SparkContext,
      numExecutors: Int,
      timeout: Long): Unit = {
    val finishTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeout)
    while (System.nanoTime() < finishTime) {
      // NOTE(review): strict '>' — presumably because getExecutorInfos also counts
      // the driver; confirm against the StatusTracker contract.
      if (sc.statusTracker.getExecutorInfos.length > numExecutors) {
        return
      }
      // Sleep rather than using wait/notify, because this is used only for testing and wait/notify
      // add overhead in the general case.
      Thread.sleep(10)
    }
    throw new TimeoutException(
      s"Can't find $numExecutors executors before $timeout milliseconds elapsed")
  }

  /**
   * config a log4j properties used for testsuite
   */
  def configTestLog4j(level: String): Unit = {
    val pro = new Properties()
    pro.put("log4j.rootLogger", s"$level, console")
    pro.put("log4j.appender.console", "org.apache.log4j.ConsoleAppender")
    pro.put("log4j.appender.console.target", "System.err")
    pro.put("log4j.appender.console.layout", "org.apache.log4j.PatternLayout")
    pro.put("log4j.appender.console.layout.ConversionPattern",
      "%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n")
    PropertyConfigurator.configure(pro)
  }

  /**
   * Lists files recursively.
   */
  def recursiveList(f: File): Array[File] = {
    require(f.isDirectory)
    val current = f.listFiles
    current ++ current.filter(_.isDirectory).flatMap(recursiveList)
  }

  /**
   * Set task resource requirement.
   */
  def setTaskResourceRequirement(
      conf: SparkConf,
      resourceName: String,
      resourceCount: Int): SparkConf = {
    val key = s"${SPARK_TASK_RESOURCE_PREFIX}${resourceName}${SPARK_RESOURCE_AMOUNT_SUFFIX}"
    conf.set(key, resourceCount.toString)
  }
}
/**
 * A `SparkListener` that records which stages spilled.
 *
 * Task metrics are accumulated per stage as tasks finish; when a stage
 * completes, it is flagged as spilled if its tasks reported any spilled bytes.
 */
private class SpillListener extends SparkListener {
  private val stageIdToTaskMetrics = new mutable.HashMap[Int, ArrayBuffer[TaskMetrics]]
  private val spilledStageIds = new mutable.HashSet[Int]

  /** Number of completed stages in which at least one task spilled. */
  def numSpilledStages: Int = synchronized {
    spilledStageIds.size
  }

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = synchronized {
    val metricsForStage =
      stageIdToTaskMetrics.getOrElseUpdate(taskEnd.stageId, new ArrayBuffer[TaskMetrics])
    metricsForStage += taskEnd.taskMetrics
  }

  override def onStageCompleted(stageComplete: SparkListenerStageCompleted): Unit = synchronized {
    val stageId = stageComplete.stageInfo.stageId
    val taskMetrics = stageIdToTaskMetrics.remove(stageId).toSeq.flatten
    val totalSpilledBytes = taskMetrics.map(_.memoryBytesSpilled).sum
    if (totalSpilledBytes > 0) {
      spilledStageIds += stageId
    }
  }
}
| icexelloss/spark | core/src/main/scala/org/apache/spark/TestUtils.scala | Scala | apache-2.0 | 13,094 |
package com.airbnb.aerosolve.training
import com.typesafe.config.Config
import org.slf4j.{Logger, LoggerFactory}
import org.apache.spark.rdd.RDD
import scala.util.Random
/*
* A logistic regression trainer for generating Platt's scaling weights (slope and offset)
* The objective function to minimize is the negative log-likelihood function.
* 'trainBatchMLE' implements batch gradient descent, which collects data from RDD and run training on local machine
* 'trainSGD' implements Stochastic Gradient Descent (SGD), which runs in parallel model.
* Params:
* learning_rate - a number between 0 and 1.
* iterations - maximal number of iterations to run.
* num_bags - number of parallel models to make (sgd only).
* tolerance - if the difference between previous iteration weights and current weights are
* smaller than tolerance, the training will stop.
* rate_decay - reduce learning rate: learning_rate = learning_rate * rate_decay after each epoch
*/
object ScoreCalibrator {
  private final val log: Logger = LoggerFactory.getLogger("ScoreCalibrator")

  // Logistic function: maps a raw score to a probability in (0, 1).
  def sigmoid(x: Double) = 1.0 / (1.0 + math.exp(-x))

  /**
   * Parallel SGD: repeatedly averages per-partition SGD passes until the
   * (offset, slope) weights converge or the iteration cap is hit.
   *
   * @param input RDD of (score, label) pairs
   * @return Array(offset, slope) for sigmoid(offset + slope * score)
   */
  def trainSGD(config : Config,
               input : RDD[(Double, Boolean)]) : Array[Double] = {
    /* Config info:
     * iterations - maximal number of iterations to run trainMiniSGD
     * numBags - number of parallel model to train
     * learning_rate - learning rate
     * rate_decay - reduce learning rate: learning_rate = learning_rate * rate_decay after each epoch
     * tolerance - if the difference between previous iteration weights and current weights are
     * smaller than tolerance, the training will stop.
     */
    val maxIter : Int = config.getInt("iterations")
    var learningRate : Double = config.getDouble("learning_rate")
    val numBags : Int = config.getInt("num_bags")
    val tolerance : Double = config.getDouble("tolerance")
    val rate_decay : Double = config.getDouble("rate_decay")

    // params(0) = offset, params(1) = slope; start from the identity-ish (0, 1).
    var params = Array(0.0, 1.0)
    var iter = 0
    var old_offset = 0.0
    var old_slope = 1.0
    // One partition per bag so each parallel model sees its own slice.
    val partitionedInput = input.repartition(numBags)
    do {
      old_offset = params(0)
      old_slope = params(1)
      params = trainMiniSGD(partitionedInput, numBags, learningRate, params(0), params(1))
      iter += 1
      learningRate = learningRate * rate_decay
      log.info("Iteration %d: offset = %f, slope = %f".format(iter, params(0), params(1)))
    } while((math.abs(old_offset - params(0)) > tolerance
        || math.abs(old_slope - params(1)) > tolerance)
        && iter <= maxIter)
    params
  }

  /**
   * One epoch of mini-batch gradient descent per partition: each partition
   * accumulates the full-batch gradient of the negative log-likelihood and
   * takes a single averaged step; the per-partition weights are then averaged.
   */
  def trainMiniBatch(input : RDD[(Double, Boolean)],
                     numBags : Int,
                     learningRate : Double,
                     offset : Double,
                     slope : Double) : Array[Double] = {
    val result = input
      .mapPartitions(partition => {
        // run mini-batch training on each partition
        var a = offset
        var b = slope
        var count = 0
        var gradientA = 0.0
        var gradientB = 0.0
        partition.foreach(x => { // x is (Double, Boolean)
          // compute gradient at a given data point
          // d/da NLL = sigmoid(a + b*x) - y; d/db NLL = (sigmoid(a + b*x) - y) * x
          val diff = sigmoid(a + b * x._1) - (if (x._2) 1.0 else 0.0)
          gradientA += diff
          gradientB += diff * x._1
          count += 1
        })
        a -= learningRate * gradientA / count
        b -= learningRate * gradientB / count
        Seq[(Double, Double)]((a, b)).iterator
      })
      .reduce((x, y) => (x._1 + y._1, x._2 + y._2))
    // Average the per-partition models.
    Array(result._1 / numBags, result._2 / numBags)
  }

  /**
   * One epoch of stochastic gradient descent per partition (a step per data
   * point); the per-partition weights are then averaged across bags.
   */
  def trainMiniSGD(input : RDD[(Double, Boolean)],
                   numBags : Int,
                   learningRate : Double,
                   offset : Double,
                   slope : Double) : Array[Double] = {
    val result = input
      .mapPartitions(partition => {
        // run mini-batch training on each partition
        var a = offset
        var b = slope
        partition.foreach(x => { // x is (Double, Boolean)
          // compute gradient at a given data point and step immediately
          val diff = sigmoid(a + b * x._1) - (if (x._2) 1.0 else 0.0)
          a -= learningRate * diff
          b -= learningRate * diff * x._1
        })
        Seq[(Double, Double)]((a, b)).iterator
      })
      .reduce((x, y) => (x._1 + y._1, x._2 + y._2))
    // Average the per-partition models.
    Array(result._1 / numBags, result._2 / numBags)
  }

  // Batch Gradient Descent: Maximum Likelihood Estimate (MLE)
  // Runs entirely on the driver over a local array; minimizes the negative
  // log-likelihood of sigmoid(a + b * score) against the boolean labels.
  def trainBatchMLE(config : Config,
                    input : Array[(Double, Boolean)]) : Array[Double] = {
    val maxIter : Int = config.getInt("iterations")
    var learningRate : Double = config.getDouble("learning_rate")
    val tolerance : Double = config.getDouble("tolerance")
    val rateDecay : Double = config.getDouble("rate_decay")

    val score = input.map(x => x._1) // Double
    val label = input.map(x => x._2) // Boolean
    // Transforming the label to target probability
    val n = label.size
    val transformedLabel = label.map(x => if (x) 1.0 else 0.0)

    // Run Batch Gradient Descent
    // y = sigmoid(a+bx), a is the offset, b is the scale
    // initialization
    var a = 0.0
    var b = 1.0
    var old_a = 0.0
    var old_b = 1.0
    var iter = 0
    do {
      log.info("Iteration %d, a = %f; b = %f".format(iter, a, b))
      old_a = a
      old_b = b
      // predicted probability
      val predProb = score.map(x => sigmoid(a + b * x))
      //An array of predProb(j) - transformedLabel(j)
      // (accumulated in Float — loses some precision relative to Double)
      val predDiff = predProb.zip(transformedLabel).map(x => (x._1 - x._2).toFloat)
      val gradientA = predDiff.sum / n
      val gradientB = predDiff.zip(score).map(x => x._1 * x._2).sum / n
      a -= learningRate * gradientA
      b -= learningRate * gradientB
      learningRate = learningRate * rateDecay
      iter += 1
      log.info("Iteration %d, a = %f; b = %f".format(iter, a, b))
    } while((math.abs(old_a - a) > tolerance ||
        math.abs(old_b - b) > tolerance) && iter < maxIter)
    Array(a, b)
  }
}
/**Copyright (c) 2012-2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
package registry
// Java
import java.lang.{ Float => JFloat }
// Specs2
import org.specs2.Specification
// Joda
import org.joda.time.DateTime
// Json4s
import org.json4s._
import org.json4s.jackson.JsonMethods.parse
// Snowplow
import com.snowplowanalytics.iglu.client.SchemaKey
// Scala weather
import com.snowplowanalytics.weather._
object WeatherEnrichmentSpec {
  // Name of the environment variable that must hold an OpenWeatherMap API key;
  // the spec skips itself when the variable is unset.
  val OwmApiKey = "OWM_KEY"
}
import WeatherEnrichmentSpec._
/**
 * Integration spec for the OpenWeatherMap-backed weather enrichment.
 * The whole spec is skipped unless the OWM_KEY environment variable is set;
 * examples using `validAppKey` perform live calls against the
 * history.openweathermap.org API.
 */
class WeatherEnrichmentSpec extends Specification { def is =
skipAllIf(sys.env.get(OwmApiKey).isEmpty) ^ // Actually only e4 and e6 need to be skipped
s2"""
This is a specification to test the WeatherEnrichment
Fail event for null time $e1
Fail event for invalid key $e4
Weather enrichment client is lazy $e2
Extract weather stamp $e3
Extract humidity $e5
Extract configuration $e6
Check time stamp transformation $e7
"""
// Real API key read from the environment; only forced after skipAllIf has
// passed, so the IllegalStateException should be unreachable in practice.
lazy val validAppKey = sys.env.get(OwmApiKey).getOrElse(throw new IllegalStateException(s"No ${OwmApiKey} environment variable found, test should have been skipped"))
// Event with a null timestamp, used to exercise the tstamp validation path.
object invalidEvent {
var lat: JFloat = 70.98224f
var lon: JFloat = 70.98224f
var time: DateTime = null
}
// Event with valid coordinates and a fixed historic timestamp.
object validEvent {
var lat: JFloat = 20.713052f
var lon: JFloat = 70.98224f
var time: DateTime = new DateTime("2017-05-01T23:56:01.003+00:00")
}
// e1: a null event time must fail with an error mentioning "tstamp: None"
// (no valid key needed — the failure occurs before any successful lookup).
def e1 = {
val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(invalidEvent.lat), Option(invalidEvent.lon), Option(invalidEvent.time))
stamp.toEither must beLeft.like { case e => e must contain("tstamp: None") }
}
// e2: merely constructing the enrichment (bogus key, zero cache size) must not
// throw — per the spec text above, the underlying client is lazy.
def e2 = WeatherEnrichment("KEY", 0, 1, "history.openweathermap.org", 5) must not(throwA[IllegalArgumentException])
// e3: with a real key a valid event yields a weather context (live API call).
def e3 = {
val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight
}
// e4: an invalid API key must surface the API's authorization error (live call).
def e4 = {
val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beLeft.like { case e => e must contain("AuthorizationError") }
}
// e5: the returned weather context for the fixed stamp carries a "humidity"
// field equal to 97.0 (live API call; value pinned to the historic data).
def e5 = {
val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 15)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight.like {
case weather: JValue => {
val temp = weather.findField { case JField("humidity", _) => true; case _ => false }
temp must beSome(("humidity", JDouble(97.0)))
}
}
}
// e6: the self-describing enrichment config JSON must parse into an
// equivalent WeatherEnrichment instance (no network access involved).
def e6 = {
val configJson = parse(
"""
|{
| "enabled": true,
| "vendor": "com.snowplowanalytics.snowplow.enrichments",
| "name": "weather_enrichment_config",
| "parameters": {
| "apiKey": "{{KEY}}",
| "cacheSize": 5100,
| "geoPrecision": 1,
| "apiHost": "history.openweathermap.org",
| "timeout": 5
| }
|}
""".stripMargin)
val config = WeatherEnrichmentConfig.parse(configJson, SchemaKey("com.snowplowanalytics.snowplow.enrichments", "weather_enrichment_config", "jsonschema", "1-0-0"))
config.toEither must beRight(WeatherEnrichment(apiKey = "{{KEY}}", geoPrecision = 1, cacheSize = 5100, apiHost = "history.openweathermap.org", timeout = 5))
}
// e7: the event time (23:56:01 on 2017-05-01) is transformed to the closest
// timestamp stored server-side, i.e. midnight of 2017-05-02 (live API call).
def e7 = {
implicit val formats = DefaultFormats
val enr = WeatherEnrichment(validAppKey, 2, 1, "history.openweathermap.org", 15)
val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time))
stamp.toEither must beRight.like { // successful request
case weather: JValue => {
val e = (weather \\ "data").extractOpt[TransformedWeather]
e.map(_.dt) must beSome.like { // successful transformation
case dt => dt must equalTo("2017-05-02T00:00:00.000Z") // closest stamp storing on server
}
}
}
}
}
| aldemirenes/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/WeatherEnrichmentSpec.scala | Scala | apache-2.0 | 5,071 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.