code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.server.cio
import io.ktor.client.request.*
import io.ktor.client.statement.*
import io.ktor.http.*
import io.ktor.http.content.*
import io.ktor.network.selector.*
import io.ktor.network.sockets.*
import io.ktor.server.cio.*
import io.ktor.server.request.*
import io.ktor.server.response.*
import io.ktor.server.routing.*
import io.ktor.server.testing.suites.*
import io.ktor.utils.io.*
import kotlinx.coroutines.CompletableDeferred
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlin.test.Test
import kotlin.test.assertEquals
import kotlin.test.assertFalse
import kotlin.test.assertTrue
/**
 * CIO-engine-specific HTTP server tests, extending the common engine test suite with
 * checks for graceful shutdown, chunked responses, `Expect: 100-continue` handling,
 * and requests carrying a large number of headers.
 */
class CIOHttpServerTest : HttpServerCommonTestSuite<CIOApplicationEngine, CIOApplicationEngine.Configuration>(CIO) {
    init {
        // HTTP/2 and SSL are not exercised by this CIO test suite.
        enableHttp2 = false
        enableSsl = false
    }

    /** An in-flight request must still complete when the server is stopped with a grace period. */
    @Test
    fun testGracefulShutdown() = runTest {
        val server = createAndStartServer {
            get("/") {
                // Delay long enough that the shutdown below starts while this handler is running.
                delay(100)
                call.respond("OK")
            }
        }
        val body = CompletableDeferred<String?>()
        launch {
            withUrl("/") {
                body.complete(bodyAsText())
            }
        }
        launch {
            // Request shutdown while the handler above is still delayed; the grace
            // period must let the pending response finish.
            delay(20)
            server.stopSuspend(
                gracePeriodMillis = 10_000,
                timeoutMillis = 20_000,
            )
        }
        assertEquals("OK", body.await())
    }

    /** A ReadChannelContent response (no Content-Length) is delivered intact to the client. */
    @Test
    fun testChunkedResponse() = runTest {
        createAndStartServer {
            get("/") {
                val byteStream = ByteChannel(autoFlush = true)
                byteStream.writeStringUtf8("test")
                // Close with no cause so the channel signals a clean end of body.
                byteStream.close(null)
                call.respond(object : OutgoingContent.ReadChannelContent() {
                    override val status: HttpStatusCode = HttpStatusCode.OK
                    override val headers: Headers = Headers.Empty
                    override fun readFrom() = byteStream
                })
            }
        }
        withUrl("/") {
            assertEquals("test", bodyAsText())
        }
    }

    /** The server must answer `Expect: 100-continue` with an interim 100 before reading the body. */
    @Test
    fun testExpectedContinue() = runTest {
        createAndStartServer {
            post("/") {
                val body = call.receiveText()
                call.respondText(body)
            }
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            val body = "Hello world"
            writePostHeaders(writeChannel, body.length)
            // The interim response arrives before any body bytes are sent.
            val continueResponse = readChannel.readLineStrict()
            assertEquals("HTTP/1.1 100 Continue", continueResponse)
            assertEquals("", readChannel.readLineStrict())
            writePostBody(writeChannel, body)
            val response = readAvailable(readChannel)
            assertTrue(response.contains("HTTP/1.1 200 OK"))
            assertTrue(response.contains(body))
        }
    }

    /** A handler may respond (here 400) without ever reading the body; no 100 Continue expected. */
    @Test
    fun testExpectedContinueRespondBeforeReadingBody() = runTest {
        createAndStartServer {
            post("/") {
                val length = call.request.headers[HttpHeaders.ContentLength]?.toInt() ?: 0
                if (length > 5) {
                    call.respond(HttpStatusCode.BadRequest)
                    return@post
                }
                val body = call.receiveText()
                call.respondText(body)
            }
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            val longBody = "Hello world"
            // Declared length (11) exceeds the handler's limit of 5, so it rejects up front.
            writePostHeaders(writeChannel, longBody.length)
            val badRequestResponse = readAvailable(readChannel)
            assertTrue(badRequestResponse.contains("HTTP/1.1 400 Bad Request"))
        }
    }

    /** An unrecognized Expect value must be rejected with 417 Expectation Failed. */
    @Test
    fun testExpectedContinueExpectationFailed() = runTest {
        createAndStartServer {
            post("/") {
                val body = call.receiveText()
                call.respondText(body)
            }
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            val longBody = "Hello world"
            writePostHeaders(writeChannel, longBody.length, expectedHeader = "invalid-100-continue")
            val expectationFailedResponse = readAvailable(readChannel)
            assertTrue(expectationFailedResponse.contains("HTTP/1.1 417 Expectation Failed"))
        }
    }

    /** Responses produced on the Expect error paths must close the connection. */
    @Test
    fun testExpectedContinueConnection() = runTest {
        createAndStartServer {
            post("/") {
                val body = call.receiveText()
                call.respond(body)
            }
            post("/check-length") {
                val length = call.request.headers[HttpHeaders.ContentLength]?.toInt() ?: 0
                if (length == 0) {
                    call.respond(HttpStatusCode.BadRequest)
                    return@post
                }
                call.respondText("ok")
            }
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            // Default length of 0 makes the handler reject the request.
            writePostHeaders(writeChannel, path = "/check-length")
            val response = readAvailable(readChannel)
            assertTrue(response.contains("Connection: close"))
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            writePostHeaders(writeChannel, expectedHeader = "invalid")
            val response = readAvailable(readChannel)
            assertTrue(response.contains("Connection: close"))
        }
    }

    /** `Expect` is an HTTP/1.1 feature; an HTTP/1.0 request must not trigger a 100 Continue. */
    @Test
    fun testExpectedIgnoreHTTP1_0() = runTest {
        createAndStartServer {
            post("/") {
                val body = call.receiveText()
                call.respond(body)
            }
        }
        withClientSocket {
            val writeChannel = openWriteChannel()
            val readChannel = openReadChannel()
            val body = "Hello world"
            writePostHeaders(writeChannel, body.length, httpVersion = "HTTP/1.0")
            writePostBody(writeChannel, body)
            val response = readAvailable(readChannel)
            assertFalse(response.contains("100 Continue"))
        }
    }

    /** The engine must accept requests with hundreds of headers. */
    @Test
    fun testLotsOfHeaders() = runTest {
        val count = 500
        // Headers added by the client/engine on top of the explicit ones
        // (presumably Host, Accept, etc.) — TODO confirm against the client defaults.
        val implicitHeadersCount = 4
        createAndStartServer {
            get("/headers") {
                call.respond("${call.request.headers.entries().size} headers received")
            }
        }
        withUrl("/headers", {
            repeat(count) {
                header("HeaderName$it", "HeaderContent$it")
            }
        }) {
            assertEquals(HttpStatusCode.OK, status)
            assertEquals("${count + implicitHeadersCount} headers received", bodyAsText())
        }
    }

    // Reads whatever bytes are currently available and decodes them as UTF-8.
    // NOTE(review): assumes the whole response fits in a single 1024-byte read —
    // a slow or fragmented response could be truncated; confirm this is acceptable
    // for these localhost tests.
    private suspend fun readAvailable(channel: ByteReadChannel): String {
        val buffer = ByteArray(1024)
        val length = channel.readAvailable(buffer)
        return buffer.decodeToString(0, 0 + length)
    }

    // Opens a raw TCP connection to the test server and runs [block] against it,
    // closing both the socket and the selector afterwards.
    private suspend fun withClientSocket(block: suspend Socket.() -> Unit) {
        SelectorManager().use {
            aSocket(it).tcp().connect(TEST_SERVER_HOST, port).use { socket ->
                block(socket)
            }
        }
    }

    // Writes a raw POST request head (no body). Always sends an Expect header and
    // Connection: close; the parameters let individual tests vary the declared
    // length, path, Expect value, and HTTP version.
    private suspend fun writePostHeaders(
        channel: ByteWriteChannel,
        length: Int = 0,
        path: String = "/",
        expectedHeader: String = "100-continue",
        httpVersion: String = "HTTP/1.1"
    ) {
        channel.apply {
            writeStringUtf8("POST $path $httpVersion\r\n")
            writeStringUtf8("Host: $TEST_SERVER_HOST\r\n")
            writeStringUtf8("Content-Type: text/plain\r\n")
            writeStringUtf8("Content-Length: $length\r\n")
            writeStringUtf8("Expect: $expectedHeader\r\n")
            writeStringUtf8("Connection: close\r\n")
            writeStringUtf8("\r\n")
            flush()
        }
    }

    // Writes the request body followed by a blank line and flushes.
    private suspend fun writePostBody(channel: ByteWriteChannel, body: String) {
        channel.apply {
            writeStringUtf8("$body\r\n")
            writeStringUtf8("\r\n")
            flush()
        }
    }

    companion object {
        private const val TEST_SERVER_HOST = "127.0.0.1"
    }
} | kotlin | github | https://github.com/ktorio/ktor | ktor-server/ktor-server-cio/common/test/io/ktor/tests/server/cio/CIOEngineTest.kt
from hachoir_core.field import BasicFieldSet, GenericFieldSet, ParserError, createRawField
from hachoir_core.error import HACHOIR_ERRORS
# getgaps(int, int, [listof (int, int)]) -> generator of (int, int)
# Gets all the gaps not covered by a block in `blocks` from `start` for `length` units.
def getgaps(start, length, blocks):
    '''
    Yield (start, length) pairs for every span inside [start, start+length)
    that is not covered by any (start, length) block in `blocks`.

    Overlapping and duplicate blocks are tolerated; the caller's list is
    never modified.

    Example:
    >>> list(getgaps(0, 20, [(15,3), (6,2), (6,2), (1,2), (2,3), (11,2), (9,5)]))
    [(0, 1), (5, 1), (8, 1), (14, 1), (18, 2)]
    '''
    end = start + length
    cursor = start
    # sorted() returns a new list, leaving the caller's blocks untouched.
    for block_start, block_size in sorted(blocks, key=lambda blk: blk[0]):
        if block_start > cursor:
            # Uncovered span before this block begins.
            yield (cursor, block_start - cursor)
            cursor = block_start
        block_end = block_start + block_size
        if block_end > cursor:
            # Advance past the block unless it lies entirely behind the cursor.
            cursor = block_end
    if cursor < end:
        # Trailing gap after the last block.
        yield (cursor, end - cursor)
class NewRootSeekableFieldSet(GenericFieldSet):
    # Field set that supports random access (seeking) within its stream, and
    # that can backfill any unparsed ranges with raw fields once its total
    # size is known.

    def seekBit(self, address, relative=True):
        # Move the current parse position to `address` bits. When
        # relative=False, `address` is an absolute stream address and is
        # converted to a set-relative one first. Seeking before the start of
        # the field set is a parser error.
        if not relative:
            address -= self.absolute_address
        if address < 0:
            raise ParserError("Seek below field set start (%s.%s)" % divmod(address, 8))
        self._current_size = address
        return None

    def seekByte(self, address, relative=True):
        # Byte-granularity convenience wrapper around seekBit().
        return self.seekBit(address*8, relative)

    def _fixLastField(self):
        """
        Try to fix last field when we know current field set size.
        Returns new added field if any, or None.
        """
        assert self._size is not None
        # Stop parser
        message = ["stop parser"]
        self._field_generator = None
        # If last field is too big, delete it
        while self._size < self._current_size:
            field = self._deleteField(len(self._fields)-1)
            message.append("delete field %s" % field.path)
        assert self._current_size <= self._size
        # Fill every range not covered by an existing field with a raw
        # "unparsed[]" field, so the whole declared size is accounted for.
        blocks = [(x.absolute_address, x.size) for x in self._fields]
        fields = []
        for start, length in getgaps(self.absolute_address, self._size, blocks):
            self.seekBit(start, relative=False)
            field = createRawField(self, length, "unparsed[]")
            self.setUniqueFieldName(field)
            # NOTE(review): _fields.append(name, field) assumes hachoir's
            # ordered (name, value) container, not a plain list — confirm.
            self._fields.append(field.name, field)
            fields.append(field)
            message.append("found unparsed segment: start %s, length %s" % (start, length))
        # Leave the parse position at the end of the field set.
        self.seekBit(self._size, relative=False)
        message = ", ".join(message)
        if fields:
            self.warning("[Autofix] Fix parser error: " + message)
        return fields

    def _stopFeeding(self):
        # Called when no more data will be parsed; finalizes the size (for
        # child sets) and patches unparsed gaps via _fixLastField().
        new_field = None
        if self._size is None:
            if self._parent:
                self._size = self._current_size
            new_field = self._fixLastField()
        self._field_generator = None
        return new_field
class NewSeekableFieldSet(NewRootSeekableFieldSet):
    # Seekable field set nested inside another field set: it reuses the
    # parent's stream rather than owning one itself.
    def __init__(self, parent, name, description=None, size=None):
        # A nested seekable field set must be attached to a real field set.
        assert issubclass(parent.__class__, BasicFieldSet)
        NewRootSeekableFieldSet.__init__(self, parent, name, parent.stream, description, size) | unknown | codeparrot/codeparrot-clean | | |
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.standalone.fir.test.cases.generated.cases.symbols;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators.AnalysisApiFirStandaloneModeTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.symbols.AbstractSymbolByJavaPsiTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/symbols/symbolByJavaPsi")
@TestDataPath("$PROJECT_ROOT")
// Generated test class: one test method per .kt file under the symbolByJavaPsi
// test-data directory. Only comments are added here; regenerate the file to
// change its logic.
public class FirStandaloneNormalAnalysisSourceModuleSymbolByJavaPsiTestGenerated extends AbstractSymbolByJavaPsiTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // Standalone-mode FIR configurator over a source module with a normal analysis session.
        return AnalysisApiFirStandaloneModeTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Standalone
            )
        );
    }

    @Test
    public void testAllFilesPresentInSymbolByJavaPsi() {
        // Fails if a test-data file exists on disk without a corresponding
        // generated test method (excluding "withTestCompilerPluginEnabled").
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/symbols/symbolByJavaPsi"), Pattern.compile("^(.+)\\.kt$"), null, true, "withTestCompilerPluginEnabled");
    }

    @Test
    @TestMetadata("javaConstructor.kt")
    public void testJavaConstructor() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaConstructor.kt");
    }

    @Test
    @TestMetadata("javaField.kt")
    public void testJavaField() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaField.kt");
    }

    @Test
    @TestMetadata("javaFieldFromSuperclass.kt")
    public void testJavaFieldFromSuperclass() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaFieldFromSuperclass.kt");
    }

    @Test
    @TestMetadata("javaGetter.kt")
    public void testJavaGetter() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaGetter.kt");
    }

    @Test
    @TestMetadata("javaGetterImplementingKotlinProperty.kt")
    public void testJavaGetterImplementingKotlinProperty() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaGetterImplementingKotlinProperty.kt");
    }

    @Test
    @TestMetadata("javaInterfaceOneMethod.kt")
    public void testJavaInterfaceOneMethod() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaInterfaceOneMethod.kt");
    }

    @Test
    @TestMetadata("javaInterfaceOneMethodFromLibrary.kt")
    public void testJavaInterfaceOneMethodFromLibrary() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaInterfaceOneMethodFromLibrary.kt");
    }

    @Test
    @TestMetadata("javaInterfaceTwoMethods.kt")
    public void testJavaInterfaceTwoMethods() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaInterfaceTwoMethods.kt");
    }

    @Test
    @TestMetadata("javaInterfaceTwoMethodsFromLibrary.kt")
    public void testJavaInterfaceTwoMethodsFromLibrary() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaInterfaceTwoMethodsFromLibrary.kt");
    }

    @Test
    @TestMetadata("javaSetter.kt")
    public void testJavaSetter() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaSetter.kt");
    }

    @Test
    @TestMetadata("javaSetterImplementingKotlinProperty.kt")
    public void testJavaSetterImplementingKotlinProperty() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaSetterImplementingKotlinProperty.kt");
    }

    @Test
    @TestMetadata("javaStaticField.kt")
    public void testJavaStaticField() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaStaticField.kt");
    }

    @Test
    @TestMetadata("javaStaticFieldFromSuperclass.kt")
    public void testJavaStaticFieldFromSuperclass() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/javaStaticFieldFromSuperclass.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedClass.kt")
    public void testPackagePrivateProtectedClass() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedClass.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedField.kt")
    public void testPackagePrivateProtectedField() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedField.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedMethod.kt")
    public void testPackagePrivateProtectedMethod() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedMethod.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedStaticClass.kt")
    public void testPackagePrivateProtectedStaticClass() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedStaticClass.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedStaticField.kt")
    public void testPackagePrivateProtectedStaticField() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedStaticField.kt");
    }

    @Test
    @TestMetadata("packagePrivateProtectedStaticMethod.kt")
    public void testPackagePrivateProtectedStaticMethod() {
        runTest("analysis/analysis-api/testData/symbols/symbolByJavaPsi/packagePrivateProtectedStaticMethod.kt");
    }
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-standalone/tests-gen/org/jetbrains/kotlin/analysis/api/standalone/fir/test/cases/generated/cases/symbols/FirStandaloneNormalAnalysisSourceModuleSymbolByJavaPsiTestGenerated.java
//! Adaptors from `AsyncRead`/`AsyncWrite` to Stream/Sink
//!
//! Raw I/O objects work with byte sequences, but higher-level code usually
//! wants to batch these into meaningful chunks, called "frames".
//!
//! This module contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as transports.
//!
//! # Example encoding using `LinesCodec`
//!
//! The following example demonstrates how to use a codec such as [`LinesCodec`] to
//! write framed data. [`FramedWrite`] can be used to achieve this. Data sent to
//! [`FramedWrite`] are first framed according to a specific codec, and then sent to
//! an implementor of [`AsyncWrite`].
//!
//! ```
//! use futures::sink::SinkExt;
//! use tokio_util::codec::LinesCodec;
//! use tokio_util::codec::FramedWrite;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let buffer = Vec::new();
//! let messages = vec!["Hello", "World"];
//! let encoder = LinesCodec::new();
//!
//! // FramedWrite is a sink which means you can send values into it
//! // asynchronously.
//! let mut writer = FramedWrite::new(buffer, encoder);
//!
//! // To be able to send values into a FramedWrite, you need to bring the
//! // `SinkExt` trait into scope.
//! writer.send(messages[0]).await.unwrap();
//! writer.send(messages[1]).await.unwrap();
//!
//! let buffer = writer.get_ref();
//!
//! assert_eq!(buffer.as_slice(), "Hello\nWorld\n".as_bytes());
//! # }
//!```
//!
//! # Example decoding using `LinesCodec`
//! The following example demonstrates how to use a codec such as [`LinesCodec`] to
//! read a stream of framed data. [`FramedRead`] can be used to achieve this. [`FramedRead`]
//! will keep reading from an [`AsyncRead`] implementor until a whole frame, according to a codec,
//! can be parsed.
//!
//!```
//! use tokio_stream::StreamExt;
//! use tokio_util::codec::LinesCodec;
//! use tokio_util::codec::FramedRead;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let message = "Hello\nWorld".as_bytes();
//! let decoder = LinesCodec::new();
//!
//! // FramedRead can be used to read a stream of values that are framed according to
//! // a codec. FramedRead will read from its input (here `buffer`) until a whole frame
//! // can be parsed.
//! let mut reader = FramedRead::new(message, decoder);
//!
//! // To read values from a FramedRead, you need to bring the
//! // `StreamExt` trait into scope.
//! let frame1 = reader.next().await.unwrap().unwrap();
//! let frame2 = reader.next().await.unwrap().unwrap();
//!
//! assert!(reader.next().await.is_none());
//! assert_eq!(frame1, "Hello");
//! assert_eq!(frame2, "World");
//! # }
//! ```
//!
//! # The Decoder trait
//!
//! A [`Decoder`] is used together with [`FramedRead`] or [`Framed`] to turn an
//! [`AsyncRead`] into a [`Stream`]. The job of the decoder trait is to specify
//! how sequences of bytes are turned into a sequence of frames, and to
//! determine where the boundaries between frames are. The job of the
//! `FramedRead` is to repeatedly switch between reading more data from the IO
//! resource, and asking the decoder whether we have received enough data to
//! decode another frame of data.
//!
//! The main method on the `Decoder` trait is the [`decode`] method. This method
//! takes as argument the data that has been read so far, and when it is called,
//! it will be in one of the following situations:
//!
//! 1. The buffer contains less than a full frame.
//! 2. The buffer contains exactly a full frame.
//! 3. The buffer contains more than a full frame.
//!
//! In the first situation, the decoder should return `Ok(None)`.
//!
//! In the second situation, the decoder should clear the provided buffer and
//! return `Ok(Some(the_decoded_frame))`.
//!
//! In the third situation, the decoder should use a method such as [`split_to`]
//! or [`advance`] to modify the buffer such that the frame is removed from the
//! buffer, but any data in the buffer after that frame should still remain in
//! the buffer. The decoder should also return `Ok(Some(the_decoded_frame))` in
//! this case.
//!
//! Finally the decoder may return an error if the data is invalid in some way.
//! The decoder should _not_ return an error just because it has yet to receive
//! a full frame.
//!
//! It is guaranteed that, from one call to `decode` to another, the provided
//! buffer will contain the exact same data as before, except that if more data
//! has arrived through the IO resource, that data will have been appended to
//! the buffer. This means that reading frames from a `FramedRead` is
//! essentially equivalent to the following loop:
//!
//! ```no_run
//! use tokio::io::AsyncReadExt;
//! # // This uses async_stream to create an example that compiles.
//! # fn foo() -> impl futures_core::Stream<Item = std::io::Result<bytes::BytesMut>> { async_stream::try_stream! {
//! # use tokio_util::codec::Decoder;
//! # let mut decoder = tokio_util::codec::BytesCodec::new();
//! # let io_resource = &mut &[0u8, 1, 2, 3][..];
//!
//! let mut buf = bytes::BytesMut::new();
//! loop {
//! // The read_buf call will append to buf rather than overwrite existing data.
//! let len = io_resource.read_buf(&mut buf).await?;
//!
//! if len == 0 {
//! while let Some(frame) = decoder.decode_eof(&mut buf)? {
//! yield frame;
//! }
//! break;
//! }
//!
//! while let Some(frame) = decoder.decode(&mut buf)? {
//! yield frame;
//! }
//! }
//! # }}
//! ```
//! The example above uses `yield` whenever the `Stream` produces an item.
//!
//! ## Example decoder
//!
//! As an example, consider a protocol that can be used to send strings where
//! each frame is a four byte integer that contains the length of the frame,
//! followed by that many bytes of string data. The decoder fails with an error
//! if the string data is not valid utf-8 or too long.
//!
//! Such a decoder can be written like this:
//! ```
//! use tokio_util::codec::Decoder;
//! use bytes::{BytesMut, Buf};
//!
//! struct MyStringDecoder {}
//!
//! const MAX: usize = 8 * 1024 * 1024;
//!
//! impl Decoder for MyStringDecoder {
//! type Item = String;
//! type Error = std::io::Error;
//!
//! fn decode(
//! &mut self,
//! src: &mut BytesMut
//! ) -> Result<Option<Self::Item>, Self::Error> {
//! if src.len() < 4 {
//! // Not enough data to read length marker.
//! return Ok(None);
//! }
//!
//! // Read length marker.
//! let mut length_bytes = [0u8; 4];
//! length_bytes.copy_from_slice(&src[..4]);
//! let length = u32::from_le_bytes(length_bytes) as usize;
//!
//! // Check that the length is not too large to avoid a denial of
//! // service attack where the server runs out of memory.
//! if length > MAX {
//! return Err(std::io::Error::new(
//! std::io::ErrorKind::InvalidData,
//! format!("Frame of length {} is too large.", length)
//! ));
//! }
//!
//! if src.len() < 4 + length {
//! // The full string has not yet arrived.
//! //
//! // We reserve more space in the buffer. This is not strictly
//! // necessary, but is a good idea performance-wise.
//! src.reserve(4 + length - src.len());
//!
//! // We inform the Framed that we need more bytes to form the next
//! // frame.
//! return Ok(None);
//! }
//!
//! // Use advance to modify src such that it no longer contains
//! // this frame.
//! let data = src[4..4 + length].to_vec();
//! src.advance(4 + length);
//!
//! // Convert the data to a string, or fail if it is not valid utf-8.
//! match String::from_utf8(data) {
//! Ok(string) => Ok(Some(string)),
//! Err(utf8_error) => {
//! Err(std::io::Error::new(
//! std::io::ErrorKind::InvalidData,
//! utf8_error.utf8_error(),
//! ))
//! },
//! }
//! }
//! }
//! ```
//!
//! # The Encoder trait
//!
//! An [`Encoder`] is used together with [`FramedWrite`] or [`Framed`] to turn
//! an [`AsyncWrite`] into a [`Sink`]. The job of the encoder trait is to
//! specify how frames are turned into a sequences of bytes. The job of the
//! `FramedWrite` is to take the resulting sequence of bytes and write it to the
//! IO resource.
//!
//! The main method on the `Encoder` trait is the [`encode`] method. This method
//! takes an item that is being written, and a buffer to write the item to. The
//! buffer may already contain data, and in this case, the encoder should append
//! the new frame to the buffer rather than overwrite the existing data.
//!
//! It is guaranteed that, from one call to `encode` to another, the provided
//! buffer will contain the exact same data as before, except that some of the
//! data may have been removed from the front of the buffer. Writing to a
//! `FramedWrite` is essentially equivalent to the following loop:
//!
//! ```no_run
//! use tokio::io::AsyncWriteExt;
//! use bytes::Buf; // for advance
//! # use tokio_util::codec::Encoder;
//! # async fn next_frame() -> bytes::Bytes { bytes::Bytes::new() }
//! # async fn no_more_frames() { }
//! # #[tokio::main] async fn main() -> std::io::Result<()> {
//! # let mut io_resource = tokio::io::sink();
//! # let mut encoder = tokio_util::codec::BytesCodec::new();
//!
//! const MAX: usize = 8192;
//!
//! let mut buf = bytes::BytesMut::new();
//! loop {
//! tokio::select! {
//! num_written = io_resource.write(&buf), if !buf.is_empty() => {
//! buf.advance(num_written?);
//! },
//! frame = next_frame(), if buf.len() < MAX => {
//! encoder.encode(frame, &mut buf)?;
//! },
//! _ = no_more_frames() => {
//! io_resource.write_all(&buf).await?;
//! io_resource.shutdown().await?;
//! return Ok(());
//! },
//! }
//! }
//! # }
//! ```
//! Here the `next_frame` method corresponds to any frames you write to the
//! `FramedWrite`. The `no_more_frames` method corresponds to closing the
//! `FramedWrite` with [`SinkExt::close`].
//!
//! ## Example encoder
//!
//! As an example, consider a protocol that can be used to send strings where
//! each frame is a four byte integer that contains the length of the frame,
//! followed by that many bytes of string data. The encoder will fail if the
//! string is too long.
//!
//! Such an encoder can be written like this:
//! ```
//! use tokio_util::codec::Encoder;
//! use bytes::BytesMut;
//!
//! struct MyStringEncoder {}
//!
//! const MAX: usize = 8 * 1024 * 1024;
//!
//! impl Encoder<String> for MyStringEncoder {
//! type Error = std::io::Error;
//!
//! fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), Self::Error> {
//! // Don't send a string if it is longer than the other end will
//! // accept.
//! if item.len() > MAX {
//! return Err(std::io::Error::new(
//! std::io::ErrorKind::InvalidData,
//! format!("Frame of length {} is too large.", item.len())
//! ));
//! }
//!
//! // Convert the length into a byte array.
//! // The cast to u32 cannot overflow due to the length check above.
//! let len_slice = u32::to_le_bytes(item.len() as u32);
//!
//! // Reserve space in the buffer.
//! dst.reserve(4 + item.len());
//!
//! // Write the length and string to the buffer.
//! dst.extend_from_slice(&len_slice);
//! dst.extend_from_slice(item.as_bytes());
//! Ok(())
//! }
//! }
//! ```
//!
//! [`AsyncRead`]: tokio::io::AsyncRead
//! [`AsyncWrite`]: tokio::io::AsyncWrite
//! [`Stream`]: futures_core::Stream
//! [`Sink`]: futures_sink::Sink
//! [`SinkExt`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html
//! [`SinkExt::close`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html#method.close
//! [`FramedRead`]: struct@crate::codec::FramedRead
//! [`FramedWrite`]: struct@crate::codec::FramedWrite
//! [`Framed`]: struct@crate::codec::Framed
//! [`Decoder`]: trait@crate::codec::Decoder
//! [`decode`]: fn@crate::codec::Decoder::decode
//! [`encode`]: fn@crate::codec::Encoder::encode
//! [`split_to`]: fn@bytes::BytesMut::split_to
//! [`advance`]: fn@bytes::Buf::advance
// Core byte-passthrough codec.
mod bytes_codec;
pub use self::bytes_codec::BytesCodec;

// The two codec traits implemented by users of this module.
mod decoder;
pub use self::decoder::Decoder;

mod encoder;
pub use self::encoder::Encoder;

// Shared internal state machine backing the Framed* adapters below.
mod framed_impl;
#[allow(unused_imports)]
pub(crate) use self::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame};

// Public transport adapters: combined, read-only, and write-only.
mod framed;
pub use self::framed::{Framed, FramedParts};

mod framed_read;
pub use self::framed_read::FramedRead;

mod framed_write;
pub use self::framed_write::FramedWrite;

// Length-prefixed framing.
pub mod length_delimited;
pub use self::length_delimited::{LengthDelimitedCodec, LengthDelimitedCodecError};

// Delimiter-based codecs (newline and arbitrary delimiter).
mod lines_codec;
pub use self::lines_codec::{LinesCodec, LinesCodecError};

mod any_delimiter_codec;
pub use self::any_delimiter_codec::{AnyDelimiterCodec, AnyDelimiterCodecError}; | rust | github | https://github.com/tokio-rs/tokio | tokio-util/src/codec/mod.rs
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_TENSOR_HANDLE_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_TENSOR_HANDLE_H_
#include <algorithm>
#include <cstddef>
#include <memory>
#include <queue>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>
// clang-format off
// Required for IS_MOBILE_PLATFORM
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/platform.h"
// clang-format on
#include "absl/types/variant.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include "tensorflow/core/common_runtime/function.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h"
#endif // IS_MOBILE_PLATFORM
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
class EagerContext;
// Associates a Tensor and a Device, used in the eager runtime. Internal version
// of the TFE_TensorHandle struct and the python EagerTensor class
// (unrelated to python TensorHandle).
class TensorHandle : public ImmediateExecutionTensorHandle {
// TensorHandle for dtype != DT_RESOURCE
TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
Device* resource_device, EagerContext* ctx);
// TensorHandle for dtype == DT_RESOURCE
TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
EagerContext* ctx);
TensorHandle(Device* d, Device* op_device, Device* resource_device,
tensorflow::DataType dtype, EagerContext* ctx);
#if !defined(IS_MOBILE_PLATFORM)
TensorHandle(int64_t op_id, int32_t output_num,
const std::string& remote_task, tensorflow::DataType dtype,
Device* device, EagerContext* ctx, bool unknown_device);
TensorHandle(int64_t op_id, int32_t output_num, tensorflow::DataType dtype,
Device* device, bool is_ready, EagerContext* ctx);
#endif // IS_MOBILE_PLATFORM
public:
// TensorHandle with no assigned device
static TensorHandle* CreateLocalHandle(const tensorflow::Tensor& t);
static TensorHandle* CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device, EagerContext* ctx);
static TensorHandle* CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device,
Device* resource_device,
EagerContext* ctx);
static TensorHandle* CreateEmptyLocalHandle(Device* d, Device* op_device,
Device* resource_device,
tensorflow::DataType dtype,
EagerContext* ctx);
// Create a handle which packs the given handles of the same dtype and shape.
// If handles are on different devices, assign the packed handle to a
// CompositeDevice.
//
// The new tensor handle shares ownership of the given handle: their reference
// count will be increased by one after a call to `CreatePackedHandle`.
// TODO(b/170414377): Use `TensorHandlePtr` instead.
static absl::Status CreatePackedHandle(std::vector<TensorHandle*>&& handles,
tensorflow::DataType dtype,
const tensorflow::TensorShape& shape,
const std::string& device_name,
EagerContext* ctx,
TensorHandle** packed_handle);
static absl::Status CreatePackedHandle(std::vector<TensorHandle*>&& handles,
EagerContext* ctx,
TensorHandle** packed_handle);
#if !defined(IS_MOBILE_PLATFORM)
// An unshaped remote handle refers to a tensor on a remote worker. It's not
// ready until the shape is set. It controls the lifetime of the remote
// tensor.
static TensorHandle* CreateUnshapedRemoteHandle(
int64_t op_id, int32_t output_num, const std::string& remote_task,
tensorflow::DataType dtype, Device* d, EagerContext* ctx,
bool unknown_device = false);
// A lazy remote handle refers to a tensor on a remote worker. The lifetime of
// the remote tensor is controlled by the remote worker, but not by the lazy
// remote handle. Lazy handles are normally created on a default function
// device.
static TensorHandle* CreateLazyRemoteHandle(int64_t op_id, int32_t output_num,
tensorflow::DataType dtype,
Device* d, bool is_ready,
EagerContext* ctx);
#endif // IS_MOBILE_PLATFORM
// Templated struct `AutoReleaser` in
// core/runtime_fallback/runtime/kernel_utils.h needs a Release() method
// defined.
void Release();
tensorflow::DataType DataType() const override;
absl::Status Shape(tensorflow::PartialTensorShape* shape) const override;
absl::Status NumDims(int* num_dims) const override;
absl::Status NumElements(int64_t* num_elements) const override;
absl::Status Dim(int dim_index, int64_t* dim) const override;
const char* DeviceName(absl::Status* status) const override;
const char* BackingDeviceName(absl::Status* status) const override;
const char* DeviceType(absl::Status* status) const override;
int DeviceId(absl::Status* status) const override;
AbstractTensorInterface* Resolve(absl::Status* status) override;
// Subclasses may return True to instruct the string formatter
// to use SummarizeValue instead of the NumPy formatter.
bool PreferCustomSummarizer() const override {
return dtype == DT_VARIANT || dtype == DT_RESOURCE;
}
// Return the Tensor from the default device.
absl::Status Tensor(const tensorflow::Tensor** t) const;
// Return the Tensor from the specified device which could be either the
// default device or a local mirror. The device pointer should be nullptr if
// requesting the HostCPU.
absl::Status TensorFromDevice(const Device* d,
const tensorflow::Tensor** t) const;
// Return the TensorValue from the specified device which could be either the
// default device or a local mirror. The device pointer should be nullptr if
// requesting the HostCPU.
absl::Status TensorValue(const Device* d, tensorflow::TensorValue* t);
Device* device() const { return device_; }
Device* op_device() const { return op_device_; }
Device* resource_device() const { return resource_device_; }
int64_t resource_remote_device_incarnation() const {
return resource_remote_device_incarnation_;
}
// If the devices are unknown at creation time, block until the actual devices
// are set (data is ready).
absl::Status WaitUnknownDevice() const;
Device* DeviceOrHostCPU(const EagerContext& ctx) const;
absl::Status Shape(tensorflow::TensorShape* shape);
absl::Status Unprotect(const Device* d);
// Checks if a mirror tensor exists for the specified device. Mirrors are only
// maintained for local devices, like CPUs & GPUs. Note a mirror may be empty,
// as it is still to be set by an async operation.
bool HasLocalMirror(const Device* d) const;
// Add an empty mirror placeholder for the specified device. The expectation
// is this will be populated by a call to SetTensor.
absl::Status AddEmptyLocalMirror(const Device* d);
// Add a local mirror. This will fail if an empty local mirror was previously
// added. For that case, SetTensor should be used instead.
absl::Status AddLocalMirror(tensorflow::Tensor&& tensor, const Device* d);
#if !defined(IS_MOBILE_PLATFORM)
bool HasRemoteMirror(const Device* d, uint64_t context_view_id) const;
bool HasResourceShapeMirror(const Device* d, uint64_t context_view_id) const;
absl::Status AddUnshapedRemoteMirror(const Device* d, int64_t op_id,
int output_num,
const std::string& remote_task,
EagerContext* ctx);
absl::Status AddResourceShapeMirror(const Device* d, int64_t op_id,
int output_num, EagerContext* ctx);
// Return the op_id and output num if the handle refers to a remote tensor.
// If wait_until_ready is true, block until the remote tensor is ready on the
// given remote worker.
absl::Status RemoteAddress(const Device* d, bool wait_until_ready,
int64_t* op_id, int32_t* output_num) const;
// Called on an async remote tensor once it's shape has been determined. This
// transitions the tensor handle from a non-ready to a ready state by
// replacing the backing data abstraction to allow for the shape to be
// queried.
// creating a TensorHandle (e.g. a remote output of a remote function).
// This method or Poison must be called exactly once for remote tensors that
// were created without a known shape.
absl::Status SetRemoteShape(const TensorShape& shape, const Device* d,
uint64_t context_view_id);
// If op_device is not empty, reset the devices of a remote tensor which is
// created without known devices (e.g. function outputs).
absl::Status SetRemoteShapeAndDevice(const TensorShape& shape,
const Device* d,
uint64_t context_view_id,
std::string op_device);
// Poisons either this handle or a remote mirror with error `status`.
// Poisoning means that the handle will become ready and methods trying
// to access the remote shape will return this error `status`.
// Exactly one of SetRemoteShape or PoisonRemote methods must be called on a
// unshaped handle on a remote device.
void PoisonRemote(absl::Status status, const Device* d,
uint64_t context_view_id);
#endif
// Sets the `tensor` for this async non-ready handle making it ready.
// This method or Poison must be called exactly once for non-ready async
// handles to make them ready.
absl::Status SetTensor(tensorflow::Tensor&& tensor, const Device* d);
// Poisons either this handle or a local mirror with error `status`.
// Poisoning means that the handle will become ready and methods trying
// to access the actual tensor or shape will return this error `status`.
// Exactly one of SetTensor or Poison methods must be called on a non-ready
// tensor for a specific device.
void Poison(absl::Status status, const Device* d);
// TODO(b/154282629): Consider moving it to EagerContext.
// Copies to the tensor on the given device `d`, or to host iff `d` is null.
absl::Status CopyToDevice(const EagerContext& ctx, tensorflow::Device* d,
tensorflow::Tensor* output) const;
absl::Status InferenceShape(
shape_inference::InferenceContext* inference_context,
shape_inference::ShapeHandle* shape_handle);
void SetInferenceShape(shape_inference::InferenceContext* inference_context,
const shape_inference::ShapeHandle& shape_handle);
absl::Status CopyInferenceShape(TensorHandle* other);
// dtype for the handle. It must be the same as t.dtype() once the handle is
// ready.
const tensorflow::DataType dtype;
enum HandleType { LOCAL = 0, PACKED = 1, REMOTE = 2 };
HandleType Type() const;
std::string TypeString() const;
void SetResourceHandleDtypeAndShape(
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes);
// If this TensorHandle is 1) a local tensor, and 2) a resource handle,
// return data types and shapes of the underlying resource.
absl::Status GetResourceHandleDtypesAndShapes(
std::vector<DtypeAndPartialTensorShape>* result);
// Returns the number of packed handles. 0 if the handle type is not PACKED.
int NumPackedHandles() const;
// It's called on a packed TensorHandle. Extract a handle with the given
// index.
absl::Status ExtractPackedHandle(int index, TensorHandle** handle) const;
// For LLVM style RTTI.
static bool classof(const AbstractTensorHandle* ptr) {
return ptr->getKind() == kEager;
}
tensorflow::FullTypeDef FullType() const override { return full_type_; }
void SetFullType(FullTypeDef& full_type) { full_type_ = full_type; }
private:
friend class PackedTensorHandleTest;
TensorHandle(std::vector<TensorHandle*>&& handles, Device* device,
tensorflow::DataType dtype, const tensorflow::TensorShape& shape,
EagerContext* ctx);
~TensorHandle() override;
// The TensorHandleData can either represent a local or remote tensor handle.
// Further, it can be in a non-ready state. It would become ready with a call
// to either SetTensor or SetRemoteShape which replaces the underlying data
// with a ready version of the tensor handle data.
bool IsReady() const;
absl::Status WaitReady(const char* caller) const;
tensorflow::Device* device_;
// Device in which the op producing this tensor was executed. Equals to
// device_ for constant tensors.
// Can be nullptr if the op producing this tensor was a function executed
// with function library runtime.
tensorflow::Device* op_device_;
// If the tensor dtype is DT_RESOURCE, resource_device_ holds the device
// backing the resource. Else resource_device_ is nullptr.
tensorflow::Device* resource_device_;
// Incarnation ID of the resource device if it locates on a remote device, or
// 0 if it locates on a local device.
int64_t resource_remote_device_incarnation_;
// If true, the handle refers to a remote tensor which is created without
// known devices. The actual devices are set by SetRemoteShape. The devices
// should be accessed once the handle is ready.
const bool unknown_device_ = false;
mutable mutex mu_;
// Map of local mirrors. This can include both ready and non-ready mirrors.
std::unordered_map<const tensorflow::Device*, LocalTensorHandleData>
local_mirrors_ TF_GUARDED_BY(mu_);
#if !defined(IS_MOBILE_PLATFORM)
// TODO(yujingzhang): Remove resource_shape_mirrors_ once scalable per-replica
// variable is ready, since we could get the shape locally without remote copy
// then.
std::unordered_map<std::string, RemoteTensorHandleData>
resource_shape_mirrors_ TF_GUARDED_BY(mu_);
std::unordered_map<std::string, RemoteTensorHandleData> remote_mirrors_
TF_GUARDED_BY(mu_);
#endif
// `ctx` is only guaranteed to be set if the handle is not "ready". This is
// typically true when the handle was produced during async execution.
// `ctx` object is not owned and should outlive this handle.
//
// TODO(b/150614042): Reference count EagerContext to ensure that 'device_' of
// a TensorHandle does not outlive the EagerContext from which it came?
EagerContext* const ctx_;
// If this TensorHandle 1) is a local tensor, and 2) is a resource handle or
// refers to a remote resource handle, we store data types and shapes for
// the underlying resource.
std::vector<DtypeAndPartialTensorShape> handle_dtypes_and_shapes_;
// A handle data which refers to multiple TensorHandles of the same dtype and
// shape.
class PackedTensorHandleData {
public:
// Initialize handle data from list of tensor handles.
// Ownership of the tensor handles is shared between the
// `PackedTensorHandleData` and the caller (the reference count for the
// given handles is incremented).
// TODO(b/170414377): Use `TensorHandlePtr` instead.
PackedTensorHandleData(std::vector<TensorHandle*>&& handles,
const TensorShape& shape);
~PackedTensorHandleData();
absl::Status Shape(TensorShape* shape) const;
absl::Status NumDims(int* num_dims) const;
absl::Status Dim(int dim_index, int64_t* dim) const;
absl::Status NumElements(int64_t* num_elements) const;
absl::Status Unprotect();
bool IsReady() const;
absl::Status WaitReady(const char* caller) const;
void Poison(absl::Status status);
std::string DebugString() const;
// Number of packed handles.
int NumPackedHandles() const;
// Extract a handle on the given index.
absl::Status ExtractPackedHandle(int index, TensorHandle** handle) const;
private:
// TODO(b/170414377): Use `TensorHandlePtr` instead.
const std::vector<TensorHandle*> handles_;
const TensorShape shape_;
mutable mutex mu_;
absl::Status is_poisoned_ TF_GUARDED_BY(mu_);
};
// Does not need synchronization because it can be accessed only after
// WaitReady() has returned. At that point, data_ is immutable.
#if !defined(IS_MOBILE_PLATFORM)
std::variant<LocalTensorHandleData, PackedTensorHandleData,
RemoteTensorHandleData>
data_;
#else
absl::variant<LocalTensorHandleData, PackedTensorHandleData> data_;
#endif
PartialTensorShape inference_shape_;
FullTypeDef full_type_;
};
// Returns the device backing the resource. Else, returns nullptr.
Device* GetResourceDevice(const ResourceHandle& handle, EagerContext* ctx);
class TensorHandleInterface : public ImmediateExecutionTensorHandle {
public:
};
template <typename T>
inline TensorHandle* TensorHandleFromInterface(T* handle) {
return down_cast<TensorHandle*>(handle);
}
} // namespace tensorflow
#endif // TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_TENSOR_HANDLE_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/common_runtime/eager/tensor_handle.h |
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <torch/library.h>
namespace at {
// If an operator doesn't have a batching rule implemented then we fallback
// to this implementation. The fallback only works on out-of-place operators
// that return only tensors with new memory. (e.g., no in-place operators, no
// view operations).
//
// The fallback effectively takes all of the BatchedTensors in `stack`, slices
// them, and runs `op` on all of the corresponding slices to produce slices
// of the outputs. The output slices then get `torch.stack`ed to create the
// final returns.
//
// The performance of the fallback is not very good because it introduces an
// extra copy from stacking the sliced outputs. Because of this, we prefer to
// write batching rules for operators whenever possible.
void batchedTensorForLoopFallback(
const c10::OperatorHandle& op,
torch::jit::Stack* stack);
} // namespace at | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/LegacyBatchedFallback.h |
#!/usr/bin/env python
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = gr.sig_source_f(fs, gr.GR_SIN_WAVE, fi, 1)
fm = blks2.nbfm_tx (fs, 4*fs, max_dev=10000, tau=75e-6)
sigs.append(s)
fmtx.append(fm)
syntaps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = gr.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = blks2.pfb_channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = gr.head(gr.sizeof_gr_complex, N)
noise = gr.noise_source_c(gr.GR_GAUSSIAN, noise_level)
addnoise = gr.add_cc()
snk_synth = gr.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(gr.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin.internals;
import org.apache.kafka.clients.MetadataUpdater;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.requests.RequestUtils;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
/**
* Manages the metadata for KafkaAdminClient.
*
* This class is not thread-safe. It is only accessed from the AdminClient
* service thread (which also uses the NetworkClient).
*/
public class AdminMetadataManager {
private final Logger log;
/**
* The minimum amount of time that we should wait between subsequent
* retries, when fetching metadata.
*/
private final long refreshBackoffMs;
/**
* The minimum amount of time that we should wait before triggering an
* automatic metadata refresh.
*/
private final long metadataExpireMs;
/**
* True if we are communicating directly with the controller quorum as specified by KIP-919.
*/
private final boolean usingBootstrapControllers;
/**
* Used to update the NetworkClient metadata.
*/
private final AdminMetadataUpdater updater;
/**
* The current metadata state.
*/
private State state = State.QUIESCENT;
/**
* The time in wall-clock milliseconds when we last updated the metadata.
*/
private long lastMetadataUpdateMs = 0;
/**
* The time in wall-clock milliseconds when we last attempted to fetch new
* metadata.
*/
private long lastMetadataFetchAttemptMs = 0;
/**
* The time in wall-clock milliseconds when we started attempts to fetch metadata. If empty,
* metadata has not been requested. This is the start time based on which rebootstrap is
* triggered if metadata is not obtained for the configured rebootstrap trigger interval.
* Set to Optional.of(0L) to force rebootstrap immediately.
*/
private Optional<Long> metadataAttemptStartMs = Optional.empty();
/**
* The current cluster information.
*/
private Cluster cluster = Cluster.empty();
/**
* If this is non-null, it is a fatal exception that will terminate all attempts at communication.
*/
private ApiException fatalException = null;
/**
* The cluster with which the metadata was bootstrapped.
*/
private Cluster bootstrapCluster;
public class AdminMetadataUpdater implements MetadataUpdater {
@Override
public List<Node> fetchNodes() {
return cluster.nodes();
}
@Override
public boolean isUpdateDue(long now) {
return false;
}
@Override
public long maybeUpdate(long now) {
return Long.MAX_VALUE;
}
@Override
public void handleServerDisconnect(long now, String destinationId, Optional<AuthenticationException> maybeFatalException) {
maybeFatalException.ifPresent(AdminMetadataManager.this::updateFailed);
AdminMetadataManager.this.requestUpdate();
}
@Override
public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
// Do nothing
}
@Override
public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse) {
// Do nothing
}
@Override
public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) {
return AdminMetadataManager.this.needsRebootstrap(now, rebootstrapTriggerMs);
}
@Override
public void rebootstrap(long now) {
AdminMetadataManager.this.rebootstrap(now);
}
@Override
public void close() {
}
}
/**
* The current AdminMetadataManager state.
*/
enum State {
QUIESCENT,
UPDATE_REQUESTED,
UPDATE_PENDING
}
public AdminMetadataManager(
LogContext logContext,
long refreshBackoffMs,
long metadataExpireMs,
boolean usingBootstrapControllers
) {
this.log = logContext.logger(AdminMetadataManager.class);
this.refreshBackoffMs = refreshBackoffMs;
this.metadataExpireMs = metadataExpireMs;
this.usingBootstrapControllers = usingBootstrapControllers;
this.updater = new AdminMetadataUpdater();
}
public boolean usingBootstrapControllers() {
return usingBootstrapControllers;
}
public AdminMetadataUpdater updater() {
return updater;
}
public boolean isReady() {
if (fatalException != null) {
log.debug("Metadata is not usable: failed to get metadata.", fatalException);
throw fatalException;
}
if (cluster.nodes().isEmpty()) {
log.trace("Metadata is not ready: bootstrap nodes have not been " +
"initialized yet.");
return false;
}
if (cluster.isBootstrapConfigured()) {
log.trace("Metadata is not ready: we have not fetched metadata from " +
"the bootstrap nodes yet.");
return false;
}
log.trace("Metadata is ready to use.");
return true;
}
public Node controller() {
return cluster.controller();
}
public Node nodeById(int nodeId) {
return cluster.nodeById(nodeId);
}
public void requestUpdate() {
if (state == State.QUIESCENT) {
state = State.UPDATE_REQUESTED;
log.debug("Requesting metadata update.");
}
}
public void clearController() {
if (cluster.controller() != null) {
log.trace("Clearing cached controller node {}.", cluster.controller());
this.cluster = new Cluster(cluster.clusterResource().clusterId(),
cluster.nodes(),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
null);
}
}
/**
* Determine if the AdminClient should fetch new metadata.
*/
public long metadataFetchDelayMs(long now) {
switch (state) {
case QUIESCENT:
// Calculate the time remaining until the next periodic update.
// We want to avoid making many metadata requests in a short amount of time,
// so there is a metadata refresh backoff period.
return Math.max(delayBeforeNextAttemptMs(now), delayBeforeNextExpireMs(now));
case UPDATE_REQUESTED:
// Respect the backoff, even if an update has been requested
return delayBeforeNextAttemptMs(now);
default:
// An update is already pending, so we don't need to initiate another one.
return Long.MAX_VALUE;
}
}
private long delayBeforeNextExpireMs(long now) {
long timeSinceUpdate = now - lastMetadataUpdateMs;
return Math.max(0, metadataExpireMs - timeSinceUpdate);
}
private long delayBeforeNextAttemptMs(long now) {
long timeSinceAttempt = now - lastMetadataFetchAttemptMs;
return Math.max(0, refreshBackoffMs - timeSinceAttempt);
}
public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) {
return metadataAttemptStartMs.filter(startMs -> now - startMs > rebootstrapTriggerMs).isPresent();
}
/**
* Transition into the UPDATE_PENDING state. Updates lastMetadataFetchAttemptMs.
*/
public void transitionToUpdatePending(long now) {
this.state = State.UPDATE_PENDING;
this.lastMetadataFetchAttemptMs = now;
if (metadataAttemptStartMs.isEmpty())
metadataAttemptStartMs = Optional.of(now);
}
public void updateFailed(Throwable exception) {
// We depend on pending calls to request another metadata update
this.state = State.QUIESCENT;
if (RequestUtils.isFatalException(exception)) {
log.warn("Fatal error during metadata update", exception);
// avoid unchecked/unconfirmed cast to ApiException
if (exception instanceof ApiException) {
this.fatalException = (ApiException) exception;
}
if (exception instanceof UnsupportedVersionException) {
if (usingBootstrapControllers) {
log.warn("The remote node is not a CONTROLLER that supports the KIP-919 " +
"DESCRIBE_CLUSTER api.", exception);
} else {
log.warn("The remote node is not a BROKER that supports the METADATA api.", exception);
}
}
} else {
log.info("Metadata update failed", exception);
}
}
/**
* Receive new metadata, and transition into the QUIESCENT state.
* Updates lastMetadataUpdateMs, cluster, and authException.
*/
public void update(Cluster cluster, long now) {
if (cluster.isBootstrapConfigured()) {
log.debug("Setting bootstrap cluster metadata {}.", cluster);
bootstrapCluster = cluster;
} else {
log.debug("Updating cluster metadata to {}", cluster);
this.lastMetadataUpdateMs = now;
}
this.state = State.QUIESCENT;
this.fatalException = null;
this.metadataAttemptStartMs = Optional.empty();
if (!cluster.nodes().isEmpty()) {
this.cluster = cluster;
}
}
public void initiateRebootstrap() {
this.metadataAttemptStartMs = Optional.of(0L);
}
/**
* Rebootstrap metadata with the cluster previously used for bootstrapping.
*/
public void rebootstrap(long now) {
log.info("Rebootstrapping with {}", this.bootstrapCluster);
update(bootstrapCluster, now);
this.metadataAttemptStartMs = Optional.of(now);
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java |
doctests = """
Basic class construction.
>>> class C:
... def meth(self): print("Hello")
...
>>> C.__class__ is type
True
>>> a = C()
>>> a.__class__ is C
True
>>> a.meth()
Hello
>>>
Use *args notation for the bases.
>>> class A: pass
>>> class B: pass
>>> bases = (A, B)
>>> class C(*bases): pass
>>> C.__bases__ == bases
True
>>>
Use a trivial metaclass.
>>> class M(type):
... pass
...
>>> class C(metaclass=M):
... def meth(self): print("Hello")
...
>>> C.__class__ is M
True
>>> a = C()
>>> a.__class__ is C
True
>>> a.meth()
Hello
>>>
Use **kwds notation for the metaclass keyword.
>>> kwds = {'metaclass': M}
>>> class C(**kwds): pass
...
>>> C.__class__ is M
True
>>> a = C()
>>> a.__class__ is C
True
>>>
Use a metaclass with a __prepare__ static method.
>>> class M(type):
... @staticmethod
... def __prepare__(*args, **kwds):
... print("Prepare called:", args, kwds)
... return dict()
... def __new__(cls, name, bases, namespace, **kwds):
... print("New called:", kwds)
... return type.__new__(cls, name, bases, namespace)
... def __init__(cls, *args, **kwds):
... pass
...
>>> class C(metaclass=M):
... def meth(self): print("Hello")
...
Prepare called: ('C', ()) {}
New called: {}
>>>
Also pass another keyword.
>>> class C(object, metaclass=M, other="haha"):
... pass
...
Prepare called: ('C', (<class 'object'>,)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (object,)
True
>>> a = C()
>>> a.__class__ is C
True
>>>
Check that build_class doesn't mutate the kwds dict.
>>> kwds = {'metaclass': type}
>>> class C(**kwds): pass
...
>>> kwds == {'metaclass': type}
True
>>>
Use various combinations of explicit keywords and **kwds.
>>> bases = (object,)
>>> kwds = {'metaclass': M, 'other': 'haha'}
>>> class C(*bases, **kwds): pass
...
Prepare called: ('C', (<class 'object'>,)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (object,)
True
>>> class B: pass
>>> kwds = {'other': 'haha'}
>>> class C(B, metaclass=M, *bases, **kwds): pass
...
Prepare called: ('C', (<class 'test.test_metaclass.B'>, <class 'object'>)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (B, object)
True
>>>
Check for duplicate keywords.
>>> class C(metaclass=type, metaclass=type): pass
...
Traceback (most recent call last):
[...]
SyntaxError: keyword argument repeated
>>>
Another way.
>>> kwds = {'metaclass': type}
>>> class C(metaclass=type, **kwds): pass
...
Traceback (most recent call last):
[...]
TypeError: __build_class__() got multiple values for keyword argument 'metaclass'
>>>
Use a __prepare__ method that returns an instrumented dict.
>>> class LoggingDict(dict):
... def __setitem__(self, key, value):
... print("d[%r] = %r" % (key, value))
... dict.__setitem__(self, key, value)
...
>>> class Meta(type):
... @staticmethod
... def __prepare__(name, bases):
... return LoggingDict()
...
>>> class C(metaclass=Meta):
... foo = 2+2
... foo = 42
... bar = 123
...
d['__module__'] = 'test.test_metaclass'
d['__qualname__'] = 'C'
d['foo'] = 4
d['foo'] = 42
d['bar'] = 123
>>>
Use a metaclass that doesn't derive from type.
>>> def meta(name, bases, namespace, **kwds):
... print("meta:", name, bases)
... print("ns:", sorted(namespace.items()))
... print("kw:", sorted(kwds.items()))
... return namespace
...
>>> class C(metaclass=meta):
... a = 42
... b = 24
...
meta: C ()
ns: [('__module__', 'test.test_metaclass'), ('__qualname__', 'C'), ('a', 42), ('b', 24)]
kw: []
>>> type(C) is dict
True
>>> print(sorted(C.items()))
[('__module__', 'test.test_metaclass'), ('__qualname__', 'C'), ('a', 42), ('b', 24)]
>>>
And again, with a __prepare__ attribute.
>>> def prepare(name, bases, **kwds):
... print("prepare:", name, bases, sorted(kwds.items()))
... return LoggingDict()
...
>>> meta.__prepare__ = prepare
>>> class C(metaclass=meta, other="booh"):
... a = 1
... a = 2
... b = 3
...
prepare: C () [('other', 'booh')]
d['__module__'] = 'test.test_metaclass'
d['__qualname__'] = 'C'
d['a'] = 1
d['a'] = 2
d['b'] = 3
meta: C ()
ns: [('__module__', 'test.test_metaclass'), ('__qualname__', 'C'), ('a', 2), ('b', 3)]
kw: [('other', 'booh')]
>>>
The default metaclass must define a __prepare__() method.
>>> type.__prepare__()
{}
>>>
Make sure it works with subclassing.
>>> class M(type):
... @classmethod
... def __prepare__(cls, *args, **kwds):
... d = super().__prepare__(*args, **kwds)
... d["hello"] = 42
... return d
...
>>> class C(metaclass=M):
... print(hello)
...
42
>>> print(C.hello)
42
>>>
Test failures in looking up the __prepare__ method work.
>>> class ObscureException(Exception):
... pass
>>> class FailDescr:
... def __get__(self, instance, owner):
... raise ObscureException
>>> class Meta(type):
... __prepare__ = FailDescr()
>>> class X(metaclass=Meta):
... pass
Traceback (most recent call last):
[...]
test.test_metaclass.ObscureException
"""
import sys
# Trace function introduces __locals__ which causes various tests to fail.
if hasattr(sys, 'gettrace') and sys.gettrace():
__test__ = {}
else:
__test__ = {'doctests' : doctests}
def test_main(verbose=False):
from test import support
from test import test_metaclass
support.run_doctest(test_metaclass, verbose)
if __name__ == "__main__":
test_main(verbose=True) | unknown | codeparrot/codeparrot-clean | ||
//go:build !enterprise
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package vault
import (
"context"
"time"
)
func (c *Core) UpdateTransformCallCounts(ctx context.Context, currentMonth time.Time) (uint64, error) {
// No-op in OSS
return 0, nil
}
func (c *Core) GetStoredTransformCallCounts(ctx context.Context, month time.Time) (uint64, error) {
return 0, nil
} | go | github | https://github.com/hashicorp/vault | vault/consumption_billing_util_stubs_oss.go |
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
)
class KrasViewIE(InfoExtractor):
IE_DESC = 'Красвью'
_VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)'
_TEST = {
'url': 'http://krasview.ru/video/512228',
'md5': '3b91003cf85fc5db277870c8ebd98eae',
'info_dict': {
'id': '512228',
'ext': 'mp4',
'title': 'Снег, лёд, заносы',
'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.',
'duration': 27,
'thumbnail': r're:^https?://.*\.jpg',
},
'params': {
'skip_download': 'Not accessible from Travis CI server',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
flashvars = json.loads(js_to_json(self._search_regex(
r'video_Init\(({.+?})', webpage, 'flashvars')))
video_url = flashvars['url']
title = self._og_search_title(webpage)
description = self._og_search_description(webpage, default=None)
thumbnail = flashvars.get('image') or self._og_search_thumbnail(webpage)
duration = int_or_none(flashvars.get('duration'))
width = int_or_none(self._og_search_property(
'video:width', webpage, 'video width', default=None))
height = int_or_none(self._og_search_property(
'video:height', webpage, 'video height', default=None))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'width': width,
'height': height,
} | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
import unittest
from pants.base.scm_project_tree import ScmProjectTree
from pants.scm.git import Git
from pants_test.base.pants_ignore_test_base import PantsIgnoreTestBase
class ScmPantsIgnoreTest(unittest.TestCase, PantsIgnoreTestBase):
"""
Common test cases are defined in PantsIgnoreTestBase.
Special test cases can be defined here.
"""
def mk_project_tree(self, build_root, ignore_patterns=None):
return ScmProjectTree(build_root, Git(worktree=build_root), 'HEAD', ignore_patterns)
def setUp(self):
super(ScmPantsIgnoreTest, self).setUp()
self.prepare()
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'config', 'user.email', 'you@example.com'])
subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
subprocess.check_call(['git', 'add', '.'])
subprocess.check_call(['git', 'commit', '-m' 'initial commit'])
def tearDown(self):
super(ScmPantsIgnoreTest, self).tearDown()
self.cleanup() | unknown | codeparrot/codeparrot-clean | ||
// _sysconfig provides data for the Python sysconfig module
#ifndef Py_BUILD_CORE_BUILTIN
# define Py_BUILD_CORE_MODULE 1
#endif
#include "Python.h"
#include "pycore_importdl.h" // _PyImport_DynLoadFiletab
#include "pycore_long.h" // _PyLong_GetZero, _PyLong_GetOne
/*[clinic input]
module _sysconfig
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=0a7c02d3e212ac97]*/
#include "clinic/_sysconfig.c.h"
#ifdef MS_WINDOWS
static int
add_string_value(PyObject *dict, const char *key, const char *str_value)
{
PyObject *value = PyUnicode_FromString(str_value);
if (value == NULL) {
return -1;
}
int err = PyDict_SetItemString(dict, key, value);
Py_DECREF(value);
return err;
}
#endif
/*[clinic input]
@permit_long_summary
_sysconfig.config_vars
Returns a dictionary containing build variables intended to be exposed by sysconfig.
[clinic start generated code]*/
static PyObject *
_sysconfig_config_vars_impl(PyObject *module)
/*[clinic end generated code: output=9c41cdee63ea9487 input=fdda9cab12ca19fe]*/
{
PyObject *config = PyDict_New();
if (config == NULL) {
return NULL;
}
#ifdef MS_WINDOWS
if (add_string_value(config, "EXT_SUFFIX", PYD_TAGGED_SUFFIX) < 0) {
Py_DECREF(config);
return NULL;
}
if (add_string_value(config, "SOABI", PYD_SOABI) < 0) {
Py_DECREF(config);
return NULL;
}
#endif
#ifdef Py_GIL_DISABLED
PyObject *py_gil_disabled = _PyLong_GetOne();
#else
PyObject *py_gil_disabled = _PyLong_GetZero();
#endif
if (PyDict_SetItemString(config, "Py_GIL_DISABLED", py_gil_disabled) < 0) {
Py_DECREF(config);
return NULL;
}
#ifdef Py_DEBUG
PyObject *py_debug = _PyLong_GetOne();
#else
PyObject *py_debug = _PyLong_GetZero();
#endif
if (PyDict_SetItemString(config, "Py_DEBUG", py_debug) < 0) {
Py_DECREF(config);
return NULL;
}
return config;
}
PyDoc_STRVAR(sysconfig__doc__,
"A helper for the sysconfig module.");
static struct PyMethodDef sysconfig_methods[] = {
_SYSCONFIG_CONFIG_VARS_METHODDEF
{NULL, NULL}
};
static PyModuleDef_Slot sysconfig_slots[] = {
{Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
{Py_mod_gil, Py_MOD_GIL_NOT_USED},
{0, NULL}
};
static PyModuleDef sysconfig_module = {
.m_base = PyModuleDef_HEAD_INIT,
.m_name = "_sysconfig",
.m_doc = sysconfig__doc__,
.m_methods = sysconfig_methods,
.m_slots = sysconfig_slots,
};
PyMODINIT_FUNC
PyInit__sysconfig(void)
{
return PyModuleDef_Init(&sysconfig_module);
} | c | github | https://github.com/python/cpython | Modules/_sysconfig.c |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"bytes"
"fmt"
"strings"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/states/statefile"
"github.com/hashicorp/terraform/internal/states/statemgr"
)
// StatePullCommand is a Command implementation that allows downloading
// and outputting state information from remote state.
type StatePullCommand struct {
Meta
StateMeta
}
func (c *StatePullCommand) Run(args []string) int {
args = c.Meta.process(args)
cmdFlags := c.Meta.defaultFlagSet("state pull")
if err := cmdFlags.Parse(args); err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
return 1
}
if diags := c.Meta.checkRequiredVersion(); diags != nil {
c.showDiagnostics(diags)
return 1
}
// Load the backend
view := arguments.ViewHuman
b, diags := c.backend(".", view)
if diags.HasErrors() {
c.showDiagnostics(diags)
return 1
}
// This is a read-only command
c.ignoreRemoteVersionConflict(b)
// Get the state manager for the current workspace
env, err := c.Workspace()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err))
return 1
}
stateMgr, sDiags := b.StateMgr(env)
if sDiags.HasErrors() {
c.Ui.Error(fmt.Sprintf(errStateLoadingState, sDiags.Err()))
return 1
}
if err := stateMgr.RefreshState(); err != nil {
c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err))
return 1
}
// Get a statefile object representing the latest snapshot
stateFile := statemgr.Export(stateMgr)
if stateFile != nil { // we produce no output if the statefile is nil
var buf bytes.Buffer
err = statefile.Write(stateFile, &buf)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err))
return 1
}
c.Ui.Output(buf.String())
}
return 0
}
func (c *StatePullCommand) Help() string {
helpText := `
Usage: terraform [global options] state pull [options]
Pull the state from its location, upgrade the local copy, and output it
to stdout.
This command "pulls" the current state and outputs it to stdout.
As part of this process, Terraform will upgrade the state format of the
local copy to the current version.
The primary use of this is for state stored remotely. This command
will still work with local state but is less useful for this.
`
return strings.TrimSpace(helpText)
}
func (c *StatePullCommand) Synopsis() string {
return "Pull current state and output to stdout"
} | go | github | https://github.com/hashicorp/terraform | internal/command/state_pull.go |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Monolog\Handler\FingersCrossed;
use Monolog\Handler\FingersCrossed\ActivationStrategyInterface;
use Monolog\LogRecord;
use Symfony\Component\HttpFoundation\RequestStack;
use Symfony\Component\HttpKernel\Exception\HttpExceptionInterface;
/**
* Activation strategy that ignores certain HTTP codes.
*
* @author Shaun Simmons <shaun@envysphere.com>
* @author Pierrick Vignand <pierrick.vignand@gmail.com>
*/
final class HttpCodeActivationStrategy implements ActivationStrategyInterface
{
/**
* @param array $exclusions each exclusion must have a "code" and "urls" keys
*/
public function __construct(
private RequestStack $requestStack,
private array $exclusions,
private ActivationStrategyInterface $inner,
) {
foreach ($exclusions as $exclusion) {
if (!\array_key_exists('code', $exclusion)) {
throw new \LogicException('An exclusion must have a "code" key.');
}
if (!\array_key_exists('urls', $exclusion)) {
throw new \LogicException('An exclusion must have a "urls" key.');
}
}
}
public function isHandlerActivated(LogRecord $record): bool
{
$isActivated = $this->inner->isHandlerActivated($record);
if (
$isActivated
&& isset($record->context['exception'])
&& $record->context['exception'] instanceof HttpExceptionInterface
&& ($request = $this->requestStack->getMainRequest())
) {
foreach ($this->exclusions as $exclusion) {
if ($record->context['exception']->getStatusCode() !== $exclusion['code']) {
continue;
}
if (\count($exclusion['urls'])) {
return !preg_match('{('.implode('|', $exclusion['urls']).')}i', $request->getPathInfo());
}
return false;
}
}
return $isActivated;
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bridge/Monolog/Handler/FingersCrossed/HttpCodeActivationStrategy.php |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
# Prefer the builtin print function when available (Python 2.6+ / 3.x).
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            # Matches builtin print: file=None writes nothing.
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # If any argument, sep or end is unicode, coerce all output to unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # Python < 3.3: builtin print lacks the flush keyword; wrap to add it.
    _print = print_
    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # functools.wraps on < 3.4 does not set __wrapped__; add it manually so
    # introspection tools (e.g. inspect.unwrap) can find the original function.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class that intercepts exactly one level of class
    creation: when a caller subclasses the returned value, the intended
    metaclass ``meta`` is invoked with the intended ``bases``, and the
    dummy level disappears from the resulting hierarchy.
    """
    class _TemporaryMeta(meta):
        # Fires when the user's class statement executes; substitute the
        # real metaclass and bases in place of the dummy ones.
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(_TemporaryMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Re-create the class under the requested metaclass, carrying over
        # the original namespace minus the entries that type() regenerates.
        body = dict(cls.__dict__)
        slot_names = body.get('__slots__')
        if slot_names is not None:
            if isinstance(slot_names, str):
                slot_names = [slot_names]
            # Drop the slot descriptors; type() will rebuild them.
            for name in slot_names:
                body.pop(name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        # Python 3: __str__ already returns text; nothing to patch.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Keep the text-returning method as __unicode__ and make __str__
    # return UTF-8 encoded bytes, as Python 2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    # Avoid leaking the loop variables into the module namespace.
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
{
"title": "V39 TimeSeriesTable Transformation Migration Test Dashboard",
"schemaVersion": 38,
"panels": [
{
"type": "table",
"title": "Panel with TimeSeriesTable Transformation - Single Stat",
"id": 1,
"transformations": [
{
"id": "timeSeriesTable",
"options": {
"refIdToStat": {
"A": "mean"
}
}
}
]
},
{
"type": "table",
"title": "Panel with TimeSeriesTable Transformation - Multiple Stats",
"id": 2,
"transformations": [
{
"id": "timeSeriesTable",
"options": {
"refIdToStat": {
"A": "mean",
"B": "max",
"C": "min",
"D": "sum"
}
}
}
]
},
{
"type": "graph",
"title": "Panel with TimeSeriesTable Transformation - Mixed with Other Transforms",
"id": 3,
"transformations": [
{
"id": "reduce",
"options": {
"reducers": ["mean"]
}
},
{
"id": "timeSeriesTable",
"options": {
"refIdToStat": {
"A": "last",
"B": "first"
}
}
},
{
"id": "organize",
"options": {
"excludeByName": {}
}
}
]
},
{
"type": "stat",
"title": "Panel with Non-TimeSeriesTable Transformation (Should Remain Unchanged)",
"id": 4,
"transformations": [
{
"id": "reduce",
"options": {
"reducers": ["mean", "max"]
}
}
]
},
{
"type": "table",
"title": "Panel with TimeSeriesTable - Empty RefIdToStat",
"id": 5,
"transformations": [
{
"id": "timeSeriesTable",
"options": {
"refIdToStat": {}
}
}
]
},
{
"type": "table",
"title": "Panel with TimeSeriesTable - No Options (Should Skip)",
"id": 6,
"transformations": [
{
"id": "timeSeriesTable"
}
]
},
{
"type": "table",
"title": "Panel with TimeSeriesTable - Invalid Options (Should Skip)",
"id": 7,
"transformations": [
{
"id": "timeSeriesTable",
"options": {
"someOtherOption": "value"
}
}
]
},
{
"type": "graph",
"title": "Panel with No Transformations (Should Remain Unchanged)",
"id": 8
},
{
"type": "row",
"title": "Row with Nested Panels Having TimeSeriesTable Transformations",
"id": 9,
"collapsed": false,
"panels": [
{
"type": "table",
"title": "Nested Panel with TimeSeriesTable",
"id": 10,
"transformations": [
{
"id": "timeSeriesTable",
"options": {
"refIdToStat": {
"NestedA": "median",
"NestedB": "stdDev"
}
}
}
]
}
]
}
]
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/input/v39.transform_timeseries_table.json |
# -*- coding: latin-1 -*-
import xbmcgui
class DialogProgress:
    """Stateful wrapper around xbmcgui.DialogProgress.

    Caches the heading, up to three lines of text and the last percent shown,
    so callers can update any subset of them and the remaining values are
    re-sent to the underlying dialog unchanged.
    """
    def __init__(self):
        self.dlg = xbmcgui.DialogProgress()
        self.__reset__()
    def __reset__(self):
        # Cached dialog state; second/third lines stay None until first set,
        # and only lines that have been set are forwarded to the dialog.
        self.head = ''
        self.firstline = ''
        self.secondline = None
        self.thirdline = None
        self.percent = 0
    def isCanceled(self):
        # The Kodi API method is spelled all-lowercase.
        return self.dlg.iscanceled()
    def update(self, percent=None, firstline=None, secondline=None, thirdline=None):
        """Refresh the dialog, changing only the arguments supplied."""
        if firstline:
            self.firstline = firstline
        if secondline:
            self.secondline = secondline
        if thirdline:
            self.thirdline = thirdline
        # BUGFIX: was "if percent:", which silently ignored an explicit
        # percent=0 and made it impossible to reset the bar to zero.
        if percent is not None:
            self.percent = percent
        if self.secondline and self.thirdline:
            self.dlg.update(self.percent, self.firstline, self.secondline, self.thirdline)
        elif self.secondline:
            self.dlg.update(self.percent, self.firstline, self.secondline)
        else:
            self.dlg.update(self.percent, self.firstline)
    def create(self, head, firstline=None, secondline=None, thirdline=None):
        """Open the dialog with the given heading and any supplied lines."""
        if firstline:
            self.firstline = firstline
        if secondline:
            self.secondline = secondline
        if thirdline:
            self.thirdline = thirdline
        if self.secondline and self.thirdline:
            self.dlg.create(head, self.firstline, self.secondline, self.thirdline)
        elif self.secondline:
            self.dlg.create(head, self.firstline, self.secondline)
        else:
            self.dlg.create(head, self.firstline)
    def close(self):
        self.dlg.close()
        self.__reset__()
DOCUMENTATION:
name: subset
author: Ansible Core
version_added: "2.4"
aliases: [issubset]
short_description: is the list a subset of this other list
description:
    - Validate whether the first list is a subset of (is fully contained in) the second list.
options:
_input:
description: List.
type: list
elements: raw
required: True
_superset:
description: List to test against.
type: list
elements: raw
required: True
EXAMPLES: |
big: [1,2,3,4,5]
small: [3,4]
issmallinbig: '{{ small is subset(big) }}'
RETURN:
_value:
description: Returns V(True) if the specified list is a subset of the provided list, V(False) otherwise.
type: boolean | unknown | github | https://github.com/ansible/ansible | lib/ansible/plugins/test/issubset.yml |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# TODO:
# Ability to set CPU/Memory reservations
try:
import json
except ImportError:
import simplejson as json
HAS_PYSPHERE = False
try:
from pysphere import VIServer, VIProperty, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
from pysphere import VIException, VIApiException, FaultTypes
HAS_PYSPHERE = True
except ImportError:
pass
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Create/delete/manage a guest VM through VMware vSphere.
description:
- Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7
version_added: "1.6"
options:
vcenter_hostname:
description:
- The hostname of the vcenter server the module will connect to, to create the guest.
required: true
default: null
aliases: []
guest:
description:
- The virtual server name you wish to manage.
required: true
user:
description:
- Username to connect to vcenter as.
required: true
default: null
password:
description:
- Password of the user to connect to vcenter as.
required: true
default: null
resource_pool:
description:
- The name of the resource_pool to create the VM in.
required: false
default: None
cluster:
description:
- The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
required: false
default: None
esxi:
description:
- Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
required: false
default: null
state:
description:
- Indicate desired state of the vm.
default: present
choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured']
vm_disk:
description:
- A key, value list of disks and their sizes and which datastore to keep it in.
required: false
default: null
vm_hardware:
description:
- A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi'].
required: false
default: null
vm_nic:
description:
- A key, value list of nics, their types and what network to put them on.
required: false
default: null
vm_extra_config:
description:
- A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM.
required: false
default: null
vm_hw_version:
description:
- Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
vmware_guest_facts:
description:
- Gather facts from vCenter on a particular VM
required: false
default: null
force:
description:
- Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy.
default: "no"
choices: [ "yes", "no" ]
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: Richard Hoop <wrhoop@gmail.com>
requirements: [ pysphere ]
'''
EXAMPLES = '''
# Create a new VM on an ESX server
# Returns changed = False when the VM already exists
# Returns changed = True and a adds ansible_facts from the new VM
# State will set the power status of a guest upon creation. Use powered_on to create and boot.
# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: powered_on
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Reconfigure the CPU and Memory on the newly created VM
# Will return the changes made
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: reconfigured
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 4096
num_cpus: 4
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
vmware_guest_facts: yes
# Typical output of a vsphere_facts run on a guest
- hw_eth0:
- addresstype: "assigned"
label: "Network adapter 1"
macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55"
summary: "VM Network"
hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest"
hw_memtotal_mb: 2048
hw_name: "centos64Guest"
hw_processor_count: 2
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
# Remove a vm from vSphere
# The VM must be powered_off of you need to use force to force a shutdown
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: absent
force: yes
'''
def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1):
    """Append a SCSI-controller deviceChange entry to the VM config spec.

    module  -- AnsibleModule, used for fail_json on an unknown controller type
    s       -- pysphere VIServer connection (disconnected before failing)
    config  -- VirtualMachineConfigSpec being assembled
    devices -- list of deviceChange specs the controller is appended to
    type    -- one of: lsi, paravirtual, lsi_sas, bus_logic
    Returns disk_ctrl_key so subsequent disks can reference this controller.
    """
    # add a scsi controller
    scsi_ctrl_spec = config.new_deviceChange()
    scsi_ctrl_spec.set_element_operation('add')
    if type == "lsi":
        # For RHEL5
        scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
    elif type == "paravirtual":
        # For RHEL6
        scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
    elif type == "lsi_sas":
        scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def(
            "scsi_ctrl").pyclass()
    elif type == "bus_logic":
        scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass()
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding scsi controller to vm spec. No scsi controller"
            " type of: %s" % (type))
    scsi_ctrl.set_element_busNumber(int(bus_num))
    scsi_ctrl.set_element_key(int(disk_ctrl_key))
    scsi_ctrl.set_element_sharedBus("noSharing")
    scsi_ctrl_spec.set_element_device(scsi_ctrl)
    # Add the scsi controller to the VM spec.
    devices.append(scsi_ctrl_spec)
    return disk_ctrl_key
def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0):
    """Append a VMDK virtual-disk deviceChange entry to the VM config spec.

    size is in KB. Any type other than "thick" is treated as thin
    provisioned. Fails the module (via find_datastore) when the datastore
    does not exist.
    """
    # add a vmdk disk
    # Verify the datastore exists
    datastore_name, ds = find_datastore(module, s, datastore, config_target)
    # create a new disk - file based - for the vm
    disk_spec = config.new_deviceChange()
    disk_spec.set_element_fileOperation("create")
    disk_spec.set_element_operation("add")
    disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
    disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
        "disk_backing").pyclass()
    disk_backing.set_element_fileName(datastore_name)
    disk_backing.set_element_diskMode("persistent")
    if type != "thick":
        disk_backing.set_element_thinProvisioned(1)
    disk_ctlr.set_element_key(key)
    disk_ctlr.set_element_controllerKey(int(disk_ctrl_key))
    disk_ctlr.set_element_unitNumber(int(disk_number))
    disk_ctlr.set_element_backing(disk_backing)
    disk_ctlr.set_element_capacityInKB(int(size))
    disk_spec.set_element_device(disk_ctlr)
    devices.append(disk_spec)
def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None):
    """Append a CD-ROM deviceChange entry to the VM config spec.

    type: "iso" (backed by a datastore ISO, vm_cd_iso_path given as
    "datastore/path") or "client" (remote client-device passthrough).
    NOTE(review): if type == "iso" but vm_cd_iso_path is None, the
    datastore/ds/iso_path locals are never assigned and the iso branch
    below would raise NameError - callers must supply a path for iso
    cdroms. Confirm against callers before changing.
    """
    # Add a cd-rom
    # Make sure the datastore exists.
    if vm_cd_iso_path:
        iso_location = vm_cd_iso_path.split('/', 1)
        datastore, ds = find_datastore(
            module, s, iso_location[0], config_target)
        iso_path = iso_location[1]
    # find ide controller
    ide_ctlr = None
    for dev in default_devs:
        if dev.typecode.type[1] == "VirtualIDEController":
            ide_ctlr = dev
    # add a cdrom based on a physical device
    if ide_ctlr:
        cd_spec = config.new_deviceChange()
        cd_spec.set_element_operation('add')
        cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
        if type == "iso":
            iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass()
            ds_ref = iso.new_datastore(ds)
            ds_ref.set_attribute_type(ds.get_attribute_type())
            iso.set_element_datastore(ds_ref)
            iso.set_element_fileName("%s %s" % (datastore, iso_path))
            cd_ctrl.set_element_backing(iso)
            cd_ctrl.set_element_key(20)
            cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
            cd_ctrl.set_element_unitNumber(0)
            cd_spec.set_element_device(cd_ctrl)
        elif type == "client":
            client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def(
                "client").pyclass()
            client.set_element_deviceName("")
            cd_ctrl.set_element_backing(client)
            cd_ctrl.set_element_key(20)
            cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
            cd_ctrl.set_element_unitNumber(0)
            cd_spec.set_element_device(cd_ctrl)
        else:
            s.disconnect()
            module.fail_json(
                msg="Error adding cdrom of type %s to vm spec. "
                " cdrom type can either be iso or client" % (type))
        devices.append(cd_spec)
def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"):
    """Append a network-adapter deviceChange entry to the VM config spec.

    nic_type: e1000, e1000e, pcnet32, vmxnet, vmxnet2 or vmxnet3.
    network_type: "standard" (portgroup referenced by name) or "dvs"
    (distributed virtual switch, resolved via portgroup key and switch
    uuid). Fails the module on unknown nic or network types.
    """
    # add a NIC
    # Different network card types are: "VirtualE1000",
    # "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2",
    # "VirtualVmxnet3"
    nic_spec = config.new_deviceChange()
    nic_spec.set_element_operation("add")
    if nic_type == "e1000":
        nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
    elif nic_type == "e1000e":
        nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
    elif nic_type == "pcnet32":
        nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet":
        nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet2":
        nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
    elif nic_type == "vmxnet3":
        nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding nic to vm spec. No nic type of: %s" %
            (nic_type))
    if network_type == "standard":
        nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_deviceName(network_name)
    elif network_type == "dvs":
        # Get the portgroup key
        portgroupKey = find_portgroup_key(module, s, nfmor, network_name)
        # Get the dvswitch uuid
        dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey)
        nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
            "nic_backing_port").pyclass()
        nic_backing_port.set_element_switchUuid(dvswitch_uuid)
        nic_backing_port.set_element_portgroupKey(portgroupKey)
        nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
            "nic_backing").pyclass()
        nic_backing.set_element_port(nic_backing_port)
    else:
        s.disconnect()
        module.fail_json(
            msg="Error adding nic backing to vm spec. No network type of:"
            " %s" % (network_type))
    nic_ctlr.set_element_addressType("generated")
    nic_ctlr.set_element_backing(nic_backing)
    nic_ctlr.set_element_key(4)
    nic_spec.set_element_device(nic_ctlr)
    devices.append(nic_spec)
def find_datastore(module, s, datastore, config_target):
    """Resolve a datastore name against the config target.

    Returns ("[name]", datastore-mor). If datastore is None/empty the first
    *accessible* datastore is used. Disconnects and fails the module when
    nothing matches.
    """
    ds = None
    for d in config_target.Datastore:
        # BUGFIX: the original condition parsed as
        #   (Accessible and name-matches) or (no datastore requested)
        # so when no datastore name was given, the first entry was selected
        # even if it was inaccessible. Accessibility is now always required.
        if (d.Datastore.Accessible and
                (not datastore or d.Datastore.Name == datastore)):
            ds = d.Datastore.Datastore
            datastore = d.Datastore.Name
            break
    if not ds:
        s.disconnect()
        module.fail_json(msg="Datastore: %s does not appear to exist" %
                         (datastore))
    datastore_name = "[%s]" % datastore
    return datastore_name, ds
def find_portgroup_key(module, s, nfmor, network_name):
    """Return the key of the distributed virtual portgroup whose name
    matches network_name, failing the module (after disconnecting) when
    no such portgroup exists under the given network folder."""
    # Fetch name/key property pairs for every DV portgroup below nfmor.
    candidates = s._retrieve_properties_traversal(
        property_names=['name', 'key'],
        from_node=nfmor, obj_type='DistributedVirtualPortgroup')
    matched = None
    for candidate in candidates:
        # Select the first portgroup carrying a "name" property equal to
        # the requested network name.
        if any(prop.Name == "name" and prop.Val == network_name
               for prop in candidate.PropSet):
            matched = candidate
            break
    if matched is None:
        s.disconnect()
        module.fail_json(
            msg="Could not find the distributed virtual portgroup named"
            " %s" % network_name)
    # Pull the "key" property off the matched portgroup.
    key = None
    for prop in matched.PropSet:
        if prop.Name == "key":
            key = prop.Val
    return key
def find_dvswitch_uuid(module, s, nfmor, portgroupKey):
    """Return the uuid of the distributed virtual switch that owns the
    portgroup identified by portgroupKey.

    NOTE(review): if no switch owns the key, dvswitch_mor stays None and
    the final property loop raises AttributeError instead of failing the
    module cleanly - confirm callers always pass a valid key.
    """
    # Find a dvswitch's uuid given a portgroup key.
    # Function searches all dvswitches in the datacenter to find the switch
    # that has the portgroup key.
    # Grab the dvswitch uuid and portgroup properties
    dvswitch_mors = s._retrieve_properties_traversal(
        property_names=['uuid', 'portgroup'],
        from_node=nfmor, obj_type='DistributedVirtualSwitch')
    dvswitch_mor = None
    # Get the dvswitches managed object
    for dvswitch in dvswitch_mors:
        if dvswitch_mor:
            break
        for p in dvswitch.PropSet:
            if p.Name == "portgroup":
                pg_mors = p.Val.ManagedObjectReference
                for pg_mor in pg_mors:
                    if dvswitch_mor:
                        break
                    # One extra round-trip per portgroup to fetch its key.
                    key_mor = s._get_object_properties(
                        pg_mor, property_names=['key'])
                    for key in key_mor.PropSet:
                        if key.Val == portgroupKey:
                            dvswitch_mor = dvswitch
    # Get the switches uuid
    dvswitch_uuid = None
    for p in dvswitch_mor.PropSet:
        if p.Name == "uuid":
            dvswitch_uuid = p.Val
    return dvswitch_uuid
def spec_singleton(spec, request, vm):
    """Lazily create the reconfiguration spec.

    On the first call (spec is falsy) the VM reference is attached to the
    request and a fresh spec is built; later calls return the existing
    spec untouched.
    """
    if spec:
        return spec
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    return request.new_spec()
def vmdisk_id(vm, current_datastore_name):
    """Return the device keys of every VM disk whose descriptor mentions
    the given datastore name (substring match)."""
    return [disk['device']['key'] for disk in vm._disks
            if current_datastore_name in disk['descriptor']]
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    """Reconfigure memory and CPU count of an existing VM.

    Only memory_mb and num_cpus from vm_hardware are applied. A powered-on
    VM is hot-modified where the hot-add/hot-remove flags permit; otherwise
    it is shut down first (requires force) and powered back on afterwards.
    Always exits the module via exit_json/fail_json.
    """
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False
    # Hot-plug capabilities of the running VM decide whether a shutdown
    # is needed for each change.
    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)
    # Change Memory
    if vm_hardware['memory_mb']:
        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)
            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not memoryHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        # Memory can never be hot-removed.
                        shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not memoryHotAddEnabled:
                        module.fail_json(
                            msg="memoryHotAdd is not enabled. force is "
                            "required for shutdown")
                    # Fail on no force and memory shrink
                    elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
                        module.fail_json(
                            msg="Cannot lower memory on a live VM. force is "
                            "required for shutdown")
            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']
    # ====( Config CPU )====#
    if vm_hardware['num_cpus']:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)
            if vm.is_powered_on():
                if force:
                    # No hot add but force
                    if not cpuHotAddEnabled:
                        shutdown = True
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            shutdown = True
                else:
                    # Fail on no hot add and no force
                    if not cpuHotAddEnabled:
                        module.fail_json(
                            msg="cpuHotAdd is not enabled. force is "
                            "required for shutdown")
                    # Fail on no force and cpu shrink without hot remove
                    elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
                        if not cpuHotRemoveEnabled:
                            module.fail_json(
                                msg="Cannot lower CPU on a live VM without "
                                "cpuHotRemove. force is required for shutdown")
            spec.set_element_numCPUs(int(vm_hardware['num_cpus']))
            changes['cpu'] = vm_hardware['num_cpus']
    if len(changes):
        if shutdown and vm.is_powered_on():
            try:
                vm.power_off(sync_run=True)
                vm.get_status()
            except Exception, e:
                module.fail_json(
                    msg='Failed to shutdown vm %s: %s' % (guest, e)
                )
        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
        # Wait for the task to finish
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            changed = True
        elif status == task.STATE_ERROR:
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())
        # If we had to shut the VM down for the change, bring it back up.
        if vm.is_powered_off():
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
                module.fail_json(
                    msg='Failed to power on vm %s : %s' % (guest, e)
                )
    vsphere_client.disconnect()
    if changed:
        module.exit_json(changed=True, changes=changes)
    module.exit_json(changed=False)
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):
    """Create a new guest VM on the given ESXi host/datacenter.

    Resolves the datacenter, host, compute resource and resource pool,
    builds a CreateVM_Task spec (disks, optional cdrom, nics), runs the
    task, optionally applies extra config and powers the VM on, then
    exits the module with gathered facts. Always terminates via
    exit_json/fail_json.
    """
    datacenter = esxi['datacenter']
    esxi_hostname = esxi['hostname']
    # Datacenter managed object reference
    dclist = [k for k,
              v in vsphere_client.get_datacenters().items() if v == datacenter]
    if dclist:
        dcmor=dclist[0]
    else:
        vsphere_client.disconnect()
        module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
    dcprops = VIProperty(vsphere_client, dcmor)
    # hostFolder managed reference
    hfmor = dcprops.hostFolder._obj
    # virtualmachineFolder managed object reference
    vmfmor = dcprops.vmFolder._obj
    # networkFolder managed object reference
    nfmor = dcprops.networkFolder._obj
    # Grab the computerResource name and host properties
    crmors = vsphere_client._retrieve_properties_traversal(
        property_names=['name', 'host'],
        from_node=hfmor,
        obj_type='ComputeResource')
    # Grab the host managed object reference of the esxi_hostname
    try:
        hostmor = [k for k,
                   v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
    except IndexError, e:
        vsphere_client.disconnect()
        module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
    # Grab the computerResource managed object reference of the host we are
    # creating the VM on.
    crmor = None
    for cr in crmors:
        if crmor:
            break
        for p in cr.PropSet:
            if p.Name == "host":
                for h in p.Val.get_element_ManagedObjectReference():
                    if h == hostmor:
                        crmor = cr.Obj
                        break
                if crmor:
                    break
    crprops = VIProperty(vsphere_client, crmor)
    # Get resource pool managed reference
    # Requires that a cluster name be specified.
    if resource_pool:
        try:
            cluster = [k for k,
                       v in vsphere_client.get_clusters().items() if v == cluster_name][0]
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find Cluster named: %s" %
                             cluster_name)
        try:
            rpmor = [k for k, v in vsphere_client.get_resource_pools(
                from_mor=cluster).items()
                if v == resource_pool][0]
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find Resource Pool named: %s" %
                             resource_pool)
    else:
        # Default to the compute resource's root resource pool.
        rpmor = crprops.resourcePool._obj
    # CREATE VM CONFIGURATION
    # get config target
    request = VI.QueryConfigTargetRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(
        crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval
    # get default devices
    request = VI.QueryConfigOptionRequestMsg()
    _this = request.new__this(crprops.environmentBrowser._obj)
    _this.set_attribute_type(
        crprops.environmentBrowser._obj.get_attribute_type())
    request.set_element__this(_this)
    h = request.new_host(hostmor)
    h.set_attribute_type(hostmor.get_attribute_type())
    request.set_element_host(h)
    config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval
    default_devs = config_option.DefaultDevice
    # add parameters to the create vm task
    create_vm_request = VI.CreateVM_TaskRequestMsg()
    config = create_vm_request.new_config()
    if vm_hw_version:
        config.set_element_version(vm_hw_version)
    vmfiles = config.new_files()
    # The VM's home directory lives on disk1's datastore.
    datastore_name, ds = find_datastore(
        module, vsphere_client, vm_disk['disk1']['datastore'], config_target)
    vmfiles.set_element_vmPathName(datastore_name)
    config.set_element_files(vmfiles)
    config.set_element_name(guest)
    if 'notes' in vm_extra_config:
        config.set_element_annotation(vm_extra_config['notes'])
    config.set_element_memoryMB(int(vm_hardware['memory_mb']))
    config.set_element_numCPUs(int(vm_hardware['num_cpus']))
    config.set_element_guestId(vm_hardware['osid'])
    devices = []
    # Attach all the hardware we want to the VM spec.
    # Add a scsi controller to the VM spec.
    disk_ctrl_key = add_scsi_controller(
        module, vsphere_client, config, devices, vm_hardware['scsi'])
    if vm_disk:
        disk_num = 0
        disk_key = 0
        for disk in sorted(vm_disk.iterkeys()):
            try:
                datastore = vm_disk[disk]['datastore']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. datastore needs to be"
                    " specified." % disk)
            try:
                disksize = int(vm_disk[disk]['size_gb'])
                # Convert the disk size from GB to kilobytes
                disksize = disksize * 1024 * 1024
            except (KeyError, ValueError):
                vsphere_client.disconnect()
                module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk)
            try:
                disktype = vm_disk[disk]['type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. type needs to be"
                    " specified." % disk)
            # Add the disk to the VM spec.
            add_disk(
                module, vsphere_client, config_target, config,
                devices, datastore, disktype, disksize, disk_ctrl_key,
                disk_num, disk_key)
            disk_num = disk_num + 1
            disk_key = disk_key + 1
    if 'vm_cdrom' in vm_hardware:
        cdrom_iso_path = None
        cdrom_type = None
        try:
            cdrom_type = vm_hardware['vm_cdrom']['type']
        except KeyError:
            vsphere_client.disconnect()
            module.fail_json(
                msg="Error on %s definition. cdrom type needs to be"
                " specified." % vm_hardware['vm_cdrom'])
        if cdrom_type == 'iso':
            try:
                cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. cdrom iso_path needs"
                    " to be specified." % vm_hardware['vm_cdrom'])
        # Add a CD-ROM device to the VM.
        add_cdrom(module, vsphere_client, config_target, config, devices,
                  default_devs, cdrom_type, cdrom_iso_path)
    if vm_nic:
        for nic in sorted(vm_nic.iterkeys()):
            try:
                nictype = vm_nic[nic]['type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. type needs to be "
                    " specified." % nic)
            try:
                network = vm_nic[nic]['network']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. network needs to be "
                    " specified." % nic)
            try:
                network_type = vm_nic[nic]['network_type']
            except KeyError:
                vsphere_client.disconnect()
                module.fail_json(
                    msg="Error on %s definition. network_type needs to be "
                    " specified." % nic)
            # Add the nic to the VM spec.
            add_nic(module, vsphere_client, nfmor, config, devices,
                    nictype, network, network_type)
    config.set_element_deviceChange(devices)
    create_vm_request.set_element_config(config)
    folder_mor = create_vm_request.new__this(vmfmor)
    folder_mor.set_attribute_type(vmfmor.get_attribute_type())
    create_vm_request.set_element__this(folder_mor)
    rp_mor = create_vm_request.new_pool(rpmor)
    rp_mor.set_attribute_type(rpmor.get_attribute_type())
    create_vm_request.set_element_pool(rp_mor)
    host_mor = create_vm_request.new_host(hostmor)
    host_mor.set_attribute_type(hostmor.get_attribute_type())
    create_vm_request.set_element_host(host_mor)
    # CREATE THE VM
    taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
    task = VITask(taskmor, vsphere_client)
    task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
    if task.get_state() == task.STATE_ERROR:
        vsphere_client.disconnect()
        module.fail_json(msg="Error creating vm: %s" %
                         task.get_error_message())
    else:
        # We always need to get the vm because we are going to gather facts
        vm = vsphere_client.get_vm_by_name(guest)
        # VM was created. If there is any extra config options specified, set
        # them here , disconnect from vcenter, then exit.
        if vm_extra_config:
            vm.set_extra_config(vm_extra_config)
        # Power on the VM if it was requested
        power_state(vm, state, True)
        vsphere_client.disconnect()
        module.exit_json(
            ansible_facts=gather_facts(vm),
            changed=True,
            changes="Created VM %s" % guest)
def delete_vm(vsphere_client, module, guest, vm, force):
try:
if vm.is_powered_on():
if force:
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e))
else:
module.fail_json(
msg='You must use either shut the vm down first or '
'use force ')
# Invoke Destroy_Task
request = VI.Destroy_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
ret = vsphere_client._proxy.Destroy_Task(request)._returnval
task = VITask(ret, vsphere_client)
# Wait for the task to finish
status = task.wait_for_state(
[task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error removing vm: %s %s" %
task.get_error_message())
module.exit_json(changed=True, changes="VM %s deleted" % guest)
except Exception, e:
module.fail_json(
msg='Failed to delete vm %s : %s' % (guest, e))
def power_state(vm, state, force):
"""
Correctly set the power status for a VM determined by the current and
requested states. force is forceful
"""
power_status = vm.get_status()
check_status = ' '.join(state.split("_")).upper()
# Need Force
if not force and power_status in [
'SUSPENDED', 'POWERING ON',
'RESETTING', 'BLOCKED ON MSG'
]:
return "VM is in %s power state. Force is required!" % power_status
# State is already true
if power_status == check_status:
return False
else:
try:
if state == 'powered_off':
vm.power_off(sync_run=True)
elif state == 'powered_on':
vm.power_on(sync_run=True)
elif state == 'restarted':
if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'):
vm.reset(sync_run=False)
else:
return "Cannot restart VM in the current state %s" \
% power_status
return True
except Exception, e:
return e
return False
def gather_facts(vm):
"""
Gather facts for VM directly from vsphere.
"""
vm.get_properties()
facts = {
'module_hw': True,
'hw_name': vm.properties.name,
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
}
ifidx = 0
for entry in vm.properties.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ifidx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
ifidx += 1
return facts
class DefaultVMConfig(object):
"""
Shallow and deep dict comparison for interfaces
"""
def __init__(self, check_dict, interface_dict):
self.check_dict, self.interface_dict = check_dict, interface_dict
self.set_current, self.set_past = set(
check_dict.keys()), set(interface_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
self.recursive_missing = None
def shallow_diff(self):
return self.set_past - self.intersect
def recursive_diff(self):
if not self.recursive_missing:
self.recursive_missing = []
for key, value in self.interface_dict.items():
if isinstance(value, dict):
for k, v in value.items():
if k in self.check_dict[key]:
if not isinstance(self.check_dict[key][k], v):
try:
if v == int:
self.check_dict[key][k] = int(self.check_dict[key][k])
elif v == basestring:
self.check_dict[key][k] = str(self.check_dict[key][k])
else:
raise ValueError
except ValueError:
self.recursive_missing.append((k, v))
else:
self.recursive_missing.append((k, v))
return self.recursive_missing
def config_check(name, passed, default, module):
"""
Checks that the dict passed for VM configuration matches the required
interface declared at the top of __main__
"""
diff = DefaultVMConfig(passed, default)
if len(diff.shallow_diff()):
module.fail_json(
msg="Missing required key/pair [%s]. %s must contain %s" %
(', '.join(diff.shallow_diff()), name, default))
if diff.recursive_diff():
module.fail_json(
msg="Config mismatch for %s on %s" %
(name, diff.recursive_diff()))
return True
def main():
vm = None
proto_vm_hardware = {
'memory_mb': int,
'num_cpus': int,
'scsi': basestring,
'osid': basestring
}
proto_vm_disk = {
'disk1': {
'datastore': basestring,
'size_gb': int,
'type': basestring
}
}
proto_vm_nic = {
'nic1': {
'type': basestring,
'network': basestring,
'network_type': basestring
}
}
proto_esxi = {
'datacenter': basestring,
'hostname': basestring
}
module = AnsibleModule(
argument_spec=dict(
vcenter_hostname=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(required=True, type='str'),
state=dict(
required=False,
choices=[
'powered_on',
'powered_off',
'present',
'absent',
'restarted',
'reconfigured'
],
default='present'),
vmware_guest_facts=dict(required=False, choices=BOOLEANS),
guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}),
vm_hardware=dict(required=False, type='dict', default={}),
vm_extra_config=dict(required=False, type='dict', default={}),
vm_hw_version=dict(required=False, default=None, type='str'),
resource_pool=dict(required=False, default=None, type='str'),
cluster=dict(required=False, default=None, type='str'),
force=dict(required=False, choices=BOOLEANS, default=False),
esxi=dict(required=False, type='dict', default={}),
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts']],
required_together=[
['state', 'force'],
[
'state',
'vm_disk',
'vm_nic',
'vm_hardware',
'esxi'
],
['resource_pool', 'cluster']
],
)
if not HAS_PYSPHERE:
module.fail_json(msg='pysphere module required')
vcenter_hostname = module.params['vcenter_hostname']
username = module.params['username']
password = module.params['password']
vmware_guest_facts = module.params['vmware_guest_facts']
state = module.params['state']
guest = module.params['guest']
force = module.params['force']
vm_disk = module.params['vm_disk']
vm_nic = module.params['vm_nic']
vm_hardware = module.params['vm_hardware']
vm_extra_config = module.params['vm_extra_config']
vm_hw_version = module.params['vm_hw_version']
esxi = module.params['esxi']
resource_pool = module.params['resource_pool']
cluster = module.params['cluster']
# CONNECT TO THE SERVER
viserver = VIServer()
try:
viserver.connect(vcenter_hostname, username, password)
except VIApiException, err:
module.fail_json(msg="Cannot connect to %s: %s" %
(vcenter_hostname, err))
# Check if the VM exists before continuing
try:
vm = viserver.get_vm_by_name(guest)
except Exception:
pass
if vm:
# Run for facts only
if vmware_guest_facts:
try:
module.exit_json(ansible_facts=gather_facts(vm))
except Exception, e:
module.fail_json(
msg="Fact gather failed with exception %s" % e)
# Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force)
# Failure
if isinstance(state_result, basestring):
module.fail_json(msg=state_result)
else:
module.exit_json(changed=state_result)
# Just check if there
elif state == 'present':
module.exit_json(changed=False)
# Fail on reconfig without params
elif state == 'reconfigured':
reconfigure_vm(
vsphere_client=viserver,
vm=vm,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
state=state,
force=force
)
elif state == 'absent':
delete_vm(
vsphere_client=viserver,
module=module,
guest=guest,
vm=vm,
force=force)
# VM doesn't exist
else:
# Fail for fact gather task
if vmware_guest_facts:
module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm"
% guest)
if state in ['restarted', 'reconfigured']:
module.fail_json(
msg="No such VM %s. States ["
"restarted, reconfigured] required an existing VM" % guest)
elif state == 'absent':
module.exit_json(changed=False, msg="vm %s not present" % guest)
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']:
# Check the guest_config
config_check("vm_disk", vm_disk, proto_vm_disk, module)
config_check("vm_nic", vm_nic, proto_vm_nic, module)
config_check("vm_hardware", vm_hardware, proto_vm_hardware, module)
config_check("esxi", esxi, proto_esxi, module)
create_vm(
vsphere_client=viserver,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
vm_hw_version=vm_hw_version,
state=state
)
viserver.disconnect()
module.exit_json(
changed=False,
vcenter=vcenter_hostname)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0) | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_cancel(osv.Model):
""" lunch cancel """
_name = 'lunch.cancel'
_description = 'cancel lunch order'
def cancel(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').cancel(cr, uid, ids, context=context) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python2.7
# TODO add error checking/doc strings
# TODO this needs more testing, especially testing change in core state
# figure out when to lock core
# how the fuk to import?
# get emitter isnt working. am i sure tmpname is ok to use?
# catch all kill signals?
# maybe each systemd service can map to multiple fireman services?
# Filter seems to be not working? Systemd updates my file descriptor
# even when filtered actions occur. Its fault. Wasting resources.
import systemd.journal as journal
import logging
import select
import os
import sys
sys.path.append("core")
import core_api as core
# Systemd journal object
global j
# Mapping from systemd service names to fireman service names
global services
services = {}
# File descriptor to select journal changes
global journal_fd
# File descriptor to select core changes
global core_fd
# Maps systemd service names to status
global service_statuses
service_statuses = {}
def update_services():
"""This function gets the service list from the core.
It should start everything with a clean slate to make sure
services are left in a consistent state.
"""
global services
global service_statuses
global journal_fd
global j
# Re-initialise
services = {}
service_statuses = {}
j = journal.Reader()
journal_fd = j.fileno()
j.this_boot()
j.this_machine()
# Only listen to messages from init. It's trustworthy.
j.add_match(_PID="1")
logging.debug("Getting services.")
service_tuples = core.get_service_names()
for name,systemd_name in service_tuples:
logging.debug("Service found. Name: " + name + ", systemd name: "
+ systemd_name)
# Store the service information
services[systemd_name] = name
# Add a filter. This is the efficient place to filter.
j.add_match(UNIT=systemd_name)
def startup():
global journal_fd
global core_fd
logging.debug("Listener started. Getting service list.")
core.set_master_config("core/master.conf")
# These two actions must occur "atomically"
core.get_lock()
update_services()
core_fd = core.get_service_emitter()
core.release_lock()
read_journal()
def cleanup():
global j
try:
core.get_lock()
core.drop_service_emitter(core_fd)
core.release_lock()
except:
pass
j.close()
def read_journal():
global j
global services
global core_fd
global journal_fd
j.process()
if(services != {}):
new_service_statuses = {}
for entry in j:
if entry['UNIT'] in services:
m = entry['MESSAGE']
logging.debug("Got journal message: " + m)
# Is this good? What if their output format changes?
action = m.split(None,1)
if action[0] in ["Starting","Started",
"Stopping","Stopped"]:
new_service_statuses[entry['UNIT']] = action[0]
logging.debug("It's a " + action[0] + " message.")
else:
logging.debug("Unknown message: " + entry['MESSAGE'])
else:
logging.debug("This shouldn't happen. Does it matter?")
# We stored all changes. Now we check if we need to update.
# This handles multiple changes well.
core.get_lock()
for s in new_service_statuses:
status = new_service_statuses[s]
if s in service_statuses:
# Previous status of this service
s_old = service_statuses[s]
else:
s_old = None
if((status in ["Starting","Started"])
and (s_old in ["Stopping","Stopped",None])):
logging.debug("Asking core to start " + services[s])
core.start_service(services[s])
elif((status in ["Stopping","Stopped"])
and (s_old in ["Starting","Started",None])):
logging.debug("Asking core to stop " + services[s])
core.stop_service(services[s])
# Update the status
service_statuses[s] = status
core.release_lock()
def body():
global j
global services
global core_fd
global journal_fd
ready = []
logging.debug("Entering listener body. Going to select")
while ready == []:
try:
ready,_,_ = select.select([journal_fd,core_fd],[],[])
except:
pass
for r in ready:
# Process journal
if r == journal_fd:
logging.debug("New journal entries!")
read_journal()
# Check for core changes
if r == core_fd:
logging.debug("New services!")
# We need to update services. First we must lock and read data.
core.get_lock()
while os.read(core_fd,1) != "":
pass
update_services()
core.release_lock()
read_journal()
logging.basicConfig(level=logging.DEBUG)
try:
startup()
while True:
body()
except Exception as e:
logging.debug("Service listener closing.")
cleanup()
raise
exit(0) | unknown | codeparrot/codeparrot-clean | ||
# Prometheus configuration for cockroach clusters.
# Requires prometheus 2.X
#
# Run with:
# $ prometheus -config.file=prometheus.yml
global:
scrape_interval: 10s
evaluation_interval: 10s
rule_files:
- "rules/alerts.rules.yml"
- "rules/aggregation.rules.yml"
# Alert manager running on the same host:
alerting:
alertmanagers:
- path_prefix: "/alertmanager/"
static_configs:
- targets:
- localhost:9093
scrape_configs:
- job_name: 'cockroachdb'
metrics_path: '/_status/vars'
# Insecure mode:
scheme: 'http'
# Secure mode:
# scheme: 'https'
tls_config:
insecure_skip_verify: true
static_configs:
- targets: ['localhost:8080']
labels:
cluster: 'my-cockroachdb-cluster' | unknown | github | https://github.com/cockroachdb/cockroach | monitoring/prometheus.yml |
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# Instanciate one distribution object
distribution = Chi(1.5)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(
oneSample[0]), " last=", repr(oneSample[size - 1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))
size = 100
for i in range(2):
msg = ''
if FittingTest.Kolmogorov(distribution.getSample(size), distribution).getBinaryQualityMeasure():
msg = "accepted"
else:
msg = "rejected"
print(
"Kolmogorov test for the generator, sample size=", size, " is", msg)
size *= 10
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", repr(point))
# Show PDF and CDF of point
eps = 1e-5
DDF = distribution.computeDDF(point)
print("ddf =", repr(DDF))
print("ddf (FD)=", repr(NumericalPoint(1, (distribution.computePDF(
point + NumericalPoint(1, eps)) - distribution.computePDF(point + NumericalPoint(1, -eps))) / (2.0 * eps))))
PDF = distribution.computePDF(point)
print("pdf = %.12g" % PDF)
print("pdf (FD)= %.9f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps), ))
CDF = distribution.computeCDF(point)
print("cdf= %.12g" % CDF)
CF = distribution.computeCharacteristicFunction(point[0])
print("characteristic function= (%.12g%+.12gj)" % (CF.real, CF.imag))
PDFgr = distribution.computePDFGradient(point)
print("pdf gradient =", repr(PDFgr))
PDFgrFD = NumericalPoint(1)
PDFgrFD[0] = (Chi(distribution.getNu() + eps).computePDF(point) -
Chi(distribution.getNu() - eps).computePDF(point)) / (2.0 * eps)
print("pdf gradient (FD)=", repr(PDFgrFD))
CDFgr = distribution.computeCDFGradient(point)
print("cdf gradient =", repr(CDFgr))
CDFgrFD = NumericalPoint(1)
CDFgrFD[0] = (Chi(distribution.getNu() + eps).computeCDF(point) -
Chi(distribution.getNu() - eps).computeCDF(point)) / (2.0 * eps)
print("cdf gradient (FD)=", repr(CDFgrFD))
quantile = distribution.computeQuantile(0.95)
print("quantile=", repr(quantile))
print("cdf(quantile)=", distribution.computeCDF(quantile))
mean = distribution.getMean()
print("mean=", repr(mean))
covariance = distribution.getCovariance()
print("covariance=", repr(covariance))
parameters = distribution.getParametersCollection()
print("parameters=", repr(parameters))
for i in range(6):
print("standard moment n=", i, " value=",
distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
# Specific to this distribution
nu = distribution.getNu()
print("nu=", nu)
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", repr(standardDeviation))
skewness = distribution.getSkewness()
print("skewness=", repr(skewness))
kurtosis = distribution.getKurtosis()
print("kurtosis=", repr(kurtosis))
except:
import sys
print("t_Chi_std.py", sys.exc_info()[0], sys.exc_info()[1]) | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.LeaveGroupRequestData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.Readable;
import java.util.Collections;
import java.util.List;
public class LeaveGroupRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<LeaveGroupRequest> {
private final String groupId;
private final List<MemberIdentity> members;
public Builder(String groupId, List<MemberIdentity> members) {
this(groupId, members, ApiKeys.LEAVE_GROUP.oldestVersion(), ApiKeys.LEAVE_GROUP.latestVersion());
}
Builder(String groupId, List<MemberIdentity> members, short oldestVersion, short latestVersion) {
super(ApiKeys.LEAVE_GROUP, oldestVersion, latestVersion);
this.groupId = groupId;
this.members = members;
if (members.isEmpty()) {
throw new IllegalArgumentException("leaving members should not be empty");
}
}
/**
* Based on the request version to choose fields.
*/
@Override
public LeaveGroupRequest build(short version) {
final LeaveGroupRequestData data;
// Starting from version 3, all the leave group request will be in batch.
if (version >= 3) {
data = new LeaveGroupRequestData()
.setGroupId(groupId)
.setMembers(members);
} else {
if (members.size() != 1) {
throw new UnsupportedVersionException("Version " + version + " leave group request only " +
"supports single member instance than " + members.size() + " members");
}
data = new LeaveGroupRequestData()
.setGroupId(groupId)
.setMemberId(members.get(0).memberId());
}
return new LeaveGroupRequest(data, version);
}
@Override
public String toString() {
return "(type=LeaveGroupRequest" +
", groupId=" + groupId +
", members=" + MessageUtil.deepToString(members.iterator()) +
")";
}
}
private final LeaveGroupRequestData data;
private LeaveGroupRequest(LeaveGroupRequestData data, short version) {
super(ApiKeys.LEAVE_GROUP, version);
this.data = data;
}
@Override
public LeaveGroupRequestData data() {
return data;
}
public LeaveGroupRequestData normalizedData() {
if (version() >= 3) {
return data;
} else {
return new LeaveGroupRequestData()
.setGroupId(data.groupId())
.setMembers(Collections.singletonList(
new MemberIdentity().setMemberId(data.memberId())));
}
}
public List<MemberIdentity> members() {
// Before version 3, leave group request is still in single mode
return version() <= 2 ? Collections.singletonList(
new MemberIdentity()
.setMemberId(data.memberId())) : data.members();
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
LeaveGroupResponseData responseData = new LeaveGroupResponseData()
.setErrorCode(Errors.forException(e).code());
if (version() >= 1) {
responseData.setThrottleTimeMs(throttleTimeMs);
}
return new LeaveGroupResponse(responseData);
}
public static LeaveGroupRequest parse(Readable readable, short version) {
return new LeaveGroupRequest(new LeaveGroupRequestData(readable, version), version);
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupRequest.java |
from libsaas import http, parsers
from libsaas.services import base
from .resource import (
serialize_param, TrelloFieldMixin, TrelloFilterMixin,
TrelloResource, TrelloCollection,
TrelloReadonlyResource, TrelloReadonlyCollection)
class Board(TrelloReadonlyResource, TrelloFieldMixin):
path = 'board'
class Cards(TrelloReadonlyCollection, TrelloFilterMixin):
path = 'cards'
class CheckItems(TrelloCollection):
path = 'checkItems'
class CheckItem(TrelloResource):
path = 'checkItems'
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class Checklist(TrelloResource, TrelloFieldMixin):
path = 'checklists'
@base.apimethod
def get(self, **kwargs):
"""
Fetch a single object.
Upstream documentation:
https://trello.com/docs/api/checklist/index.html
"""
params = base.get_params(None, kwargs, serialize_param=serialize_param)
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.resource(Board)
def board(self):
"""
Returns a single board
"""
return Board(self)
@base.resource(Cards)
def cards(self):
"""
Returns all cards
"""
return Cards(self)
@base.resource(CheckItems)
def checkitems(self):
"""
Returns all checkitems
"""
return CheckItems(self)
@base.resource(CheckItem)
def checkitem(self, checkitem_id):
"""
Returns a single checkitem
"""
return CheckItem(self, checkitem_id)
class Checklists(TrelloCollection):
path = 'checklists'
def get(self, *args, **kwargs):
raise base.MethodNotSupported() | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"fmt"
"strings"
"testing"
"go.uber.org/zap"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
etcdctlcmd "go.etcd.io/etcd/etcdctl/v3/ctlv3/command"
"go.etcd.io/etcd/tests/v3/framework/config"
intf "go.etcd.io/etcd/tests/v3/framework/interfaces"
)
type integrationRunner struct{}
func NewIntegrationRunner() intf.TestRunner {
return &integrationRunner{}
}
func (e integrationRunner) TestMain(m *testing.M) {
testutil.MustTestMainWithLeakDetection(m)
}
func (e integrationRunner) BeforeTest(tb testing.TB) {
BeforeTest(tb)
}
func (e integrationRunner) NewCluster(ctx context.Context, tb testing.TB, opts ...config.ClusterOption) intf.Cluster {
var err error
cfg := config.NewClusterConfig(opts...)
integrationCfg := ClusterConfig{
Size: cfg.ClusterSize,
QuotaBackendBytes: cfg.QuotaBackendBytes,
DisableStrictReconfigCheck: !cfg.StrictReconfigCheck,
AuthToken: cfg.AuthToken,
SnapshotCount: cfg.SnapshotCount,
}
integrationCfg.ClientTLS, err = tlsInfo(tb, cfg.ClientTLS)
if err != nil {
tb.Fatalf("ClientTLS: %s", err)
}
integrationCfg.PeerTLS, err = tlsInfo(tb, cfg.PeerTLS)
if err != nil {
tb.Fatalf("PeerTLS: %s", err)
}
if cfg.ClusterContext != nil {
if ctx, ok := cfg.ClusterContext.(*ClusterContext); ok && ctx != nil {
integrationCfg.UseTCP = !ctx.UseUnix
integrationCfg.UseIP = !ctx.UseUnix
}
}
return &integrationCluster{
Cluster: NewCluster(tb, &integrationCfg),
t: tb,
ctx: ctx,
}
}
func tlsInfo(tb testing.TB, cfg config.TLSConfig) (*transport.TLSInfo, error) {
switch cfg {
case config.NoTLS:
return nil, nil
case config.AutoTLS:
tls, err := transport.SelfCert(zap.NewNop(), tb.TempDir(), []string{"localhost"}, 1)
if err != nil {
return nil, fmt.Errorf("failed to generate cert: %w", err)
}
return &tls, nil
case config.ManualTLS:
return &TestTLSInfo, nil
default:
return nil, fmt.Errorf("config %q not supported", cfg)
}
}
type integrationCluster struct {
*Cluster
t testing.TB
ctx context.Context
}
func (c *integrationCluster) Members() (ms []intf.Member) {
for _, m := range c.Cluster.Members {
ms = append(ms, integrationMember{Member: m, t: c.t})
}
return ms
}
func (c *integrationCluster) TemplateEndpoints(tb testing.TB, pattern string) []string {
tb.Helper()
var endpoints []string
for _, m := range c.Cluster.Members {
ent := pattern
ent = strings.ReplaceAll(ent, "${MEMBER_PORT}", m.GRPCPortNumber())
ent = strings.ReplaceAll(ent, "${MEMBER_NAME}", m.Name)
endpoints = append(endpoints, ent)
}
return endpoints
}
func templateAuthority(tb testing.TB, pattern string, m *Member) string {
tb.Helper()
authority := pattern
authority = strings.ReplaceAll(authority, "${MEMBER_PORT}", m.GRPCPortNumber())
authority = strings.ReplaceAll(authority, "${MEMBER_NAME}", m.Name)
return authority
}
func (c *integrationCluster) AssertAuthority(tb testing.TB, expectedAuthorityPattern string) {
tb.Helper()
const filterMethod = "/etcdserverpb.KV/Put"
for _, m := range c.Cluster.Members {
expectedAuthority := templateAuthority(tb, expectedAuthorityPattern, m)
requestsFound := 0
for _, r := range m.RecordedRequests() {
if r.FullMethod != filterMethod {
continue
}
if r.Authority == expectedAuthority {
requestsFound++
} else {
tb.Errorf("Got unexpected authority header, member %q, request %q, got %q, expected %q", m.Name, r.FullMethod, r.Authority, expectedAuthority)
}
}
if requestsFound == 0 {
tb.Errorf("Expect at least one request with matched authority header value was recorded by the server intercepter on member %s but got 0", m.Name)
}
}
}
type integrationMember struct {
*Member
t testing.TB
}
func (m integrationMember) Client() intf.Client {
return integrationClient{Client: m.Member.Client}
}
func (m integrationMember) Start(ctx context.Context) error {
return m.Member.Restart(m.t)
}
func (m integrationMember) Stop() {
m.Member.Stop(m.t)
}
func (c *integrationCluster) Close() error {
c.Terminate(c.t)
return nil
}
func (c *integrationCluster) Client(opts ...config.ClientOption) (intf.Client, error) {
cc, err := c.ClusterClient(c.t, opts...)
if err != nil {
return nil, err
}
return integrationClient{Client: cc}, nil
}
type integrationClient struct {
*clientv3.Client
}
// Get performs a range read for key, translating config.GetOptions into
// clientv3 op options (revision, range end, serializable, prefix, limit,
// from-key, count-only, keys-only, sort, and create/mod revision bounds).
// A non-zero Timeout bounds the call with a derived context.
func (c integrationClient) Get(ctx context.Context, key string, o config.GetOptions) (*clientv3.GetResponse, error) {
	if o.Timeout != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, o.Timeout)
		defer cancel()
	}
	var clientOpts []clientv3.OpOption
	if o.Revision != 0 {
		clientOpts = append(clientOpts, clientv3.WithRev(int64(o.Revision)))
	}
	if o.End != "" {
		clientOpts = append(clientOpts, clientv3.WithRange(o.End))
	}
	if o.Serializable {
		clientOpts = append(clientOpts, clientv3.WithSerializable())
	}
	if o.Prefix {
		clientOpts = append(clientOpts, clientv3.WithPrefix())
	}
	if o.Limit != 0 {
		clientOpts = append(clientOpts, clientv3.WithLimit(int64(o.Limit)))
	}
	if o.FromKey {
		clientOpts = append(clientOpts, clientv3.WithFromKey())
	}
	if o.CountOnly {
		clientOpts = append(clientOpts, clientv3.WithCountOnly())
	}
	if o.KeysOnly {
		clientOpts = append(clientOpts, clientv3.WithKeysOnly())
	}
	// Only attach a sort option when it differs from the defaults.
	if o.SortBy != clientv3.SortByKey || o.Order != clientv3.SortNone {
		clientOpts = append(clientOpts, clientv3.WithSort(o.SortBy, o.Order))
	}
	if o.MaxCreateRevision != 0 {
		clientOpts = append(clientOpts, clientv3.WithMaxCreateRev(int64(o.MaxCreateRevision)))
	}
	if o.MinCreateRevision != 0 {
		clientOpts = append(clientOpts, clientv3.WithMinCreateRev(int64(o.MinCreateRevision)))
	}
	if o.MaxModRevision != 0 {
		clientOpts = append(clientOpts, clientv3.WithMaxModRev(int64(o.MaxModRevision)))
	}
	if o.MinModRevision != 0 {
		clientOpts = append(clientOpts, clientv3.WithMinModRev(int64(o.MinModRevision)))
	}
	return c.Client.Get(ctx, key, clientOpts...)
}
// Put writes key=value, honoring an optional per-call timeout and lease.
func (c integrationClient) Put(ctx context.Context, key, value string, opts config.PutOptions) (*clientv3.PutResponse, error) {
	if opts.Timeout != 0 {
		timeoutCtx, cancel := context.WithTimeout(ctx, opts.Timeout)
		defer cancel()
		ctx = timeoutCtx
	}
	var opOpts []clientv3.OpOption
	if opts.LeaseID != 0 {
		opOpts = append(opOpts, clientv3.WithLease(opts.LeaseID))
	}
	return c.Client.Put(ctx, key, value, opOpts...)
}
// Delete removes key, optionally widened to a prefix, range, or
// from-key span via the delete options.
func (c integrationClient) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) {
	opOpts := make([]clientv3.OpOption, 0, 3)
	if o.Prefix {
		opOpts = append(opOpts, clientv3.WithPrefix())
	}
	if o.FromKey {
		opOpts = append(opOpts, clientv3.WithFromKey())
	}
	if o.End != "" {
		opOpts = append(opOpts, clientv3.WithRange(o.End))
	}
	return c.Client.Delete(ctx, key, opOpts...)
}
// Compact compacts KV history up to rev, optionally physically and/or
// under a per-call timeout.
func (c integrationClient) Compact(ctx context.Context, rev int64, o config.CompactOption) (*clientv3.CompactResponse, error) {
	if o.Timeout != 0 {
		timeoutCtx, cancel := context.WithTimeout(ctx, o.Timeout)
		defer cancel()
		ctx = timeoutCtx
	}
	var compactOpts []clientv3.CompactOption
	if o.Physical {
		compactOpts = append(compactOpts, clientv3.WithCompactPhysical())
	}
	return c.Client.Compact(ctx, rev, compactOpts...)
}
// Status queries the status endpoint of every configured endpoint,
// failing fast on the first error.
func (c integrationClient) Status(ctx context.Context) ([]*clientv3.StatusResponse, error) {
	endpoints := c.Client.Endpoints()
	var resp []*clientv3.StatusResponse
	for _, ep := range endpoints {
		status, err := c.Client.Status(ctx, ep)
		if err != nil {
			return nil, err
		}
		resp = append(resp, status)
	}
	return resp, nil
}
// HashKV fetches the KV-store hash at revision rev from every endpoint,
// failing fast on the first error.
func (c integrationClient) HashKV(ctx context.Context, rev int64) ([]*clientv3.HashKVResponse, error) {
	endpoints := c.Client.Endpoints()
	var resp []*clientv3.HashKVResponse
	for _, ep := range endpoints {
		hashKV, err := c.Client.HashKV(ctx, ep, rev)
		if err != nil {
			return nil, err
		}
		resp = append(resp, hashKV)
	}
	return resp, nil
}
// Health checks the gRPC health service on the client's active connection
// and returns an error unless the reported status is SERVING.
func (c integrationClient) Health(ctx context.Context) error {
	cli := healthpb.NewHealthClient(c.Client.ActiveConnection())
	resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return err
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		return fmt.Errorf("status expected %s, got %s", healthpb.HealthCheckResponse_SERVING, resp.Status)
	}
	return nil
}
// Defragment defragments every endpoint sequentially; a non-zero Timeout
// bounds the whole pass with a single shared derived context.
func (c integrationClient) Defragment(ctx context.Context, o config.DefragOption) error {
	if o.Timeout != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, o.Timeout)
		defer cancel()
	}
	for _, ep := range c.Endpoints() {
		_, err := c.Client.Defragment(ctx, ep)
		if err != nil {
			return err
		}
	}
	return nil
}
// TimeToLive queries a lease's remaining TTL, optionally listing its
// attached keys.
func (c integrationClient) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
	var opts []clientv3.LeaseOption
	if o.WithAttachedKeys {
		opts = []clientv3.LeaseOption{clientv3.WithAttachedKeys()}
	}
	return c.Client.TimeToLive(ctx, id, opts...)
}
// The methods below are thin pass-throughs from the test client interface
// to the embedded clientv3.Client; error-only variants drop the response
// to match the interface signature.

// Lease pass-throughs.
func (c integrationClient) Leases(ctx context.Context) (*clientv3.LeaseLeasesResponse, error) {
	return c.Client.Leases(ctx)
}
func (c integrationClient) KeepAliveOnce(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) {
	return c.Client.KeepAliveOnce(ctx, id)
}
func (c integrationClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) {
	return c.Client.Revoke(ctx, id)
}
// Auth toggles: only the error is surfaced.
func (c integrationClient) AuthEnable(ctx context.Context) error {
	_, err := c.Client.AuthEnable(ctx)
	return err
}
func (c integrationClient) AuthDisable(ctx context.Context) error {
	_, err := c.Client.AuthDisable(ctx)
	return err
}
func (c integrationClient) AuthStatus(ctx context.Context) (*clientv3.AuthStatusResponse, error) {
	return c.Client.AuthStatus(ctx)
}
// User management pass-throughs.
func (c integrationClient) UserAdd(ctx context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) {
	return c.Client.UserAddWithOptions(ctx, name, password, &clientv3.UserAddOptions{
		NoPassword: opts.NoPassword,
	})
}
func (c integrationClient) UserGet(ctx context.Context, name string) (*clientv3.AuthUserGetResponse, error) {
	return c.Client.UserGet(ctx, name)
}
func (c integrationClient) UserList(ctx context.Context) (*clientv3.AuthUserListResponse, error) {
	return c.Client.UserList(ctx)
}
func (c integrationClient) UserDelete(ctx context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) {
	return c.Client.UserDelete(ctx, name)
}
func (c integrationClient) UserChangePass(ctx context.Context, user, newPass string) error {
	_, err := c.Client.UserChangePassword(ctx, user, newPass)
	return err
}
func (c integrationClient) UserGrantRole(ctx context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) {
	return c.Client.UserGrantRole(ctx, user, role)
}
func (c integrationClient) UserRevokeRole(ctx context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) {
	return c.Client.UserRevokeRole(ctx, user, role)
}
// Role management pass-throughs.
func (c integrationClient) RoleAdd(ctx context.Context, name string) (*clientv3.AuthRoleAddResponse, error) {
	return c.Client.RoleAdd(ctx, name)
}
func (c integrationClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) {
	return c.Client.RoleGrantPermission(ctx, name, key, rangeEnd, permType)
}
func (c integrationClient) RoleGet(ctx context.Context, role string) (*clientv3.AuthRoleGetResponse, error) {
	return c.Client.RoleGet(ctx, role)
}
func (c integrationClient) RoleList(ctx context.Context) (*clientv3.AuthRoleListResponse, error) {
	return c.Client.RoleList(ctx)
}
func (c integrationClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) {
	return c.Client.RoleRevokePermission(ctx, role, key, rangeEnd)
}
func (c integrationClient) RoleDelete(ctx context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) {
	return c.Client.RoleDelete(ctx, role)
}
// Txn runs a transaction. compares are etcdctl-style compare expressions;
// ifSuccess/ifFail are etcdctl-style op strings ("get k", "put k v",
// "del k") applied on the then/else branch respectively.
// (Renamed parameter "ifSucess" -> "ifSuccess"; Go parameter names are not
// part of the method's call contract, so callers are unaffected.)
func (c integrationClient) Txn(ctx context.Context, compares, ifSuccess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) {
	var cmps []clientv3.Cmp
	for _, expr := range compares {
		cmp, err := etcdctlcmd.ParseCompare(expr)
		if err != nil {
			return nil, err
		}
		cmps = append(cmps, *cmp)
	}
	return c.Client.Txn(ctx).
		If(cmps...).
		Then(getOps(ifSuccess)...).
		Else(getOps(ifFail)...).
		Commit()
}
// getOps translates etcdctl-style op strings into clientv3 ops.
// Supported forms: "get <key>", "put <key> <value>", "del <key>".
// Blank or malformed entries (missing arguments) are skipped instead of
// panicking with an index-out-of-range as the previous version did.
func getOps(ss []string) []clientv3.Op {
	var ops []clientv3.Op
	for _, s := range ss {
		args := etcdctlcmd.Argify(strings.TrimSpace(s))
		if len(args) == 0 {
			continue
		}
		switch args[0] {
		case "get":
			if len(args) >= 2 {
				ops = append(ops, clientv3.OpGet(args[1]))
			}
		case "put":
			if len(args) >= 3 {
				ops = append(ops, clientv3.OpPut(args[1], args[2]))
			}
		case "del":
			if len(args) >= 2 {
				ops = append(ops, clientv3.OpDelete(args[1]))
			}
		}
	}
	return ops
}
// Watch opens a watch on key, honoring prefix/revision/range options.
func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
	watchOpts := make([]clientv3.OpOption, 0, 3)
	if opts.Prefix {
		watchOpts = append(watchOpts, clientv3.WithPrefix())
	}
	if opts.Revision != 0 {
		watchOpts = append(watchOpts, clientv3.WithRev(opts.Revision))
	}
	if opts.RangeEnd != "" {
		watchOpts = append(watchOpts, clientv3.WithRange(opts.RangeEnd))
	}
	return c.Client.Watch(ctx, key, watchOpts...)
}
// MemberAdd adds a voting member. The name argument is ignored because
// clientv3 identifies a member by its peer URLs only.
func (c integrationClient) MemberAdd(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
	return c.Client.MemberAdd(ctx, peerAddrs)
}
// MemberAddAsLearner adds a non-voting (learner) member; name is ignored.
func (c integrationClient) MemberAddAsLearner(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) {
	return c.Client.MemberAddAsLearner(ctx, peerAddrs)
}
// MemberRemove removes the member with the given ID.
func (c integrationClient) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) {
	return c.Client.MemberRemove(ctx, id)
}
// MemberList lists members, optionally as a serializable (local) read.
func (c integrationClient) MemberList(ctx context.Context, serializable bool) (*clientv3.MemberListResponse, error) {
	if serializable {
		return c.Client.MemberList(ctx, clientv3.WithSerializable())
	}
	return c.Client.MemberList(ctx)
}
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
def enable_logging(debug=False, path=None, stream=None):
    """Enable logging to a file at ``path`` and/or a console ``stream``.

    This function is available for debugging purposes. If you wish to
    log this package's messages in your application, the standard library
    ``logging`` package will receive these messages in any handlers you
    create.

    :param bool debug: ``True`` to receive debug messages (including HTTP
                       requests and responses), ``False`` for warnings only.
    :param str path: If given, logging output is also written to this file
                     (passed to ``logging.FileHandler``, which appends to
                     the file and creates it if needed).
    :param stream: One of ``None``, ``sys.stdout`` or ``sys.stderr``.
                   When not ``None``, console output is logged to it.
    :raises ValueError: if neither ``path`` nor ``stream`` is set.
    :rtype: None
    """
    if path is None and stream is None:
        raise ValueError("path and/or stream must be set")

    logger = logging.getLogger('openstack')
    fmt = logging.Formatter('%(asctime)s %(levelname)s: %(name)s %(message)s')

    handlers = []
    if stream is not None:
        handlers.append(logging.StreamHandler(stream))
    if path is not None:
        handlers.append(logging.FileHandler(path))
    for handler in handlers:
        handler.setFormatter(fmt)
        logger.addHandler(handler)

    logger.setLevel(logging.DEBUG if debug else logging.WARNING)
def urljoin(*args):
    """Join string fragments into a slash-separated path.

    Unlike the stdlib ``urljoin``, this ignores web semantics (an anchored
    link such as ``/path`` against ``http://host/path``); it simply strips
    surrounding slashes from each piece and joins them with ``/``. Falsy
    arguments (``None``, ``''``) become empty segments.
    """
    parts = [str(piece or '').strip('/') for piece in args]
    return '/'.join(parts)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Red Hat, Inc.
# Tim Bielawa <tbielawa@redhat.com>
# Magnus Hedemark <mhedemar@redhat.com>
# Copyright 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: xml
short_description: Manage bits and pieces of XML files or strings
description:
- A CRUD-like interface to managing bits of XML files.
- You might also be interested in a brief tutorial from U(http://www.w3schools.com/xpath/)
and U(https://developer.mozilla.org/en-US/docs/Web/XPath).
version_added: '2.4'
options:
path:
description:
- Path to the file to operate on. File must exist ahead of time.
- This parameter is required, unless C(xmlstring) is given.
required: yes
aliases: [ dest, file ]
xmlstring:
description:
- A string containing XML on which to operate.
- This parameter is required, unless C(path) is given.
required: yes
xpath:
description:
- A valid XPath expression describing the item(s) you want to manipulate.
- Operates on the document root, C(/), by default.
default: /
namespaces:
description:
- The namespace C(prefix:uri) mapping for the XPath expression.
- Needs to be a C(dict), not a C(list) of items.
state:
description:
- Set or remove an xpath selection (node(s), attribute(s)).
default: present
choices: [ absent, present ]
aliases: [ ensure ]
attribute:
description:
- The attribute to select when using parameter C(value).
- This is a string, not prepended with C(@).
value:
description:
- Desired state of the selected attribute.
- Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
- Elements default to no value (but present).
- Attributes default to an empty string.
add_children:
description:
- Add additional child-element(s) to a selected element for a given C(xpath).
- Child elements must be given in a list and each item may be either a string
(eg. C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- This parameter requires C(xpath) to be set.
set_children:
description:
- Set the child-element(s) of a selected element for a given C(xpath).
- Removes any existing children.
- Child elements must be specified as in C(add_children).
- This parameter requires C(xpath) to be set.
count:
description:
- Search for a given C(xpath) and provide the count of any matches.
- This parameter requires C(xpath) to be set.
type: bool
default: 'no'
print_match:
description:
- Search for a given C(xpath) and print out any matches.
- This parameter requires C(xpath) to be set.
type: bool
default: 'no'
pretty_print:
description:
- Pretty print XML output.
type: bool
default: 'no'
content:
description:
- Search for a given C(xpath) and get content.
- This parameter requires C(xpath) to be set.
choices: [ attribute, text ]
input_type:
description:
- Type of input for C(add_children) and C(set_children).
choices: [ xml, yaml ]
default: yaml
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
requirements:
- lxml >= 2.3.0
notes:
- Use the C(--check) and C(--diff) options when testing your expressions.
- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter.
  - Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
- More information about this module is available from the community wiki at U(https://github.com/ansible/community/wiki/Module:-xml)
author:
- Tim Bielawa (@tbielawa)
- Magnus Hedemark (@magnus919)
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Remove the subjective attribute of the rating element
xml:
path: /foo/bar.xml
xpath: /business/rating/@subjective
state: absent
- name: Set the rating to 11
xml:
path: /foo/bar.xml
xpath: /business/rating
value: 11
# Retrieve and display the number of nodes
- name: Get count of beers nodes
xml:
path: /foo/bar.xml
xpath: /business/beers/beer
count: yes
register: hits
- debug:
var: hits.count
- name: Add a phonenumber element to the business element
xml:
path: /foo/bar.xml
xpath: /business/phonenumber
value: 555-555-1234
- name: Add several more beers to the beers element
xml:
path: /foo/bar.xml
xpath: /business/beers
add_children:
- beer: Old Rasputin
- beer: Old Motor Oil
- beer: Old Curmudgeon
- name: Add a validxhtml element to the website element
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
- name: Add an empty validatedon attribute to the validxhtml element
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml/@validatedon
- name: Add or modify an attribute, add element if needed
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
attribute: validatedon
value: 1976-08-05
# How to read an attribute value and access it in Ansible
- name: Read attribute value
xml:
path: /foo/bar.xml
xpath: /business/website/validxhtml
content: attribute
attribute: validatedon
register: xmlresp
- name: Show attribute value
debug:
var: xmlresp.matches[0].validxhtml.validatedon
- name: Remove all children from the website element (option 1)
xml:
path: /foo/bar.xml
xpath: /business/website/*
state: absent
- name: Remove all children from the website element (option 2)
xml:
path: /foo/bar.xml
xpath: /business/website
children: []
# In case of namespaces, like in the below XML, they have to be explicitly stated
# NOTE: there's the prefix "x" in front of the "bar", too
#<?xml version='1.0' encoding='UTF-8'?>
#<foo xmlns="http://x.test" xmlns:attr="http://z.test">
# <bar>
# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
# </bar>
#</foo>
- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
xml:
path: foo.xml
xpath: /x:foo/x:bar/y:baz
namespaces:
x: http://x.test
y: http://y.test
z: http://z.test
attribute: z:my_namespaced_attribute
value: 'false'
'''
RETURN = r'''
actions:
description: A dictionary with the original xpath, namespaces and state.
type: dict
returned: success
    sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
backup_file:
description: The name of the backup file that was created
type: str
returned: when backup=yes
sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
count:
description: The count of xpath matches.
type: int
returned: when parameter 'count' is set
sample: 2
matches:
description: The xpath matches found.
type: list
returned: when parameter 'print_match' is set
msg:
description: A message related to the performed action(s).
type: string
returned: always
xmlstring:
description: An XML string of the resulting output.
type: string
returned: when parameter 'xmlstring' is set
'''
import copy
import json
import os
import re
import traceback
from collections import MutableMapping
from distutils.version import LooseVersion
from io import BytesIO
try:
from lxml import etree, objectify
HAS_LXML = True
except ImportError:
HAS_LXML = False
from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_bytes, to_native
# Lexical pieces for decomposing "simple XPath" expressions into creatable
# steps: an identifier, optionally namespace-prefixed (ns:name).
_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
_XPSTR = "('(?:.*)'|\"(?:.*)\")"
# Patterns that peel the last step off a simple XPath; see split_xpath_last().
_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
def has_changed(doc):
    # Return True if `doc` differs from the module-global `orig_doc`.
    # Both trees are round-tripped through objectify so purely lexical
    # differences do not count.
    # NOTE(review): depends on the global ``orig_doc`` being assigned
    # elsewhere (presumably in main(), outside this view) -- confirm.
    orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
    obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
    return (orig_obj != obj)
def do_print_match(module, tree, xpath, namespaces):
    # Report the document path of every node matched by `xpath`, then exit
    # the module through finish() with changed=False.
    match = tree.xpath(xpath, namespaces=namespaces)
    match_xpaths = []
    for m in match:
        match_xpaths.append(tree.getpath(m))
    match_str = json.dumps(match_xpaths)
    msg = "selector '%s' match: %s" % (xpath, match_str)
    finish(module, tree, xpath, namespaces, changed=False, msg=msg)
def count_nodes(module, tree, xpath, namespaces):
    """ Return the count of nodes matching the xpath """
    # NOTE(review): a "/" is prepended unconditionally, so an absolute
    # xpath like "/a/b" evaluates as "count(//a/b)" (descendant search) --
    # confirm this is the intended matching semantics.
    hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
    msg = "found %d nodes" % hits
    finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
def is_node(tree, xpath, namespaces):
    """ Test if a given xpath matches anything and if that match is a node.

    For now we just assume you're only searching for one specific thing."""
    if not xpath_matches(tree, xpath, namespaces):
        return False
    first = tree.xpath(xpath, namespaces=namespaces)[0]
    return isinstance(first, etree._Element)
def is_attribute(tree, xpath, namespaces):
    """ Test if a given xpath matches and that match is an attribute.

    An xpath attribute search will only match one item."""
    if not xpath_matches(tree, xpath, namespaces):
        return False
    first = tree.xpath(xpath, namespaces=namespaces)[0]
    return isinstance(first, (etree._ElementStringResult, etree._ElementUnicodeResult))
def xpath_matches(tree, xpath, namespaces):
    """ Test if a node exists """
    # An empty result list is falsy; anything matched means True.
    return bool(tree.xpath(xpath, namespaces=namespaces))
def delete_xpath_target(module, tree, xpath, namespaces):
    """ Delete an attribute or element from a tree """
    try:
        for result in tree.xpath(xpath, namespaces=namespaces):
            # Get the xpath for this result
            if is_attribute(tree, xpath, namespaces):
                # Delete an attribute
                parent = result.getparent()
                # Pop this attribute match out of the parent
                # node's 'attrib' dict by using this match's
                # 'attrname' attribute for the key
                parent.attrib.pop(result.attrname)
            elif is_node(tree, xpath, namespaces):
                # Delete an element
                result.getparent().remove(result)
            else:
                raise Exception("Impossible error")
    except Exception as e:
        module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
    else:
        # Only report success (and exit) when every match deleted cleanly.
        finish(module, tree, xpath, namespaces, changed=True)
def replace_children_of(children, match):
    """Replace every existing child of `match` with `children`."""
    existing = list(match.getchildren())
    for node in existing:
        match.remove(node)
    match.extend(children)
def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
    # Ensure each xpath match has exactly `children` as its child list;
    # returns True if any match was modified.
    matches = tree.xpath(xpath, namespaces=namespaces)
    # Create a list of our new children
    children = children_to_nodes(module, children, in_type)
    children_as_string = [etree.tostring(c) for c in children]
    changed = False
    # xpaths always return matches as a list, so....
    # NOTE(review): the same child node objects are reused across matches;
    # lxml moves (not copies) nodes on extend, so with multiple matches the
    # children end up only under the last modified match -- confirm usage
    # is effectively single-match.
    for match in matches:
        # Check if elements differ
        if len(match.getchildren()) == len(children):
            for idx, element in enumerate(match.getchildren()):
                if etree.tostring(element) != children_as_string[idx]:
                    replace_children_of(children, match)
                    changed = True
                    break
        else:
            replace_children_of(children, match)
            changed = True
    return changed
def set_target_children(module, tree, xpath, namespaces, children, in_type):
    # Replace children on all matches, then write the result out and exit.
    changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
    # Write it out
    finish(module, tree, xpath, namespaces, changed=changed)
def add_target_children(module, tree, xpath, namespaces, children, in_type):
    # Append new child nodes to every element matched by `xpath`; if the
    # xpath does not resolve to a node, exit unchanged.
    if is_node(tree, xpath, namespaces):
        new_kids = children_to_nodes(module, children, in_type)
        for node in tree.xpath(xpath, namespaces=namespaces):
            node.extend(new_kids)
        finish(module, tree, xpath, namespaces, changed=True)
    else:
        finish(module, tree, xpath, namespaces)
def _extract_xpstr(g):
return g[1:-1]
def split_xpath_last(xpath):
    """split an XPath of the form /foo/bar/baz into /foo/bar and baz

    Returns (parent_xpath, changes) where changes is a list of
    (element-or-attribute, value) pairs describing what the last path step
    requires; an unrecognized xpath yields (xpath, []).
    """
    xpath = xpath.strip()
    m = _RE_SPLITSIMPLELAST.match(xpath)
    if m:
        # requesting an element to exist
        return (m.group(1), [(m.group(2), None)])
    m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
    if m:
        # requesting an element to exist with an inner text
        return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
    m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
    if m:
        # requesting an attribute to exist
        return (m.group(1), [(m.group(2), None)])
    m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
    if m:
        # requesting an attribute to exist with a value
        return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
    m = _RE_SPLITSUBLAST.match(xpath)
    if m:
        # a predicate step: "and"-joined subexpressions become child changes
        content = [x.strip() for x in m.group(3).split(" and ")]
        return (m.group(1), [('/' + m.group(2), content)])
    m = _RE_SPLITONLYEQVALUE.match(xpath)
    if m:
        # requesting a change of inner text
        return (m.group(1), [("", _extract_xpstr(m.group(2)))])
    return (xpath, [])
def nsnameToClark(name, namespaces):
    """Convert a `prefix:local` name into Clark notation `{uri}local`.

    Names without a prefix are returned unchanged."""
    if ":" not in name:
        # no namespace name here
        return name
    prefix, local = name.split(":")
    return "{%s}%s" % (namespaces[prefix], local)
def check_or_make_target(module, tree, xpath, namespaces):
    # Recursively create the elements/attributes/text required so that
    # `xpath` exists; returns True when the tree was modified. Fails the
    # module when the xpath cannot be decomposed into creatable steps.
    (inner_xpath, changes) = split_xpath_last(xpath)
    if (inner_xpath == xpath) or (changes is None):
        module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
                             (xpath, etree.tostring(tree, pretty_print=True)))
        return False
    changed = False
    if not is_node(tree, inner_xpath, namespaces):
        changed = check_or_make_target(module, tree, inner_xpath, namespaces)
    # we test again after calling check_or_make_target
    if is_node(tree, inner_xpath, namespaces) and changes:
        for (eoa, eoa_value) in changes:
            if eoa and eoa[0] != '@' and eoa[0] != '/':
                # implicitly creating an element
                new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
                if eoa_value:
                    for nk in new_kids:
                        nk.text = eoa_value
                for node in tree.xpath(inner_xpath, namespaces=namespaces):
                    node.extend(new_kids)
                changed = True
                # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
            elif eoa and eoa[0] == '/':
                # a predicate step: create the element, then recurse to
                # satisfy each "and"-joined subexpression inside it
                element = eoa[1:]
                new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
                for node in tree.xpath(inner_xpath, namespaces=namespaces):
                    node.extend(new_kids)
                    for nk in new_kids:
                        for subexpr in eoa_value:
                            # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
                            #                      (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
                            check_or_make_target(module, nk, "./" + subexpr, namespaces)
                changed = True
                # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
            elif eoa == "":
                # bare text() step: set the inner text on each match
                for node in tree.xpath(inner_xpath, namespaces=namespaces):
                    if (node.text != eoa_value):
                        node.text = eoa_value
                        changed = True
            elif eoa and eoa[0] == '@':
                # attribute step: (create and) set the attribute value
                attribute = nsnameToClark(eoa[1:], namespaces)
                for element in tree.xpath(inner_xpath, namespaces=namespaces):
                    changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
                    if changing:
                        changed = changed or changing
                        if eoa_value is None:
                            value = ""
                        else:
                            value = eoa_value
                        element.attrib[attribute] = value
                    # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
                    #                      (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
            else:
                module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
    return changed
def ensure_xpath_exists(module, tree, xpath, namespaces):
    # Create the xpath target if it is missing, then report/exit via finish().
    changed = False
    if not is_node(tree, xpath, namespaces):
        changed = check_or_make_target(module, tree, xpath, namespaces)
    finish(module, tree, xpath, namespaces, changed)
def set_target_inner(module, tree, xpath, namespaces, attribute, value):
    # Set the text (or, when `attribute` is given, that attribute) of every
    # node matched by `xpath` to `value`, creating the target first if
    # necessary. Returns True when anything changed.
    changed = False
    try:
        if not is_node(tree, xpath, namespaces):
            changed = check_or_make_target(module, tree, xpath, namespaces)
    except Exception as e:
        missing_namespace = ""
        # NOTE: This checks only the namespaces defined in root element!
        # TODO: Implement a more robust check to check for child namespaces' existence
        if tree.getroot().nsmap and ":" not in xpath:
            missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
        module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
                             (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
    if not is_node(tree, xpath, namespaces):
        module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
                             (xpath, etree.tostring(tree, pretty_print=True)))
    for element in tree.xpath(xpath, namespaces=namespaces):
        if not attribute:
            changed = changed or (element.text != value)
            if element.text != value:
                element.text = value
        else:
            changed = changed or (element.get(attribute) != value)
            if ":" in attribute:
                # translate prefix:name into Clark notation for lxml
                attr_ns, attr_name = attribute.split(":")
                # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
                attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
            if element.get(attribute) != value:
                element.set(attribute, value)
    return changed
def set_target(module, tree, xpath, namespaces, attribute, value):
    # Apply set_target_inner, then write the result out and exit the module.
    changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
    finish(module, tree, xpath, namespaces, changed)
def get_element_text(module, tree, xpath, namespaces):
    # Collect {tag: text} for each node matched by `xpath` and exit via finish().
    if not is_node(tree, xpath, namespaces):
        module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
    elements = []
    for element in tree.xpath(xpath, namespaces=namespaces):
        elements.append({element.tag: element.text})
    finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
def get_element_attr(module, tree, xpath, namespaces):
    """Collect every attribute of each node matched by `xpath` and exit via finish()."""
    if not is_node(tree, xpath, namespaces):
        module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
    elements = []
    for element in tree.xpath(xpath, namespaces=namespaces):
        attrs = {key: element.get(key) for key in element.keys()}
        elements.append({element.tag: attrs})
    finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
def child_to_element(module, child, in_type):
    # Convert one child spec into an lxml Element. For 'xml' the spec is a
    # raw XML string; for 'yaml' it is either a plain tag name or a one-key
    # mapping {tag: value}. Invalid input fails the module.
    if in_type == 'xml':
        infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
        try:
            parser = etree.XMLParser()
            node = etree.parse(infile, parser)
            return node.getroot()
        except etree.XMLSyntaxError as e:
            module.fail_json(msg="Error while parsing child element: %s" % e)
    elif in_type == 'yaml':
        if isinstance(child, string_types):
            return etree.Element(child)
        elif isinstance(child, MutableMapping):
            if len(child) > 1:
                module.fail_json(msg="Can only create children from hashes with one key")
            (key, value) = next(iteritems(child))
            if isinstance(value, MutableMapping):
                # A nested mapping: its keys become attributes; the special
                # '_' key holds a list of grandchild specs.
                children = value.pop('_', None)
                node = etree.Element(key, value)
                if children is not None:
                    if not isinstance(children, list):
                        module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
                    subnodes = children_to_nodes(module, children)
                    node.extend(subnodes)
            else:
                # Scalar value becomes the element's text.
                node = etree.Element(key)
                node.text = value
            return node
        else:
            module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
    else:
        module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
def children_to_nodes(module=None, children=None, type='yaml'):
    """turn a str/hash/list of str&hash into a list of elements"""
    if children is None:
        return []
    return [child_to_element(module, child, type) for child in children]
def make_pretty(module, tree):
    """Reformat the XML document and report whether anything changed.

    Serializes ``tree`` (honouring the pretty_print option), compares the
    bytes against the on-disk file or the input string to decide
    ``changed``, rewrites the file unless in check mode, and exits the
    module via exit_json() — this function does not return.
    """
    xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])

    result = dict(
        changed=False,
    )

    if module.params['path']:
        xml_file = module.params['path']
        # Byte-for-byte comparison against the current file contents.
        with open(xml_file, 'rb') as xml_content:
            if xml_string != xml_content.read():
                result['changed'] = True
                if not module.check_mode:
                    if module.params['backup']:
                        result['backup_file'] = module.backup_local(module.params['path'])
                    tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])

    elif module.params['xmlstring']:
        result['xmlstring'] = xml_string
        # NOTE: Modifying a string is not considered a change !
        if xml_string != module.params['xmlstring']:
            result['changed'] = True

    module.exit_json(**result)
def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
    """Assemble the common module result, persist changes, and exit.

    The ``changed`` argument is not used directly; the result's changed
    flag comes from has_changed(tree), which presumably compares the
    working tree against the pristine global ``orig_doc`` — TODO confirm.
    Exits via exit_json(); does not return.
    """
    result = dict(
        actions=dict(
            xpath=xpath,
            namespaces=namespaces,
            state=module.params['state']
        ),
        changed=has_changed(tree),
    )

    # Only include count/matches when requested or non-trivial.
    if module.params['count'] or hitcount:
        result['count'] = hitcount

    if module.params['print_match'] or matches:
        result['matches'] = matches

    if msg:
        result['msg'] = msg

    if result['changed']:
        if module._diff:
            # Full before/after serializations for --diff output.
            result['diff'] = dict(
                before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
                after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
            )

        if module.params['path'] and not module.check_mode:
            if module.params['backup']:
                result['backup_file'] = module.backup_local(module.params['path'])
            tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])

    if module.params['xmlstring']:
        result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])

    module.exit_json(**result)
def main():
    """Entry point of the Ansible ``xml`` module.

    Builds the argument spec, validates the lxml runtime, loads the source
    document (file or string), then dispatches to exactly one query or
    mutation helper.  Every helper exits the module itself, so reaching
    the final fail_json() means no requested action matched.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', aliases=['dest', 'file']),
            xmlstring=dict(type='str'),
            xpath=dict(type='str'),
            namespaces=dict(type='dict', default={}),
            state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
            value=dict(type='raw'),
            attribute=dict(type='raw'),
            add_children=dict(type='list'),
            set_children=dict(type='list'),
            count=dict(type='bool', default=False),
            print_match=dict(type='bool', default=False),
            pretty_print=dict(type='bool', default=False),
            content=dict(type='str', choices=['attribute', 'text']),
            input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
            backup=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        # TODO: Implement this as soon as #28662 (required_by functionality) is merged
        # required_by=dict(
        #     add_children=['xpath'],
        #     attribute=['value'],
        #     set_children=['xpath'],
        #     value=['xpath'],
        # ),
        required_if=[
            ['content', 'attribute', ['xpath']],
            ['content', 'text', ['xpath']],
            ['count', True, ['xpath']],
            ['print_match', True, ['xpath']],
        ],
        required_one_of=[
            ['path', 'xmlstring'],
            ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
        ],
        mutually_exclusive=[
            ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
            ['path', 'xmlstring'],
        ],
    )

    # Unpack parameters into locals for readability below.
    xml_file = module.params['path']
    xml_string = module.params['xmlstring']
    xpath = module.params['xpath']
    namespaces = module.params['namespaces']
    state = module.params['state']
    value = json_dict_bytes_to_unicode(module.params['value'])
    attribute = module.params['attribute']
    set_children = json_dict_bytes_to_unicode(module.params['set_children'])
    add_children = json_dict_bytes_to_unicode(module.params['add_children'])
    pretty_print = module.params['pretty_print']
    content = module.params['content']
    input_type = module.params['input_type']
    print_match = module.params['print_match']
    count = module.params['count']
    backup = module.params['backup']

    # Check if we have lxml 2.3.0 or newer installed
    if not HAS_LXML:
        module.fail_json(msg='The xml ansible module requires the lxml python library installed on the managed machine')
    elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
        module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
    elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
        module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')

    # Check if the file exists
    if xml_string:
        infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
    elif os.path.isfile(xml_file):
        infile = open(xml_file, 'rb')
    else:
        module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)

    # Parse and evaluate xpath expression
    if xpath is not None:
        try:
            etree.XPath(xpath)
        except etree.XPathSyntaxError as e:
            module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
        except etree.XPathEvalError as e:
            module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))

    # Try to parse in the target XML file
    try:
        parser = etree.XMLParser(remove_blank_text=pretty_print)
        doc = etree.parse(infile, parser)
    except etree.XMLSyntaxError as e:
        module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))

    # Ensure we have the original copy to compare
    global orig_doc
    orig_doc = copy.deepcopy(doc)

    # Each branch below terminates the module itself (via finish/exit).
    if print_match:
        do_print_match(module, doc, xpath, namespaces)

    if count:
        count_nodes(module, doc, xpath, namespaces)

    if content == 'attribute':
        get_element_attr(module, doc, xpath, namespaces)
    elif content == 'text':
        get_element_text(module, doc, xpath, namespaces)

    # File exists:
    if state == 'absent':
        # - absent: delete xpath target
        delete_xpath_target(module, doc, xpath, namespaces)

    # - present: carry on

    # children && value both set?: should have already aborted by now
    # add_children && set_children both set?: should have already aborted by now

    # set_children set?
    if set_children:
        set_target_children(module, doc, xpath, namespaces, set_children, input_type)

    # add_children set?
    if add_children:
        add_target_children(module, doc, xpath, namespaces, add_children, input_type)

    # No?: Carry on

    # Is the xpath target an attribute selector?
    if value is not None:
        set_target(module, doc, xpath, namespaces, attribute, value)

    # If an xpath was provided, we need to do something with the data
    if xpath is not None:
        ensure_xpath_exists(module, doc, xpath, namespaces)

    # Otherwise only reformat the xml data?
    if pretty_print:
        make_pretty(module, doc)

    module.fail_json(msg="Don't know what to do")
# Standard Ansible boilerplate: run the module when executed directly.
if __name__ == '__main__':
    main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans;
import java.beans.IntrospectionException;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.jspecify.annotations.Nullable;
import org.springframework.core.ResolvableType;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
/**
* Common delegate methods for Spring's internal {@link PropertyDescriptor} implementations.
*
* @author Chris Beams
* @author Juergen Hoeller
* @author Sam Brannen
*/
abstract class PropertyDescriptorUtils {

    // Shared immutable empty-array constant to avoid repeated allocations.
    public static final PropertyDescriptor[] EMPTY_PROPERTY_DESCRIPTOR_ARRAY = {};

    /**
     * Simple introspection algorithm for basic set/get/is accessor methods,
     * building corresponding JavaBeans property descriptors for them.
     * <p>This just supports the basic JavaBeans conventions, without indexed
     * properties or any customizers, and without other BeanInfo metadata.
     * For standard JavaBeans introspection, use the JavaBeans Introspector.
     * @param beanClass the target class to introspect
     * @return a collection of property descriptors
     * @throws IntrospectionException from introspecting the given bean class
     * @since 5.3.24
     * @see SimpleBeanInfoFactory
     * @see java.beans.Introspector#getBeanInfo(Class)
     */
    public static Collection<? extends PropertyDescriptor> determineBasicProperties(Class<?> beanClass)
            throws IntrospectionException {

        // TreeMap keeps descriptors sorted by property name.
        Map<String, BasicPropertyDescriptor> pdMap = new TreeMap<>();

        for (Method method : beanClass.getMethods()) {
            String methodName = method.getName();

            // Classify the method as a JavaBeans accessor by prefix and arity.
            boolean setter;
            int nameIndex;
            if (methodName.startsWith("set") && method.getParameterCount() == 1) {
                setter = true;
                nameIndex = 3;
            }
            else if (methodName.startsWith("get") && method.getParameterCount() == 0 && method.getReturnType() != void.class) {
                setter = false;
                nameIndex = 3;
            }
            else if (methodName.startsWith("is") && method.getParameterCount() == 0 && method.getReturnType() == boolean.class) {
                setter = false;
                nameIndex = 2;
            }
            else {
                continue;
            }

            String propertyName = StringUtils.uncapitalizeAsProperty(methodName.substring(nameIndex));
            if (propertyName.isEmpty()) {
                continue;
            }

            BasicPropertyDescriptor pd = pdMap.get(propertyName);
            if (pd != null) {
                // Merge into the existing descriptor: collect setter candidates,
                // and prefer a more specific (covariant) getter return type.
                if (setter) {
                    pd.addWriteMethod(method);
                }
                else {
                    Method readMethod = pd.getReadMethod();
                    if (readMethod == null || readMethod.getReturnType().isAssignableFrom(method.getReturnType())) {
                        pd.setReadMethod(method);
                    }
                }
            }
            else {
                pd = new BasicPropertyDescriptor(propertyName, beanClass, (!setter ? method : null), (setter ? method : null));
                pdMap.put(propertyName, pd);
            }
        }

        return pdMap.values();
    }

    /**
     * See {@link java.beans.FeatureDescriptor}.
     */
    public static void copyNonMethodProperties(PropertyDescriptor source, PropertyDescriptor target) {
        target.setExpert(source.isExpert());
        target.setHidden(source.isHidden());
        target.setPreferred(source.isPreferred());
        target.setName(source.getName());
        target.setShortDescription(source.getShortDescription());
        target.setDisplayName(source.getDisplayName());

        // Copy all attributes (emulating behavior of private FeatureDescriptor#addTable)
        Enumeration<String> keys = source.attributeNames();
        while (keys.hasMoreElements()) {
            String key = keys.nextElement();
            target.setValue(key, source.getValue(key));
        }

        // See java.beans.PropertyDescriptor#PropertyDescriptor(PropertyDescriptor)
        target.setPropertyEditorClass(source.getPropertyEditorClass());
        target.setBound(source.isBound());
        target.setConstrained(source.isConstrained());
    }

    /**
     * See {@link java.beans.PropertyDescriptor#findPropertyType}.
     */
    public static @Nullable Class<?> findPropertyType(@Nullable Method readMethod, @Nullable Method writeMethod)
            throws IntrospectionException {

        Class<?> propertyType = null;

        if (readMethod != null) {
            if (readMethod.getParameterCount() != 0) {
                throw new IntrospectionException("Bad read method arg count: " + readMethod);
            }
            propertyType = readMethod.getReturnType();
            if (propertyType == void.class) {
                throw new IntrospectionException("Read method returns void: " + readMethod);
            }
        }

        if (writeMethod != null) {
            Class<?>[] params = writeMethod.getParameterTypes();
            if (params.length != 1) {
                throw new IntrospectionException("Bad write method arg count: " + writeMethod);
            }
            if (propertyType != null) {
                if (propertyType.isAssignableFrom(params[0])) {
                    // Write method's property type potentially more specific
                    propertyType = params[0];
                }
                else if (params[0].isAssignableFrom(propertyType)) {
                    // Proceed with read method's property type
                }
                else {
                    throw new IntrospectionException(
                            "Type mismatch between read and write methods: " + readMethod + " - " + writeMethod);
                }
            }
            else {
                propertyType = params[0];
            }
        }

        return propertyType;
    }

    /**
     * See {@link java.beans.IndexedPropertyDescriptor#findIndexedPropertyType}.
     */
    public static @Nullable Class<?> findIndexedPropertyType(String name, @Nullable Class<?> propertyType,
            @Nullable Method indexedReadMethod, @Nullable Method indexedWriteMethod) throws IntrospectionException {

        Class<?> indexedPropertyType = null;

        if (indexedReadMethod != null) {
            Class<?>[] params = indexedReadMethod.getParameterTypes();
            if (params.length != 1) {
                throw new IntrospectionException("Bad indexed read method arg count: " + indexedReadMethod);
            }
            if (params[0] != int.class) {
                throw new IntrospectionException("Non int index to indexed read method: " + indexedReadMethod);
            }
            indexedPropertyType = indexedReadMethod.getReturnType();
            if (indexedPropertyType == void.class) {
                throw new IntrospectionException("Indexed read method returns void: " + indexedReadMethod);
            }
        }

        if (indexedWriteMethod != null) {
            Class<?>[] params = indexedWriteMethod.getParameterTypes();
            if (params.length != 2) {
                throw new IntrospectionException("Bad indexed write method arg count: " + indexedWriteMethod);
            }
            if (params[0] != int.class) {
                throw new IntrospectionException("Non int index to indexed write method: " + indexedWriteMethod);
            }
            if (indexedPropertyType != null) {
                if (indexedPropertyType.isAssignableFrom(params[1])) {
                    // Write method's property type potentially more specific
                    indexedPropertyType = params[1];
                }
                else if (params[1].isAssignableFrom(indexedPropertyType)) {
                    // Proceed with read method's property type
                }
                else {
                    throw new IntrospectionException("Type mismatch between indexed read and write methods: " +
                            indexedReadMethod + " - " + indexedWriteMethod);
                }
            }
            else {
                indexedPropertyType = params[1];
            }
        }

        // A non-indexed accessor pair must expose an array of the indexed type.
        if (propertyType != null && (!propertyType.isArray() ||
                propertyType.componentType() != indexedPropertyType)) {
            throw new IntrospectionException("Type mismatch between indexed and non-indexed methods: " +
                    indexedReadMethod + " - " + indexedWriteMethod);
        }

        return indexedPropertyType;
    }

    /**
     * Compare the given {@code PropertyDescriptors} and return {@code true} if
     * they are equivalent, i.e. their read method, write method, property type,
     * property editor and flags are equivalent.
     * @see java.beans.PropertyDescriptor#equals(Object)
     */
    public static boolean equals(PropertyDescriptor pd, PropertyDescriptor otherPd) {
        return (ObjectUtils.nullSafeEquals(pd.getReadMethod(), otherPd.getReadMethod()) &&
                ObjectUtils.nullSafeEquals(pd.getWriteMethod(), otherPd.getWriteMethod()) &&
                ObjectUtils.nullSafeEquals(pd.getPropertyType(), otherPd.getPropertyType()) &&
                ObjectUtils.nullSafeEquals(pd.getPropertyEditorClass(), otherPd.getPropertyEditorClass()) &&
                pd.isBound() == otherPd.isBound() && pd.isConstrained() == otherPd.isConstrained());
    }


    /**
     * PropertyDescriptor for {@link #determineBasicProperties(Class)},
     * not performing any early type determination for
     * {@link #setReadMethod}/{@link #setWriteMethod}.
     * @since 5.3.24
     */
    private static class BasicPropertyDescriptor extends PropertyDescriptor {

        private final Class<?> beanClass;

        private @Nullable Method readMethod;

        private @Nullable Method writeMethod;

        // All setter overloads seen for this property; resolved lazily in getWriteMethod().
        private final List<Method> candidateWriteMethods = new ArrayList<>();

        public BasicPropertyDescriptor(String propertyName, Class<?> beanClass, @Nullable Method readMethod, @Nullable Method writeMethod)
                throws IntrospectionException {

            super(propertyName, readMethod, writeMethod);
            this.beanClass = beanClass;
        }

        @Override
        public void setReadMethod(@Nullable Method readMethod) {
            this.readMethod = readMethod;
        }

        @Override
        public @Nullable Method getReadMethod() {
            return this.readMethod;
        }

        @Override
        public void setWriteMethod(@Nullable Method writeMethod) {
            this.writeMethod = writeMethod;
        }

        void addWriteMethod(Method writeMethod) {
            // Since setWriteMethod() is invoked from the PropertyDescriptor(String, Method, Method)
            // constructor, this.writeMethod may be non-null.
            if (this.writeMethod != null) {
                this.candidateWriteMethods.add(this.writeMethod);
                this.writeMethod = null;
            }
            this.candidateWriteMethods.add(writeMethod);
        }

        @Override
        public @Nullable Method getWriteMethod() {
            if (this.writeMethod == null && !this.candidateWriteMethods.isEmpty()) {
                if (this.readMethod == null || this.candidateWriteMethods.size() == 1) {
                    this.writeMethod = this.candidateWriteMethods.get(0);
                }
                else {
                    Class<?> resolvedReadType =
                            ResolvableType.forMethodReturnType(this.readMethod, this.beanClass).toClass();
                    for (Method method : this.candidateWriteMethods) {
                        // 1) Check for an exact match against the resolved types.
                        Class<?> resolvedWriteType =
                                ResolvableType.forMethodParameter(method, 0, this.beanClass).toClass();
                        if (resolvedReadType.equals(resolvedWriteType)) {
                            this.writeMethod = method;
                            break;
                        }
                        // 2) Check if the candidate write method's parameter type is compatible with
                        // the read method's return type.
                        Class<?> parameterType = method.getParameterTypes()[0];
                        if (this.readMethod.getReturnType().isAssignableFrom(parameterType)) {
                            // If we haven't yet found a compatible write method, or if the current
                            // candidate's parameter type is a subtype of the previous candidate's
                            // parameter type, track the current candidate as the write method.
                            if (this.writeMethod == null ||
                                    this.writeMethod.getParameterTypes()[0].isAssignableFrom(parameterType)) {
                                this.writeMethod = method;
                                // We do not "break" here, since we need to compare the current candidate
                                // with all remaining candidates.
                            }
                        }
                    }
                }
            }
            return this.writeMethod;
        }
    }

}
# -*- coding: utf8 -*-
"""
Tests for pika.adapters.blocking_connection.BlockingChannel
"""
from collections import deque
import logging
try:
import mock
except ImportError:
from unittest import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
from pika.adapters import blocking_connection
from pika import callback
from pika import channel
from pika import frame
from pika import spec
BLOCKING_CHANNEL = 'pika.adapters.blocking_connection.BlockingChannel'
BLOCKING_CONNECTION = 'pika.adapters.blocking_connection.BlockingConnection'
class ChannelTemplate(channel.Channel):
    # Concrete channel_number so mock.Mock(spec=ChannelTemplate) exposes it.
    channel_number = 1
class BlockingChannelTests(unittest.TestCase):
    """Unit tests for BlockingChannel wired to a fully mocked impl channel."""

    @mock.patch(BLOCKING_CONNECTION)
    def _create_connection(self, connection=None):
        # mock.patch injects the patched BlockingConnection class as the
        # ``connection`` argument; it serves as the connection double.
        return connection

    def setUp(self):
        self.connection = self._create_connection()
        # Mock of the underlying pika.channel.Channel, constrained to the
        # ChannelTemplate spec so unexpected attribute access fails loudly.
        channelImplMock = mock.Mock(spec=ChannelTemplate,
                                    is_closing=False,
                                    is_closed=False,
                                    is_open=True)
        self.obj = blocking_connection.BlockingChannel(channelImplMock,
                                                       self.connection)

    def tearDown(self):
        del self.connection
        del self.obj

    def test_init_initial_value_confirmation(self):
        self.assertFalse(self.obj._delivery_confirmation)

    def test_init_initial_value_pending_events(self):
        self.assertEqual(self.obj._pending_events, deque())

    def test_init_initial_value_buback_return(self):
        self.assertIsNone(self.obj._puback_return)

    def test_basic_consume(self):
        # basic_consume should register the generated tag as an ACTIVE consumer.
        with mock.patch.object(self.obj._impl, '_generate_consumer_tag'):
            self.obj._impl._generate_consumer_tag.return_value = 'ctag0'
            self.obj._impl.basic_consume.return_value = 'ctag0'
            self.obj.basic_consume(mock.Mock(), "queue")
            self.assertEqual(self.obj._consumer_infos['ctag0'].state,
                             blocking_connection._ConsumerInfo.ACTIVE)
import logging
logger = logging.getLogger(__name__)
from redbean.secure.identity import SessionIdentity
from redbean.secure.keeper import UserIdentityKeeper
from redbean.asyncid import AsyncID64
from test.security.app import rest, etcd_endpoint
# Module-wide singletons: a distributed user-id sequence backed by etcd,
# and the identity keeper that owns credential storage and verification.
user_id_generator = AsyncID64('/asyncid/user_sn', etcd_endpoint)
keeper = UserIdentityKeeper(etcd_endpoint, user_id_generator=user_id_generator)
# rest.
rest.set_path('.')
@rest.post('login')
@rest.prepare_session
async def login(json_body: dict) -> SessionIdentity:
    """Authenticate a user and open a session.

    Expects 'client_id', 'identity' and 'passwd' keys in the JSON body.
    The returned SessionIdentity is installed by @rest.prepare_session.
    """
    client_id = json_body.get('client_id')
    identity = json_body.get('identity')
    passwd = json_body.get('passwd')
    # NOTE(review): presumably check_passwd raises on bad credentials
    # rather than returning None — confirm against UserIdentityKeeper.
    identity = await keeper.check_passwd(identity, passwd)
    identity.client_id = client_id
    return identity
@rest.post('logout')
@rest.close_session
async def logout(identity: SessionIdentity) -> None:
    """Close the caller's session; @rest.close_session does the teardown."""
    logger.debug(f'signout {identity}')
@rest.post('identity/new')
@rest.prepare_session
async def create_identity(json_body: dict) -> SessionIdentity:
    """Register a new login identity and open a session for it."""
    login_id = json_body.get('identity')
    passwd = json_body.get('passwd')
    identity = await keeper.create_identity(login_id, passwd)
    return identity
@rest.permission_verifier
async def verify_permissions(identity: SessionIdentity, permissions):
    """Check that the session's user holds every requested permission."""
    return await keeper.verify_permissions(identity.user_id, *permissions)
@rest.on_cleanup
async def cleanup():
    """On application shutdown, stop the id generator and await completion."""
    user_id_generator.stop()
    await user_id_generator.stopped()
# @rest.get('verify_email/{token}')
# @rest.prepare_session
# async def verify_email(token: str) -> SessionIdentity:
# """ 使用邮件确认链接确认其使用本人邮件地址作为登录标识 """
# assert token
# identity = await keeper.verify_email(token)
# return identity
# @rest.post('signup')
# async def signup(json_arg: dict) -> SessionIdentity:
# client_id = json_arg.get('client_id')
# identity = json_arg.get('login_id')
# passwd = json_arg.get('login_id')
# assert client_id
# assert identity
# assert passwd
# await keeper.create_email_identity(client_id, identity, passwd) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2025 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_STABLEHLO_TRANSFORMS_FOLD_BROADCAST_PASS_H_
#define TENSORFLOW_COMPILER_MLIR_STABLEHLO_TRANSFORMS_FOLD_BROADCAST_PASS_H_
#include <memory>
#include "mlir/Pass/Pass.h" // from @llvm-project
namespace mlir {
namespace odml {
// Constant folds broadcast_in_dim op conditionally.
std::unique_ptr<Pass> createFoldBroadcastPass();
} // namespace odml
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_STABLEHLO_TRANSFORMS_FOLD_BROADCAST_PASS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/stablehlo/transforms/fold_broadcast_pass.h |
## Input
```javascript
function f() {
let x = 1;
x = x + 1;
x += 1;
x >>>= 1;
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: f,
params: [],
isComponent: false,
};
```
## Code
```javascript
function f() {
return 1;
}
export const FIXTURE_ENTRYPOINT = {
fn: f,
params: [],
isComponent: false,
};
```
### Eval output
(kind: ok) 1 | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/assignment-variations.expect.md |
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/sprd,sc2731.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Spreadtrum SC27xx PMIC
maintainers:
- Orson Zhai <orsonzhai@gmail.com>
- Baolin Wang <baolin.wang7@gmail.com>
- Chunyan Zhang <zhang.lyra@gmail.com>
description: |
Spreadtrum PMICs belonging to the SC27xx series integrate all mobile handset
power management, audio codec, battery management and user interface support
functions in a single chip. They have 6 major functional blocks:
- DCDCs to support CPU, memory
- LDOs to support both internal and external requirements
- Battery management system, such as charger, fuel gauge
- Audio codec
- User interface functions, such as indicator, flash LED and so on
- IC level interface, such as power on/off control, RTC, typec and so on
properties:
$nodename:
pattern: '^pmic@[0-9a-f]+$'
compatible:
enum:
- sprd,sc2720
- sprd,sc2721
- sprd,sc2723
- sprd,sc2730
- sprd,sc2731
reg:
maxItems: 1
interrupts:
maxItems: 1
interrupt-controller: true
spi-max-frequency: true
'#address-cells':
const: 1
'#interrupt-cells':
const: 1
'#size-cells':
const: 0
regulators:
type: object
$ref: /schemas/regulator/sprd,sc2731-regulator.yaml#
patternProperties:
"^adc@[0-9a-f]+$":
type: object
$ref: /schemas/iio/adc/sprd,sc2720-adc.yaml#
"^charger@[0-9a-f]+$":
type: object
$ref: /schemas/power/supply/sc2731-charger.yaml#
"^efuse@[0-9a-f]+$":
type: object
$ref: /schemas/nvmem/sprd,sc2731-efuse.yaml#
"^fuel-gauge@[0-9a-f]+$":
type: object
$ref: /schemas/power/supply/sc27xx-fg.yaml#
"^gpio@[0-9a-f]+$":
type: object
$ref: /schemas/gpio/sprd,gpio-eic.yaml#
"^led-controller@[0-9a-f]+$":
type: object
$ref: /schemas/leds/sprd,sc2731-bltc.yaml#
"^rtc@[0-9a-f]+$":
type: object
$ref: /schemas/rtc/sprd,sc2731-rtc.yaml#
"^vibrator@[0-9a-f]+$":
type: object
$ref: /schemas/input/sprd,sc27xx-vibrator.yaml#
required:
- compatible
- reg
- interrupts
- interrupt-controller
- spi-max-frequency
- '#address-cells'
- '#interrupt-cells'
- '#size-cells'
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/leds/common.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
sc2731_pmic: pmic@0 {
compatible = "sprd,sc2731";
reg = <0>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
spi-max-frequency = <26000000>;
#address-cells = <1>;
#interrupt-cells = <1>;
#size-cells = <0>;
charger@0 {
compatible = "sprd,sc2731-charger";
reg = <0x0>;
phys = <&ssphy>;
monitored-battery = <&bat>;
};
led-controller@200 {
compatible = "sprd,sc2731-bltc";
reg = <0x200>;
#address-cells = <1>;
#size-cells = <0>;
led@0 {
reg = <0x0>;
color = <LED_COLOR_ID_RED>;
};
led@1 {
reg = <0x1>;
color = <LED_COLOR_ID_GREEN>;
};
led@2 {
reg = <0x2>;
color = <LED_COLOR_ID_BLUE>;
};
};
rtc@280 {
compatible = "sprd,sc2731-rtc";
reg = <0x280>;
interrupt-parent = <&sc2731_pmic>;
interrupts = <2>;
};
pmic_eic: gpio@300 {
compatible = "sprd,sc2731-eic";
reg = <0x300>;
interrupt-parent = <&sc2731_pmic>;
interrupts = <5>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
efuse@380 {
compatible = "sprd,sc2731-efuse";
reg = <0x380>;
hwlocks = <&hwlock 12>;
#address-cells = <1>;
#size-cells = <1>;
/* Data cells */
fgu_calib: calib@6 {
reg = <0x6 0x2>;
bits = <0 9>;
};
adc_big_scale: calib@24 {
reg = <0x24 0x2>;
};
adc_small_scale: calib@26 {
reg = <0x26 0x2>;
};
};
pmic_adc: adc@480 {
compatible = "sprd,sc2731-adc";
reg = <0x480>;
interrupt-parent = <&sc2731_pmic>;
interrupts = <0>;
#io-channel-cells = <1>;
hwlocks = <&hwlock 4>;
nvmem-cells = <&adc_big_scale>, <&adc_small_scale>;
nvmem-cell-names = "big_scale_calib", "small_scale_calib";
};
fuel-gauge@a00 {
compatible = "sprd,sc2731-fgu";
reg = <0xa00>;
battery-detect-gpios = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
interrupt-parent = <&sc2731_pmic>;
interrupts = <4>;
io-channels = <&pmic_adc 5>, <&pmic_adc 14>;
io-channel-names = "bat-temp", "charge-vol";
nvmem-cells = <&fgu_calib>;
nvmem-cell-names = "fgu_calib";
monitored-battery = <&bat>;
sprd,calib-resistance-micro-ohms = <21500>;
};
vibrator@ec8 {
compatible = "sprd,sc2731-vibrator";
reg = <0xec8>;
};
regulators {
compatible = "sprd,sc2731-regulator";
BUCK_CPU0 {
regulator-name = "vddarm0";
regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1996875>;
regulator-ramp-delay = <25000>;
regulator-always-on;
};
LDO_CAMA0 {
regulator-name = "vddcama0";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3750000>;
regulator-enable-ramp-delay = <100>;
};
};
};
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mfd/sprd,sc2731.yaml |
# strategy_best.py
# Strategy pattern -- function-based implementation
# selecting best promotion from static list of functions
"""
>>> joe = Customer('John Doe', 0)
>>> ann = Customer('Ann Smith', 1100)
>>> cart = [LineItem('banana', 4, .5),
... LineItem('apple', 10, 1.5),
... LineItem('watermellon', 5, 5.0)]
>>> Order(joe, cart, fidelity_promo)
<Order total: 42.00 due: 42.00>
>>> Order(ann, cart, fidelity_promo)
<Order total: 42.00 due: 39.90>
>>> banana_cart = [LineItem('banana', 30, .5),
... LineItem('apple', 10, 1.5)]
>>> Order(joe, banana_cart, bulk_item_promo)
<Order total: 30.00 due: 28.50>
>>> long_order = [LineItem(str(item_code), 1, 1.0)
... for item_code in range(10)]
>>> Order(joe, long_order, large_order_promo)
<Order total: 10.00 due: 9.30>
>>> Order(joe, cart, large_order_promo)
<Order total: 42.00 due: 42.00>
# BEGIN STRATEGY_BEST_TESTS
>>> Order(joe, long_order, best_promo) # <1>
<Order total: 10.00 due: 9.30>
>>> Order(joe, banana_cart, best_promo) # <2>
<Order total: 30.00 due: 28.50>
>>> Order(ann, cart, best_promo) # <3>
<Order total: 42.00 due: 39.90>
# END STRATEGY_BEST_TESTS
"""
from collections import namedtuple
Customer = namedtuple('Customer', 'name fidelity')
class LineItem:
    """One cart entry: a product, a quantity, and a unit price."""

    def __init__(self, product, quantity, price):
        self.product = product
        self.quantity = quantity
        self.price = price

    def total(self):
        """Return the line subtotal (unit price times quantity)."""
        return self.quantity * self.price
class Order:  # the Context
    """An order for a customer, holding cart items and an optional promotion.

    The promotion, when given, is a callable taking the order and returning
    the discount amount.
    """

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)  # defensive copy of the iterable
        self.promotion = promotion
        self.__total = None  # memoized cart total, computed on first use

    def total(self):
        """Return the sum of all line-item totals (computed once, then cached).

        BUG FIX: the original used ``hasattr(self, '__total')``, but the
        string '__total' is not name-mangled while ``self.__total`` stores
        ``_Order__total`` — so the check was always False and the intended
        cache never hit.  A None-sentinel initialized in __init__ makes the
        memoization actually work.
        """
        if self.__total is None:
            self.__total = sum(item.total() for item in self.cart)
        return self.__total

    def due(self):
        """Return the amount due: total minus the promotion discount, if any."""
        if self.promotion is None:
            discount = 0
        else:
            discount = self.promotion(self)
        return self.total() - discount

    def __repr__(self):
        fmt = '<Order total: {:.2f} due: {:.2f}>'
        return fmt.format(self.total(), self.due())
def fidelity_promo(order):
    """5% discount for customers with 1000 or more fidelity points"""
    if order.customer.fidelity >= 1000:
        return order.total() * .05
    return 0
def bulk_item_promo(order):
    """10% discount for each LineItem with 20 or more units"""
    return sum(item.total() * .1 for item in order.cart if item.quantity >= 20)
def large_order_promo(order):
    """7% discount for orders with 10 or more distinct items"""
    if len({item.product for item in order.cart}) >= 10:
        return order.total() * .07
    return 0
# BEGIN STRATEGY_BEST
promos = [fidelity_promo, bulk_item_promo, large_order_promo]  # <1>

def best_promo(order):  # <2>
    """Select best discount available."""
    quotes = [promo(order) for promo in promos]  # <3>
    return max(quotes)
# END STRATEGY_BEST | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
__version__ = "1.13.2010.21:00"
__author__ = "Marfi"
'''
This is the installer file for airdrop-ng. It first checks for
different dependancies, such as make, svn, etc.
'''
import os, sys
from shutil import rmtree
# Abort immediately unless running as root: the installer copies files
# into /usr and runs `make install`, both of which require root.
if os.geteuid() != 0:
    print "Installer must be root to run. \nPlease 'su' or 'sudo -i' and try again. \nExiting..."
    sys.exit(1)
class checkDepend:
    """Verify that the command-line tools the installer relies on exist.

    Exits the process when any of make/svn/tar/gcc is missing.
    """
    def __init__ (self):
        # Crude "clear screen": print 100 newlines.
        clear = "\n" *100
        print clear
        print "Checking for dependancies used by the installer..."
        self.a = 0  # flag: 1 when at least one dependency is missing
        self.deps = ["make", "svn", "tar", "gcc"]
        for depends in self.deps:
            # Look for the binary in the usual bin/sbin locations.
            if (os.path.isfile("/usr/bin/" + depends) or os.path.isfile("/usr/sbin/" + depends) or os.path.isfile("/usr/local/bin/" + depends) or os.path.isfile("/usr/local/sbin/" + depends) or os.path.isfile ("/bin/" + depends) ) == True:
                pass
            else:
                self.a = 1
                print depends + " not installed."
        if self.a == 0:
            print "All dependancies installed! Continuing...\n"
            print "#### NOTE: For Ubuntu based distro's, \npython2.6-dev must be installed. Please \nmake sure it is installed before continuing!\n"
        else:
            print "Please install dependancies. Exiting...\n\n"
            exit()
class installAirdrop:
    """Interactive installer for airdrop-ng itself (prompts on stdin)."""
    def __init__(self):
        print "Welcome to the airdrop-ng installer!\nYou will be prompted for installing\nAirdrop-ng, lorcon, and pylorcon.\n"
        yno = raw_input ("Continue with installer? (y/n): ")
        if yno == "y":
            pass
        else:
            print "Fine, be that way. Exiting..."
            exit()
        yno = raw_input ("Install airdrop-ng? (y/n): ")
        if yno == "y":
            self.install()
        else:
            print "airdrop-ng not installed. Continuing..."
            pass
    def install(self):
        # Stage everything in a scratch build/ directory, recreating it
        # from scratch when a previous run left one behind.
        print "Build exist? "
        if os.path.isdir("build"):
            rmtree("build") # imported from shutil, or shutil.rmtree()
            print "File exists. Cleaning it..."
            os.mkdir ("build")
        else:
            os.mkdir ("build")
            print "Didn't exist. Creating..."
        # moves everything to build/. This is to keep everything clean,
        # and not clutter up the directory.
        os.system ("cp airdrop-ng build/ && cp -r lib build/ && cp docs/airdrop-ng.1 build/")
        print "Files copied. Now, moving to directory..."
        os.chdir ("build")
        # Remove a stale library directory from an earlier install.
        if os.path.isdir("/usr/lib/airdrop-ng") == True:
            rmtree ("/usr/lib/airdrop-ng")
        print "Moving airdrop-ng to /usr/bin, lib to \n/usr/lib/airdrop-ng, and installing man pages..."
        os.system ("cp airdrop-ng /usr/bin/airdrop-ng && cp -r lib /usr/lib/airdrop-ng && cp airdrop-ng.1 /usr/share/man/man1/")
        #os.chdir ("..")
        print "airdrop-ng installed! =)"
class installLorcon:
    """Optionally check out, build and install the (old) lorcon library from
    svn, then symlink it into /usr/lib."""
    def __init__(self):
        yno = raw_input ("Would you like to install lorcon? (y/n): ")
        if yno == "y":
            print "Running svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old. This may take a while..."
            os.system ("svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old")
            os.chdir("lorcon-old")
            os.system ("./configure && make && make install")
            print "Creating symlinks..."
            os.system ("ln -s /usr/local/lib/liborcon-1.0.0.so /usr/lib")
            # Return to the directory we started from.
            os.chdir("..")
        else:
            print "Lorcon wasn't installed. "
class installPylorcon:
    """Optionally download, unpack and install the pylorcon bindings."""
    def __init__(self):
        yno = raw_input ("Would you like to install pylorcon? (y/n): ")
        if yno == "y":
            # urllib is imported lazily; it is only needed for the download.
            import urllib
            urllib.urlretrieve("http://pylorcon.googlecode.com/files/pylorcon-3.tar.bz2", "pylorcon-3.tar.bz2")
            os.system ("tar -xvf pylorcon-3.tar.bz2")
            os.chdir ("pylorcon")
            os.system ("python setup.py install")
            # Return to the directory we started from.
            os.chdir("..")
# What actually runs the classes
checkDepend()
installAirdrop()
installLorcon()
installPylorcon()
# Offer to remove the temporary build/ directory created by installAirdrop.
yno = raw_input ("Clean up? (y/n): ")
if yno == "y":
    # NOTE(review): assumes the cwd is still inside build/ at this point --
    # verify the chdir bookkeeping in the installer classes above.
    os.chdir("..")
    if os.path.isdir("build") == True:
        rmtree("build")
print "Operation(s) complete! May the source be with you. =) "
sys.exit()
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.EnvelopeRequestData;
import org.apache.kafka.common.message.EnvelopeResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import java.nio.ByteBuffer;
/**
 * Wrapper request carrying an embedded, already-serialized request together
 * with the serialized principal and host address of the client that
 * originally sent it.
 */
public class EnvelopeRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<EnvelopeRequest> {

        private final EnvelopeRequestData data;

        /**
         * @param requestData         the embedded request, already serialized
         * @param serializedPrincipal the original requester's principal in serialized form
         * @param clientAddress       raw bytes of the original requester's host address
         */
        public Builder(ByteBuffer requestData,
                       byte[] serializedPrincipal,
                       byte[] clientAddress) {
            super(ApiKeys.ENVELOPE);
            this.data = new EnvelopeRequestData()
                .setRequestData(requestData)
                .setRequestPrincipal(serializedPrincipal)
                .setClientHostAddress(clientAddress);
        }

        @Override
        public EnvelopeRequest build(short version) {
            return new EnvelopeRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final EnvelopeRequestData data;

    public EnvelopeRequest(EnvelopeRequestData data, short version) {
        super(ApiKeys.ENVELOPE, version);
        this.data = data;
    }

    /** @return the serialized bytes of the embedded request */
    public ByteBuffer requestData() {
        return data.requestData();
    }

    /** @return raw bytes of the originating client's host address */
    public byte[] clientAddress() {
        return data.clientHostAddress();
    }

    /** @return the serialized principal of the originating client */
    public byte[] requestPrincipal() {
        return data.requestPrincipal();
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // The error is reported only via the envelope-level error code;
        // throttleTimeMs is not carried in the response here.
        return new EnvelopeResponse(new EnvelopeResponseData()
            .setErrorCode(Errors.forException(e).code()));
    }

    public static EnvelopeRequest parse(Readable readable, short version) {
        return new EnvelopeRequest(new EnvelopeRequestData(readable, version), version);
    }

    @Override
    public EnvelopeRequestData data() {
        return data;
    }
}
for a, b, c in b:
pass
else:
1/0
for : keyword.control.flow.python, source.python
: source.python
a : source.python
, : punctuation.separator.element.python, source.python
: source.python
b : source.python
, : punctuation.separator.element.python, source.python
: source.python
c : source.python
: source.python
in : keyword.control.flow.python, source.python
: source.python
b : source.python
: : punctuation.separator.colon.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
else : keyword.control.flow.python, source.python
: : punctuation.separator.colon.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
/ : keyword.operator.arithmetic.python, source.python
0 : constant.numeric.dec.python, source.python | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import sys
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
string_type = str
text_type = str
int2str = chr
else: # pragma: no cover
string_type = basestring # noqa
text_type = unicode # noqa
int2str = unichr # noqa
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = re.compile(
"^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video|nav)$",
re.IGNORECASE
)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
AMP_SUBSTITUTE = STX+"amp"+ETX
HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = (
('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F') # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
try: # pragma: no cover
# Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError): # pragma: no cover
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def isBlockLevel(tag):
    """Check if the tag is a block level HTML tag."""
    # Some ElementTree tags are not strings (e.g. callables), so anything
    # that is not a string cannot be block level.
    if not isinstance(tag, string_type):
        return False
    return BLOCK_LEVEL_ELEMENTS.match(tag)
def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
    """Parses a string representing bool value. If parsing was successful,
    returns True or False. If preserve_none=True, returns True, False,
    or None. If parsing was not successful, raises ValueError, or, if
    fail_on_errors=False, returns None."""
    # Non-string values are coerced via bool(), except None when it is
    # being preserved.
    if not isinstance(value, string_type):
        if preserve_none and value is None:
            return value
        return bool(value)
    lowered = value.lower()
    if preserve_none and lowered == 'none':
        return None
    if lowered in ('true', 'yes', 'y', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'n', 'off', '0', 'none'):
        return False
    if fail_on_errors:
        raise ValueError('Cannot parse bool value: %r' % value)
    # Unparsable and fail_on_errors=False: fall through, returning None.
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
    """A string which should not be further processed."""
    # Subclass of the unicode text type with no extra state; the type
    # itself serves as the "do not reprocess" marker.
    pass
class Processor(object):
    """Minimal base class that optionally stores a reference to the
    Markdown instance it belongs to (as ``self.markdown``)."""
    def __init__(self, markdown_instance=None):
        # Only set the attribute when an instance is supplied; subclasses
        # created without one simply lack ``self.markdown``.
        if markdown_instance:
            self.markdown = markdown_instance
class HtmlStash(object):
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__(self):
        """ Create a HtmlStash. """
        self.html_counter = 0  # number of stashed inline html segments
        self.rawHtmlBlocks = []
        self.tag_counter = 0
        self.tag_data = []  # list of dictionaries in the order tags appear

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion. Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string

        """
        self.rawHtmlBlocks.append((html, safe))
        key = self.html_counter
        self.html_counter = key + 1
        return self.get_placeholder(key)

    def reset(self):
        # Restart html numbering and drop stored segments.
        # (Tag-related state is not cleared here.)
        self.html_counter = 0
        self.rawHtmlBlocks = []

    def get_placeholder(self, key):
        return HTML_PLACEHOLDER % key

    def store_tag(self, tag, attrs, left_index, right_index):
        """Store tag data and return a placeholder."""
        entry = {
            'tag': tag,
            'attrs': attrs,
            'left_index': left_index,
            'right_index': right_index,
        }
        self.tag_data.append(entry)
        placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
        self.tag_counter += 1  # equal to the tag's index in self.tag_data
        return placeholder
## Input
```javascript
function foo(a, b, c) {
const x = [];
if (a) {
const y = [];
y.push(b);
x.push(<div>{y}</div>);
} else {
x.push(c);
}
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: foo,
params: ['TodoAdd'],
isComponent: 'TodoAdd',
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function foo(a, b, c) {
const $ = _c(8);
let x;
if ($[0] !== a || $[1] !== b || $[2] !== c) {
x = [];
if (a) {
let y;
if ($[4] !== b) {
y = [];
y.push(b);
$[4] = b;
$[5] = y;
} else {
y = $[5];
}
let t0;
if ($[6] !== y) {
t0 = <div>{y}</div>;
$[6] = y;
$[7] = t0;
} else {
t0 = $[7];
}
x.push(t0);
} else {
x.push(c);
}
$[0] = a;
$[1] = b;
$[2] = c;
$[3] = x;
} else {
x = $[3];
}
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: foo,
params: ["TodoAdd"],
isComponent: "TodoAdd",
};
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/reactive-scopes-if.expect.md |
---
layout: default
---
<section class="docs">
<div class="grid">
{% include docs_contents_mobile.html -%}
<div class="unit four-fifths">
<article>
{% include improve_doc_link.html %}
<h1>{{ page.title }}</h1>
{{ content }}
</article>
</div>
{% include docs_contents.html -%}
<div class="clear"></div>
</div>
</section> | html | github | https://github.com/jekyll/jekyll | docs/_layouts/docs.html |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices
    """
    _name = "account.statement.from.invoice.lines"
    _description = "Entries by Statement from Invoices"
    _columns = {
        'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
    }

    def populate_statement(self, cr, uid, ids, context=None):
        """Create a voucher plus a bank-statement line for every selected
        invoice move line, attached to the statement whose id is supplied in
        ``context['statement_id']``.

        Returns an ``ir.actions.act_window_close`` action in all cases.
        """
        if context is None:
            context = {}
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}

        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        voucher_obj = self.pool.get('account.voucher')
        voucher_line_obj = self.pool.get('account.voucher.line')
        line_date = time.strftime('%Y-%m-%d')
        statement = statement_obj.browse(cr, uid, statement_id, context=context)
        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            voucher_res = {}
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            amount = 0.0

            if line.debit > 0:
                amount = line.debit
            elif line.credit > 0:
                amount = -line.credit

            # Convert to the statement's currency when needed.
            if line.amount_currency:
                amount = currency_obj.compute(cr, uid, line.currency_id.id,
                    statement.currency.id, line.amount_currency, context=ctx)
            elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
                amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
                    statement.currency.id, amount, context=ctx)

            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            type = 'general'
            ttype = amount < 0 and 'payment' or 'receipt'
            sign = 1
            if line.journal_id.type in ('sale', 'sale_refund'):
                type = 'customer'
                ttype = 'receipt'
            # BUGFIX: was 'purhcase_refund' (typo), so purchase-refund
            # journals were never classified as supplier payments.
            elif line.journal_id.type in ('purchase', 'purchase_refund'):
                type = 'supplier'
                ttype = 'payment'
                sign = -1

            result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=sign*amount, currency_id=statement.currency.id, ttype=ttype, date=line_date, context=context)

            voucher_res = {'type': ttype,
                'name': line.name,
                'partner_id': line.partner_id.id,
                'journal_id': statement.journal_id.id,
                'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
                'company_id': statement.company_id.id,
                'currency_id': statement.currency.id,
                'date': line.date,
                'amount': sign*amount,
                'payment_rate': result['value']['payment_rate'],
                'payment_rate_currency_id': result['value']['payment_rate_currency_id'],
                'period_id': statement.period_id.id}
            voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)

            voucher_line_dict = {}
            # Pick the onchange-proposed voucher line that belongs to the
            # same move as the selected move line.
            for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
                move_line = line_obj.browse(cr, uid, line_dict['move_line_id'], context)
                if line.move_id.id == move_line.move_id.id:
                    voucher_line_dict = line_dict

            if voucher_line_dict:
                voucher_line_dict.update({'voucher_id': voucher_id})
                voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)

            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'type': type,
                'partner_id': line.partner_id.id,
                'account_id': line.account_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'voucher_id': voucher_id,
                'date': time.strftime('%Y-%m-%d'),
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}

account_statement_from_invoice_lines()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test
import afip
import invoice
import config
import partner
import account
import country
import report
import currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
name: Static Typing
description: Report an issue with the NumPy typing hints.
title: "TYP: <Please write a comprehensive title after the 'TYP: ' prefix>"
labels: [41 - Static typing]
body:
- type: markdown
attributes:
value: >
Thank you for taking the time to report this issue.
Please make sure that this issue hasn't already been reported before.
- type: textarea
attributes:
label: "Describe the issue:"
validations:
required: true
- type: textarea
attributes:
label: "Reproduce the code example:"
description: >
A short code example that reproduces the error in your type-checker. It
should be self-contained, i.e., can be run as-is via e.g.
`mypy myproblem.py` or `pyright myproblem.py`.
placeholder: |
import numpy as np
import numpy.typing as npt
<< your code here >>
render: python
validations:
required: true
- type: textarea
attributes:
label: "Error message:"
description: >
Please include all relevant error messages from your type-checker or IDE.
render: shell
- type: textarea
attributes:
label: "Python and NumPy Versions:"
description: >
Output from `import sys, numpy; print(numpy.__version__); print(sys.version)`.
validations:
required: true
- type: textarea
attributes:
label: "Type-checker version and settings:"
description: >
Please include the exact version of the type-checker you are using.
Popular (static) type checkers include Mypy, Pyright / Pylance, Pytype,
Pyre, PyCharm, etc.
Also include the full CLI command used to run the type-checker, and
all of the relevant configuration options.
validations:
required: true
- type: textarea
attributes:
label: "Additional typing packages."
description: |
If you are using `typing-extensions` or typing-stub packages, please
list their versions here.
validations:
required: false | unknown | github | https://github.com/numpy/numpy | .github/ISSUE_TEMPLATE/typing.yml |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group
short_description: create or delete log_group in CloudWatchLogs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html)
description:
- Create or delete log_group in CloudWatchLogs.
version_added: "2.5"
author:
- Willian Ricardo(@willricardo) <willricardo@gmail.com>
requirements: [ json, botocore, boto3 ]
options:
state:
description:
- Whether the rule is present, absent or get
choices: ["present", "absent"]
default: present
required: false
log_group_name:
description:
- The name of the log group.
required: true
kms_key_id:
description:
- The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
required: false
tags:
description:
- The key-value pairs to use for the tags.
required: false
retention:
description:
- "The number of days to retain the log events in the specified log group.
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
required: false
overwrite:
description:
- Whether an existing log group should be overwritten on create.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group:
log_group_name: test-log-group
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
kms_key_id: arn:aws:kms:region:account-id:key/key-id
- cloudwatchlogs_log_group:
state: absent
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objetcs representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: string
creation_time:
description: The creation time of the log group.
returned: always
type: integer
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: integer
metric_filter_count:
description: The number of metric filters.
returned: always
type: integer
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: string
stored_bytes:
description: The number of bytes stored.
returned: always
type: string
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: string
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
    """Create a CloudWatch Logs log group and return its description dict.

    ``kms_key_id`` and ``tags`` are only sent to the API when provided;
    ``retention`` (days) is applied afterwards via a separate call.  API
    errors, or failure to find the group after creation, fail the module.
    """
    request = {'logGroupName': log_group_name}
    # Only include the optional attributes that were actually supplied.
    if kms_key_id:
        request['kmsKeyId'] = kms_key_id
    if tags:
        request['tags'] = tags

    try:
        client.create_log_group(**request)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
                         exception=traceback.format_exc())

    if retention:
        input_retention_policy(client=client,
                               log_group_name=log_group_name,
                               retention=retention, module=module)

    desc_log_group = describe_log_group(client=client,
                                        log_group_name=log_group_name,
                                        module=module)

    if 'logGroups' in desc_log_group:
        # describe matches by name *prefix*; return only the exact-name entry.
        for i in desc_log_group['logGroups']:
            if log_group_name == i['logGroupName']:
                return i
    module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
def input_retention_policy(client, log_group_name, retention, module):
    """Apply a retention policy (in days) to a log group.

    ``retention`` must be one of the values accepted by the CloudWatch Logs
    PutRetentionPolicy API; otherwise the (just created) log group is
    deleted again and the module fails.  AWS API errors are reported via
    ``module.fail_json``.
    """
    # Values accepted by the PutRetentionPolicy API.
    permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
    try:
        if retention in permitted_values:
            client.put_retention_policy(logGroupName=log_group_name,
                                        retentionInDays=retention)
        else:
            # Roll back the log group created earlier in this run so a failed
            # invocation does not leave a group without the requested policy.
            delete_log_group(client=client, log_group_name=log_group_name, module=module)
            module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def delete_log_group(client, log_group_name, module):
    """Delete the log group if it exists (exact name match only).

    AWS API errors are reported via ``module.fail_json``; deleting a
    non-existent group is a no-op.
    """
    desc_log_group = describe_log_group(client=client,
                                        log_group_name=log_group_name,
                                        module=module)
    try:
        if 'logGroups' in desc_log_group:
            for i in desc_log_group['logGroups']:
                # describe matches by prefix, so only delete on exact match.
                if log_group_name == i['logGroupName']:
                    client.delete_log_group(logGroupName=log_group_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def describe_log_group(client, log_group_name, module):
    """Return the raw DescribeLogGroups response for groups whose name
    starts with ``log_group_name`` (the API treats it as a prefix).

    AWS API errors are reported via ``module.fail_json``.
    """
    try:
        desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name)
        return desc_log_group
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
                         exception=traceback.format_exc())
def main():
    """Module entry point: parse arguments and create or delete the group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        log_group_name=dict(required=True, type='str'),
        state=dict(choices=['present', 'absent'],
                   default='present'),
        kms_key_id=dict(required=False, type='str'),
        tags=dict(required=False, type='dict'),
        retention=dict(required=False, type='int'),
        overwrite=dict(required=False, type='bool', default=False)
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)

    state = module.params.get('state')
    changed = False

    # Determine if the log group exists
    # (describe matches by prefix, so compare the exact name below).
    desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
    found_log_group = {}
    for i in desc_log_group.get('logGroups', []):
        if module.params['log_group_name'] == i['logGroupName']:
            found_log_group = i
            break

    if state == 'present':
        if found_log_group and module.params['overwrite'] is True:
            # Overwrite requested: delete and recreate from scratch.
            changed = True
            delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
            found_log_group = create_log_group(client=logs,
                                               log_group_name=module.params['log_group_name'],
                                               kms_key_id=module.params['kms_key_id'],
                                               tags=module.params['tags'],
                                               retention=module.params['retention'],
                                               module=module)
        elif not found_log_group:
            changed = True
            found_log_group = create_log_group(client=logs,
                                               log_group_name=module.params['log_group_name'],
                                               kms_key_id=module.params['kms_key_id'],
                                               tags=module.params['tags'],
                                               retention=module.params['retention'],
                                               module=module)
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
    elif state == 'absent':
        if found_log_group:
            changed = True
            delete_log_group(client=logs,
                             log_group_name=module.params['log_group_name'],
                             module=module)
        module.exit_json(changed=changed)

if __name__ == '__main__':
    main()
//// [tests/cases/compiler/commentsCommentParsing.ts] ////
//// [commentsCommentParsing.ts]
/// This is simple /// comments
function simple() {
}
simple();
/// multiLine /// Comments
/// This is example of multiline /// comments
/// Another multiLine
function multiLine() {
}
multiLine();
/** this is eg of single line jsdoc style comment */
function jsDocSingleLine() {
}
jsDocSingleLine();
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
function jsDocMultiLine() {
}
jsDocMultiLine();
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
/** Shoul mege this line as well
* and this too*/ /** Another this one too*/
function jsDocMultiLineMerge() {
}
jsDocMultiLineMerge();
/// Triple slash comment
/** jsdoc comment */
function jsDocMixedComments1() {
}
jsDocMixedComments1();
/// Triple slash comment
/** jsdoc comment */ /*** another jsDocComment*/
function jsDocMixedComments2() {
}
jsDocMixedComments2();
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
function jsDocMixedComments3() {
}
jsDocMixedComments3();
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
/// Triple slash comment 2
function jsDocMixedComments4() {
}
jsDocMixedComments4();
/// Triple slash comment 1
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
/// Triple slash comment 2
function jsDocMixedComments5() {
}
jsDocMixedComments5();
/*** another jsDocComment*/
/// Triple slash comment 1
/// Triple slash comment
/// Triple slash comment 2
/** jsdoc comment */
function jsDocMixedComments6() {
}
jsDocMixedComments6();
// This shoulnot be help comment
function noHelpComment1() {
}
noHelpComment1();
/* This shoulnot be help comment */
function noHelpComment2() {
}
noHelpComment2();
function noHelpComment3() {
}
noHelpComment3();
/** Adds two integers and returns the result
* @param {number} a first number
* @param b second number
*/
function sum(a: number, b: number) {
return a + b;
}
sum(10, 20);
/** This is multiplication function*/
/** @param */
/** @param a first number*/
/** @param b */
/** @param c {
@param d @anotherTag*/
/** @param e LastParam @anotherTag*/
function multiply(a: number, b: number, c?: number, d?, e?) {
}
/** fn f1 with number
* @param { string} b about b
*/
function f1(a: number);
function f1(b: string);
/**@param opt optional parameter*/
function f1(aOrb, opt?) {
return aOrb;
}
/** This is subtract function
@param { a
*@param { number | } b this is about b
@param { { () => string; } } c this is optional param c
@param { { () => string; } d this is optional param d
@param { { () => string; } } e this is optional param e
@param { { { () => string; } } f this is optional param f
*/
function subtract(a: number, b: number, c?: () => string, d?: () => string, e?: () => string, f?: () => string) {
}
/** this is square function
@paramTag { number } a this is input number of paramTag
@param { number } a this is input number
@returnType { number } it is return type
*/
function square(a: number) {
return a * a;
}
/** this is divide function
@param { number} a this is a
@paramTag { number } g this is optional param g
@param { number} b this is b
*/
function divide(a: number, b: number) {
}
/** this is jsdoc style function with param tag as well as inline parameter help
*@param a it is first parameter
*@param c it is third parameter
*/
function jsDocParamTest(/** this is inline comment for a */a: number, /** this is inline comment for b*/ b: number, c: number, d: number) {
return a + b + c + d;
}
/**/
class NoQuickInfoClass {
}
//// [commentsCommentParsing.js]
"use strict";
/// This is simple /// comments
function simple() {
}
simple();
/// multiLine /// Comments
/// This is example of multiline /// comments
/// Another multiLine
function multiLine() {
}
multiLine();
/** this is eg of single line jsdoc style comment */
function jsDocSingleLine() {
}
jsDocSingleLine();
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
function jsDocMultiLine() {
}
jsDocMultiLine();
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
/** Shoul mege this line as well
* and this too*/ /** Another this one too*/
function jsDocMultiLineMerge() {
}
jsDocMultiLineMerge();
/// Triple slash comment
/** jsdoc comment */
function jsDocMixedComments1() {
}
jsDocMixedComments1();
/// Triple slash comment
/** jsdoc comment */ /*** another jsDocComment*/
function jsDocMixedComments2() {
}
jsDocMixedComments2();
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
function jsDocMixedComments3() {
}
jsDocMixedComments3();
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
/// Triple slash comment 2
function jsDocMixedComments4() {
}
jsDocMixedComments4();
/// Triple slash comment 1
/** jsdoc comment */ /*** another jsDocComment*/
/// Triple slash comment
/// Triple slash comment 2
function jsDocMixedComments5() {
}
jsDocMixedComments5();
/*** another jsDocComment*/
/// Triple slash comment 1
/// Triple slash comment
/// Triple slash comment 2
/** jsdoc comment */
function jsDocMixedComments6() {
}
jsDocMixedComments6();
// This shoulnot be help comment
function noHelpComment1() {
}
noHelpComment1();
/* This shoulnot be help comment */
function noHelpComment2() {
}
noHelpComment2();
function noHelpComment3() {
}
noHelpComment3();
/** Adds two integers and returns the result
* @param {number} a first number
* @param b second number
*/
function sum(a, b) {
return a + b;
}
sum(10, 20);
/** This is multiplication function*/
/** @param */
/** @param a first number*/
/** @param b */
/** @param c {
@param d @anotherTag*/
/** @param e LastParam @anotherTag*/
function multiply(a, b, c, d, e) {
}
/**@param opt optional parameter*/
function f1(aOrb, opt) {
return aOrb;
}
/** This is subtract function
@param { a
*@param { number | } b this is about b
@param { { () => string; } } c this is optional param c
@param { { () => string; } d this is optional param d
@param { { () => string; } } e this is optional param e
@param { { { () => string; } } f this is optional param f
*/
function subtract(a, b, c, d, e, f) {
}
/** this is square function
@paramTag { number } a this is input number of paramTag
@param { number } a this is input number
@returnType { number } it is return type
*/
function square(a) {
return a * a;
}
/** this is divide function
@param { number} a this is a
@paramTag { number } g this is optional param g
@param { number} b this is b
*/
function divide(a, b) {
}
/** this is jsdoc style function with param tag as well as inline parameter help
*@param a it is first parameter
*@param c it is third parameter
*/
function jsDocParamTest(/** this is inline comment for a */ a, /** this is inline comment for b*/ b, c, d) {
return a + b + c + d;
}
/**/
class NoQuickInfoClass {
}
//// [commentsCommentParsing.d.ts]
declare function simple(): void;
declare function multiLine(): void;
/** this is eg of single line jsdoc style comment */
declare function jsDocSingleLine(): void;
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
declare function jsDocMultiLine(): void;
/** this is multiple line jsdoc stule comment
*New line1
*New Line2*/
/** Shoul mege this line as well
* and this too*/ /** Another this one too*/
declare function jsDocMultiLineMerge(): void;
/** jsdoc comment */
declare function jsDocMixedComments1(): void;
/** jsdoc comment */ /*** another jsDocComment*/
declare function jsDocMixedComments2(): void;
/** jsdoc comment */ /*** another jsDocComment*/
declare function jsDocMixedComments3(): void;
/** jsdoc comment */ /*** another jsDocComment*/
declare function jsDocMixedComments4(): void;
/** jsdoc comment */ /*** another jsDocComment*/
declare function jsDocMixedComments5(): void;
/*** another jsDocComment*/
/** jsdoc comment */
declare function jsDocMixedComments6(): void;
declare function noHelpComment1(): void;
declare function noHelpComment2(): void;
declare function noHelpComment3(): void;
/** Adds two integers and returns the result
* @param {number} a first number
* @param b second number
*/
declare function sum(a: number, b: number): number;
/** This is multiplication function*/
/** @param */
/** @param a first number*/
/** @param b */
/** @param c {
@param d @anotherTag*/
/** @param e LastParam @anotherTag*/
declare function multiply(a: number, b: number, c?: number, d?: any, e?: any): void;
/** fn f1 with number
* @param { string} b about b
*/
declare function f1(a: number): any;
declare function f1(b: string): any;
/** This is subtract function
@param { a
*@param { number | } b this is about b
@param { { () => string; } } c this is optional param c
@param { { () => string; } d this is optional param d
@param { { () => string; } } e this is optional param e
@param { { { () => string; } } f this is optional param f
*/
declare function subtract(a: number, b: number, c?: () => string, d?: () => string, e?: () => string, f?: () => string): void;
/** this is square function
@paramTag { number } a this is input number of paramTag
@param { number } a this is input number
@returnType { number } it is return type
*/
declare function square(a: number): number;
/** this is divide function
@param { number} a this is a
@paramTag { number } g this is optional param g
@param { number} b this is b
*/
declare function divide(a: number, b: number): void;
/** this is jsdoc style function with param tag as well as inline parameter help
*@param a it is first parameter
*@param c it is third parameter
*/
declare function jsDocParamTest(/** this is inline comment for a */ a: number, /** this is inline comment for b*/ b: number, c: number, d: number): number;
declare class NoQuickInfoClass {
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/commentsCommentParsing(target=es2015).js |
"""
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_uint, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import (
bool_output,
const_string_output,
double_output,
geom_output,
int64_output,
int_output,
srs_output,
void_output,
voidptr_output,
)
c_int_p = POINTER(c_int) # shortcut type
GDAL_OF_READONLY = 0x00
GDAL_OF_UPDATE = 0x01
GDAL_OF_ALL = 0x00
GDAL_OF_RASTER = 0x02
GDAL_OF_VECTOR = 0x04
# Driver Routines
register_all = void_output(lgdal.GDALAllRegister, [], errcheck=False)
cleanup_all = void_output(lgdal.GDALDestroyDriverManager, [], errcheck=False)
get_driver = voidptr_output(lgdal.GDALGetDriver, [c_int])
get_driver_by_name = voidptr_output(
lgdal.GDALGetDriverByName, [c_char_p], errcheck=False
)
get_driver_count = int_output(lgdal.GDALGetDriverCount, [])
get_driver_description = const_string_output(lgdal.GDALGetDescription, [c_void_p])
# DataSource
open_ds = voidptr_output(
lgdal.GDALOpenEx,
[c_char_p, c_uint, POINTER(c_char_p), POINTER(c_char_p), POINTER(c_char_p)],
)
destroy_ds = void_output(lgdal.GDALClose, [c_void_p], errcheck=False)
get_ds_name = const_string_output(lgdal.GDALGetDescription, [c_void_p])
get_dataset_driver = voidptr_output(lgdal.GDALGetDatasetDriver, [c_void_p])
get_layer = voidptr_output(lgdal.GDALDatasetGetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(
lgdal.GDALDatasetGetLayerByName, [c_void_p, c_char_p]
)
get_layer_count = int_output(lgdal.GDALDatasetGetLayerCount, [c_void_p])
# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(
lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False
)
set_spatial_filter_rect = void_output(
lgdal.OGR_L_SetSpatialFilterRect,
[c_void_p, c_double, c_double, c_double, c_double],
errcheck=False,
)
# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(
lgdal.OGR_F_GetFieldAsDateTime,
[c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p],
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_integer64 = int64_output(
lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int]
)
is_field_set = bool_output(lgdal.OGR_F_IsFieldSetAndNotNull, [c_void_p, c_int])
get_field_as_string = const_string_output(
lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int]
)
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p]) | python | github | https://github.com/django/django | django/contrib/gis/gdal/prototypes/ds.py |
//// [tests/cases/conformance/async/es6/asyncArrowFunction/asyncArrowFunction6_es6.ts] ////
//// [asyncArrowFunction6_es6.ts]
var foo = async (a = await): Promise<void> => {
}
//// [asyncArrowFunction6_es6.js]
"use strict";
var foo = (...args_1) => __awaiter(void 0, [...args_1], void 0, function* (a = yield ) {
}); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/asyncArrowFunction6_es6.js |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
VALID_FFT_RANKS = (1, 2, 3)
class FFTOpsTest(tf.test.TestCase):
def _Compare(self, x, rank):
if tf.test.is_gpu_available():
# GPU/Forward
self.assertAllClose(
self._npFFT(x, rank),
self._tfFFT(x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
# GPU/Backward
self.assertAllClose(
self._npIFFT(x, rank),
self._tfIFFT(x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
def _checkGrad(self, func, x, y, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
# func is a forward or inverse FFT function (batched or unbatched)
z = func(tf.complex(inx, iny))
# loss = sum(|z|^2)
loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
[inx, iny],
[list(x.shape), list(y.shape)],
loss,
[1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
def _npFFT(self, x, rank):
if rank == 1:
return np.fft.fft2(x, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank):
if rank == 1:
return np.fft.ifft2(x, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFT(self, x, rank, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
return self._tfFFTForRank(rank)(x).eval()
def _tfIFFT(self, x, rank, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
return self._tfIFFTForRank(rank)(x).eval()
def _tfFFTForRank(self, rank):
if rank == 1:
return tf.fft
elif rank == 2:
return tf.fft2d
elif rank == 3:
return tf.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return tf.ifft
elif rank == 2:
return tf.ifft2d
elif rank == 3:
return tf.ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
if tf.test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(
np.arange(np.power(4, dims)), 10).reshape((4,) * dims), rank)
def testRandom(self):
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(gen((4,) * dims), rank)
def testError(self):
if tf.test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError,
"Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError,
"Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
if tf.test.is_gpu_available():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np.float32)
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
def testGrad_Random(self):
if tf.test.is_gpu_available():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
if __name__ == "__main__":
tf.test.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# GTK3 widget to implement statuses in Turpial
import re
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GdkPixbuf
from turpial.ui.lang import i18n
from turpial.ui.gtk.common import *
from turpial.ui.gtk.statusmenu import StatusMenu
#from turpial.ui.gtk.imagebutton import ImageButton
from turpial.ui.gtk.markuplabel import MarkupLabel
class StatusWidget(Gtk.EventBox):
def __init__(self, base, status):
Gtk.EventBox.__init__(self)
self.base = base
self.status = status
self.set_margin_bottom(OUTTER_BOTTOM_MARGIN)
self.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(65535, 65535, 65535))
# Variables to control work in progress over the status
self.in_progress = {
StatusProgress.FAVING: False,
StatusProgress.UNFAVING: False,
StatusProgress.RETWEETING: False,
StatusProgress.UNRETWEETING: False,
StatusProgress.DELETING: False,
}
self.avatar = Gtk.Image()
self.avatar.set_margin_right(AVATAR_MARGIN)
self.avatar_box = Gtk.Alignment()
self.avatar_box.add(self.avatar)
self.avatar_box.set(0.5, 0, -1, -1)
self.favorited_mark = Gtk.Image()
self.protected_mark = Gtk.Image()
self.verified_mark = Gtk.Image()
self.reposted_mark = Gtk.Image()
self.repeated_mark = Gtk.Image()
self.username = MarkupLabel(act_as_link=True)
self.username.set_ellipsize(Pango.EllipsizeMode.END)
self.status_text = MarkupLabel()
self.footer = MarkupLabel()
# Setting user image
self.avatar.set_from_pixbuf(self.base.load_image('unknown.png', True))
# Building the status style
user = '<span size="9000" foreground="%s"><b>%s</b></span>' % (
self.base.get_color_scheme('links'), status.username
)
self.username.set_markup(user)
text = status.text.replace('>', '>')
text = text.replace('<', '<')
pango_text = '<span size="9000">%s</span>' % escape_text_for_markup(text)
pango_text = self.__highlight_urls(status, pango_text)
pango_text = self.__highlight_hashtags(status, pango_text)
pango_text = self.__highlight_groups(status, pango_text)
pango_text = self.__highlight_mentions(status, pango_text)
self.status_text.set_markup(pango_text)
footer = '<span size="small" foreground="#999">%s' % status.datetime
if status.source:
footer += ' %s %s' % (_('from'), status.source.name)
if status.in_reply_to_user:
footer += ' %s %s' % (_('in reply to'), status.in_reply_to_user)
if status.reposted_by:
footer += '\n%s %s' % (_('Retweeted by'), status.reposted_by)
footer += '</span>'
self.footer.set_markup(footer)
starbox = Gtk.HBox()
starbox.pack_start(self.repeated_mark, False, False, 2)
starbox.pack_start(self.favorited_mark, False, False, 2)
staralign = Gtk.Alignment()
staralign.set(1, -1, -1, -1)
staralign.add(starbox)
header = Gtk.HBox()
header.pack_start(self.reposted_mark, False, False, 2)
header.pack_start(self.username, False, False, 2)
header.pack_start(self.verified_mark, False, False, 2)
header.pack_start(self.protected_mark, False, False, 0)
header.pack_start(staralign, True, True, 0)
content = Gtk.VBox()
content.pack_start(header, False, False, 0)
content.pack_start(self.status_text, True, True, 0)
content.pack_start(self.footer, False, False, 0)
box = Gtk.HBox()
box.pack_start(self.avatar_box, False, False, 0)
box.pack_start(content, True, True, 0)
bbox = Gtk.VBox()
bbox.pack_start(box, True, True, 0)
self.add(bbox)
self.show_all()
# After showing all widgets we set the marks
self.set_favorited_mark(status.favorited)
self.set_protected_mark(status.protected)
self.set_verified_mark(status.verified)
self.set_repeated_mark(status.repeated)
self.set_reposted_mark(status.reposted_by)
self.connect('button-release-event', self.__on_click)
self.click_url_handler = self.status_text.connect('activate-link', self.__open_url)
self.click_avatar_handler = self.avatar_box.connect('button-press-event', self.__on_click_avatar)
self.click_username_handler = self.username.connect('button-release-event', self.__on_click_username)
self.base.fetch_status_avatar(status, self.update_avatar)
def __on_click_username(self, widget, event=None):
print 'clicked username', widget, event
def __on_click(self, widget, event=None, data=None):
# Capture clicks for avatar
if event.x <= 48 and event.y <= 48 and event.button == 1:
self.__on_click_avatar()
return True
print event.x, event.y
if event.button != 3:
return False
self.menu = StatusMenu(self.base, self.status, self.in_progress)
self.menu.show_all()
self.menu.popup(None, None, None, None, 0, Gtk.get_current_event_time())
def __on_click_avatar(self):
self.base.show_user_avatar(self.status.account_id, self.status.username)
def __highlight_urls(self, status, text):
for url in status.entities['urls']:
if url.url == None:
url.url = url.search_for
cad = "<a href='%s'>%s</a>" % (escape_text_for_markup(url.url), escape_text_for_markup(url.display_text))
text = text.replace(url.search_for, cad)
return text
def __highlight_hashtags(self, status, text):
for h in status.entities['hashtags']:
url = "%s-search:%%23%s" % (self.status.account_id, h.display_text[1:])
cad = '<a href="hashtags:%s">%s</a>' % (url, h.display_text)
text = text.replace(h.search_for, cad)
return text
def __highlight_groups(self, status, text):
for h in status.entities['groups']:
cad = '<a href="groups:%s">%s</a>' % (h.url, h.display_text)
text = text.replace(h.search_for, cad)
return text
def __highlight_mentions(self, status, text):
for h in status.entities['mentions']:
args = "%s:%s" % (status.account_id, h.display_text[1:])
cad = '<a href="profile:%s">%s</a>' % (args, h.display_text)
pattern = re.compile(h.search_for, re.IGNORECASE)
text = pattern.sub(cad, text)
return text
def __open_url(self, widget, url):
if url.startswith('http'):
self.base.open_url(url)
elif url.startswith('hashtag'):
column_id = url.replace('hashtags:', '')
self.base.save_column(column_id)
elif url.startswith('groups'):
print "Opening groups"
elif url.startswith('profile'):
url = url.replace('profile:', '')
account_id = url.split(':')[0]
username = url.split(':')[1]
self.base.show_user_profile(account_id, username)
return True
def __del__(self):
print 'garbage collected'
def release(self):
self.avatar_box.disconnect(self.click_avatar_handler)
self.username.disconnect(self.click_username_handler)
self.status_text.disconnect(self.click_url_handler)
def update(self, status):
self.status = status
# render again
def update_avatar(self, response):
if response.code == 0:
pix = GdkPixbuf.Pixbuf.new_from_file_at_scale(response.items, 48, 48, True)
self.avatar.set_from_pixbuf(pix)
del pix
def set_favorited_mark(self, value):
if value:
self.favorited_mark.set_from_pixbuf(self.base.load_image('mark-favorite.png', True))
else:
self.favorited_mark.set_from_pixbuf(None)
self.status.favorited = value
def set_repeated_mark(self, value):
if value:
self.repeated_mark.set_from_pixbuf(self.base.load_image('mark-repeated.png', True))
else:
self.repeated_mark.set_from_pixbuf(None)
self.status.repeated = value
def set_protected_mark(self, value):
if value:
self.protected_mark.set_from_pixbuf(self.base.load_image('mark-protected.png', True))
else:
self.protected_mark.set_from_pixbuf(None)
def set_verified_mark(self, value):
if value:
self.verified_mark.set_from_pixbuf(self.base.load_image('mark-verified.png', True))
else:
self.verified_mark.set_from_pixbuf(None)
def set_reposted_mark(self, value):
if value:
self.reposted_mark.set_from_pixbuf(self.base.load_image('mark-reposted.png', True))
else:
self.reposted_mark.set_from_pixbuf(None) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test that checks preprocessing of files.
Tests preprocessing by adding having the preprocessor
provide the actual rctext data.
'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import grit.tool.preprocess_interface
from grit.tool import rc2grd
class PreProcessingUnittest(unittest.TestCase):
  """Verifies that Rc2Grd loads and invokes a configured pre-processor.

  The pre-processor (DummyPreProcessor) substitutes canned rc text, so the
  parsed output must contain the dummy string names rather than anything
  from the (nonexistent) rc file on disk.
  """

  def testPreProcessing(self):
    tool = rc2grd.Rc2Grd()

    class DummyOpts(object):
      verbose = False
      extra_verbose = False

    tool.o = DummyOpts()
    # Fully-qualified class name that Rc2Grd will import dynamically.
    tool.pre_process = 'grit.tool.preprocess_unittest.DummyPreProcessor'
    # BUG FIX: the original literal '.\resource.rc' silently embedded an
    # escaped carriage return ('\r'); a raw string preserves the intended
    # Windows-style path.  (The dummy pre-processor ignores the path, but
    # the literal should still be what was meant.)
    result = tool.Process('', r'.\resource.rc')
    # failUnless is a long-deprecated alias; assertTrue is the supported name.
    self.assertTrue(
        result.children[2].children[2].children[0].attrs['name'] ==
        'DUMMY_STRING_1')
class DummyPreProcessor(grit.tool.preprocess_interface.PreProcessor):
  """Test double: discards the real rc input and returns canned rc text."""

  def Process(self, rctext, rcpath):
    # Both arguments are deliberately ignored; the test only asserts that
    # whatever this hook returns is what Rc2Grd ends up parsing.
    rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
// Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
END
'''
    return rctext
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_group_info
short_description: Gather information about ec2 security groups in AWS.
description:
- Gather information about ec2 security groups in AWS.
- This module was called C(ec2_group_facts) before Ansible 2.9. The usage did not change.
version_added: "2.3"
requirements: [ boto3 ]
author:
- Henrique Rodrigues (@Sodki)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores (_)
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
type: dict
notes:
- By default, the module will return all security groups. To limit results use the appropriate filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all security groups
- ec2_group_info:
# Gather information about all security groups in a specific VPC
- ec2_group_info:
filters:
vpc-id: vpc-12345678
# Gather information about all security groups in a specific VPC
- ec2_group_info:
filters:
vpc-id: vpc-12345678
# Gather information about a security group
- ec2_group_info:
filters:
group-name: example-1
# Gather information about a security group by id
- ec2_group_info:
filters:
group-id: sg-12345678
# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
- ec2_group_info:
filters:
group_id: sg-12345678
vpc-id: vpc-12345678
# Gather information about various security groups
- ec2_group_info:
filters:
group-name:
- example-1
- example-2
- example-3
# Gather information about any security group with a tag key Name and value Example.
# The quotes around 'tag:name' are important because of the colon in the value
- ec2_group_info:
filters:
"tag:Name": Example
'''
RETURN = '''
security_groups:
description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
type: list
returned: always
sample:
'''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, boto3_conn, HAS_BOTO3, get_aws_connection_info,
boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_filter_list,
camel_dict_to_snake_dict)
def main():
    """Entry point: describe EC2 security groups matching user filters.

    Exits via ``module.exit_json`` with ``security_groups`` (a list of
    snake-cased dicts, tags converted to an Ansible-friendly dict), or via
    ``module.fail_json`` on missing region, missing boto3, or an API error.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if module._name == 'ec2_group_facts':
        module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(
            module,
            conn_type='client',
            resource='ec2',
            region=region,
            endpoint=ec2_url,
            **aws_connect_params
        )
    else:
        module.fail_json(msg="region must be specified")

    # Replace filter key underscores with dashes, for compatibility, except
    # for tag filters: tag keys are user data and must be left untouched.
    sanitized_filters = module.params.get("filters")
    for key in list(sanitized_filters):
        if not key.startswith("tag:"):
            sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)

    try:
        security_groups = connection.describe_security_groups(
            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
        )
    except ClientError as e:
        # BUG FIX: botocore's ClientError has no .message attribute on
        # Python 3, so the original ``e.message`` raised AttributeError and
        # masked the real API error; str(e) is portable across versions.
        module.fail_json(msg=str(e), exception=traceback.format_exc())

    snaked_security_groups = []
    for security_group in security_groups['SecurityGroups']:
        # Snake-case the boto3 response keys, then convert the boto3-style
        # tag list to a plain dict — but do not camel-case the tag keys
        # themselves, since those are user-supplied data.
        security_group = camel_dict_to_snake_dict(security_group)
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
        snaked_security_groups.append(security_group)

    module.exit_json(security_groups=snaked_security_groups)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure

if TYPE_CHECKING:
    # Static type checkers take this branch: eager star-imports expose the
    # full public API of every CLIP submodule for analysis/autocomplete.
    from .configuration_clip import *
    from .feature_extraction_clip import *
    from .image_processing_clip import *
    from .image_processing_clip_fast import *
    from .modeling_clip import *
    from .processing_clip import *
    from .tokenization_clip import *
else:
    import sys

    # At runtime, replace this module object with a lazy proxy so the heavy
    # submodules (e.g. modeling_clip) are only imported when one of their
    # attributes is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
"""SCons.Tool.mwld
Tool-specific initialization for the Metrowerks CodeWarrior linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwld.py 2013/03/03 09:48:35 garyo"
import SCons.Tool
def generate(env):
    """Add Builders and construction variables for lib to an Environment."""
    # Install the standard static-library, shared-library and program
    # builders so mwld can act as a drop-in linker tool.
    SCons.Tool.createStaticLibBuilder(env)
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)

    # mwld doubles as the librarian: '-library' switches it to archive mode.
    env['AR'] = 'mwld'
    env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'

    # Unix-style '-L'/'-l' flags, but libraries keep the '.lib' suffix.
    env['LIBDIRPREFIX'] = '-L'
    env['LIBDIRSUFFIX'] = ''
    env['LIBLINKPREFIX'] = '-l'
    env['LIBLINKSUFFIX'] = '.lib'

    env['LINK'] = 'mwld'
    env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'

    # Shared links reuse the same linker binary; the actual command line is
    # produced at build time by shlib_generator via shlib_action (defined
    # later at module level, resolved when generate() is called).
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = '$LINKFLAGS'
    env['SHLINKCOM'] = shlib_action
    env['SHLIBEMITTER'] = shlib_emitter
def exists(env):
    # mwld ships alongside the CodeWarrior compiler, so delegate detection
    # to the mwcc tool's environment probing.
    import SCons.Tool.mwcc
    return SCons.Tool.mwcc.set_vars(env)
def shlib_generator(target, source, env, for_signature):
    """Generate the mwld command line for linking a shared library.

    :param target: list of target nodes (DLL and, optionally, import lib)
    :param source: list of source nodes (unused directly; expanded via $SOURCES)
    :param env: SCons construction environment
    :param for_signature: True when the command is computed for signatures
    :return: a list containing one command (itself a list of arguments)
    """
    cmd = ['$SHLINK', '$SHLINKFLAGS', '-shared']

    no_import_lib = env.get('no_import_lib', 0)
    if no_import_lib:
        # Bug fix: the original `cmd.extend('-noimplib')` extended the list
        # one *character* at a time; append the flag as a single argument.
        cmd.append('-noimplib')

    dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
    if dll:
        cmd.extend(['-o', dll])

    implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
    if implib:
        cmd.extend(['-implib', implib.get_string(for_signature)])

    cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])

    return [cmd]
def shlib_emitter(target, source, env):
    """Emitter for shared libraries: validate the DLL target and, unless
    suppressed via the 'no_import_lib' setting, add the matching import
    library to the target list."""
    dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
    suppress_implib = env.get('no_import_lib', 0)

    if not dll:
        raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX"))

    if not suppress_implib and \
       not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
        # Append an import library to the list of targets.
        implib = env.ReplaceIxes(dll,
                                 'SHLIBPREFIX', 'SHLIBSUFFIX',
                                 'LIBPREFIX', 'LIBSUFFIX')
        target.append(implib)

    return target, source
# generator=1 defers command construction to shlib_generator at build time,
# so per-environment settings such as 'no_import_lib' are honoured.
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/binfmt_script.c
*
* Copyright (C) 1996 Martin von Löwis
* original #!-checking implemented by tytso.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/binfmts.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/fs.h>
/* True for the two in-line whitespace separators allowed on a "#!" line. */
static inline bool spacetab(char c)
{
	return c == ' ' || c == '\t';
}

/*
 * Scan [first, last] (inclusive) for the first byte that is not a space
 * or tab; NULL when every byte in the range is a space/tab.
 */
static inline const char *next_non_spacetab(const char *first, const char *last)
{
	const char *p;

	for (p = first; p <= last; p++) {
		if (!spacetab(*p))
			return p;
	}
	return NULL;
}

/*
 * Scan [first, last] (inclusive) for the first space, tab or NUL byte;
 * NULL when no such terminator exists in the range.
 */
static inline const char *next_terminator(const char *first, const char *last)
{
	const char *p;

	for (p = first; p <= last; p++) {
		if (spacetab(*p) || !*p)
			return p;
	}
	return NULL;
}
/*
 * binfmt handler for "#!" interpreter scripts: parse the shebang line,
 * splice the interpreter (and optional single argument) into argv, and
 * hand execution over to the interpreter binary.
 */
static int load_script(struct linux_binprm *bprm)
{
	const char *i_name, *i_sep, *i_arg, *i_end, *buf_end;
	struct file *file;
	int retval;

	/* Not ours to exec if we don't start with "#!". */
	if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
		return -ENOEXEC;

	/*
	 * This section handles parsing the #! line into separate
	 * interpreter path and argument strings. We must be careful
	 * because bprm->buf is not yet guaranteed to be NUL-terminated
	 * (though the buffer will have trailing NUL padding when the
	 * file size was smaller than the buffer size).
	 *
	 * We do not want to exec a truncated interpreter path, so either
	 * we find a newline (which indicates nothing is truncated), or
	 * we find a space/tab/NUL after the interpreter path (which
	 * itself may be preceded by spaces/tabs). Truncating the
	 * arguments is fine: the interpreter can re-read the script to
	 * parse them on its own.
	 */
	buf_end = bprm->buf + sizeof(bprm->buf) - 1;
	i_end = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
	if (!i_end) {
		i_end = next_non_spacetab(bprm->buf + 2, buf_end);
		if (!i_end)
			return -ENOEXEC; /* Entire buf is spaces/tabs */
		/*
		 * If there is no later space/tab/NUL we must assume the
		 * interpreter path is truncated.
		 */
		if (!next_terminator(i_end, buf_end))
			return -ENOEXEC;
		i_end = buf_end;
	}
	/* Trim any trailing spaces/tabs from i_end */
	while (spacetab(i_end[-1]))
		i_end--;

	/* Skip over leading spaces/tabs */
	i_name = next_non_spacetab(bprm->buf+2, i_end);
	if (!i_name || (i_name == i_end))
		return -ENOEXEC; /* No interpreter name found */

	/* Is there an optional argument? */
	i_arg = NULL;
	i_sep = next_terminator(i_name, i_end);
	if (i_sep && (*i_sep != '\0'))
		i_arg = next_non_spacetab(i_sep, i_end);

	/*
	 * If the script filename will be inaccessible after exec, typically
	 * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
	 * up now (on the assumption that the interpreter will want to load
	 * this file).
	 */
	if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
		return -ENOENT;

	/*
	 * OK, we've parsed out the interpreter name and
	 * (optional) argument.
	 * Splice in (1) the interpreter's name for argv[0]
	 * (2) (optional) argument to interpreter
	 * (3) filename of shell script (replace argv[0])
	 *
	 * This is done in reverse order, because of how the
	 * user environment and arguments are stored.
	 */
	retval = remove_arg_zero(bprm);
	if (retval)
		return retval;
	/* (3): the original path becomes the interpreter's first argument. */
	retval = copy_string_kernel(bprm->interp, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;
	/*
	 * NUL-terminate the parsed substrings in place: bprm->buf is
	 * writable and no longer needed past i_end.
	 */
	*((char *)i_end) = '\0';
	if (i_arg) {
		*((char *)i_sep) = '\0';
		retval = copy_string_kernel(i_arg, bprm);
		if (retval < 0)
			return retval;
		bprm->argc++;
	}
	retval = copy_string_kernel(i_name, bprm);
	if (retval)
		return retval;
	bprm->argc++;
	/* Record the interpreter path as the new bprm->interp. */
	retval = bprm_change_interp(i_name, bprm);
	if (retval < 0)
		return retval;

	/*
	 * OK, now restart the process with the interpreter's dentry.
	 */
	file = open_exec(i_name);
	if (IS_ERR(file))
		return PTR_ERR(file);

	bprm->interpreter = file;
	return 0;
}
/* Registration record for the "#!" interpreter-script binary format. */
static struct linux_binfmt script_format = {
	.module = THIS_MODULE,
	.load_binary = load_script,
};

static int __init init_script_binfmt(void)
{
	register_binfmt(&script_format);
	return 0;
}

static void __exit exit_script_binfmt(void)
{
	unregister_binfmt(&script_format);
}

/*
 * NOTE(review): registered with core_initcall rather than module_init —
 * presumably so scripts are executable very early in boot; confirm before
 * changing the initcall level.
 */
core_initcall(init_script_binfmt);
module_exit(exit_script_binfmt);
MODULE_DESCRIPTION("Kernel support for scripts starting with #!");
MODULE_LICENSE("GPL");
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Scientific methods for the Volume datatypes.
.. moduleauthor:: Stuart A. Knock <Stuart@tvb.invalid>
"""
import tvb.datatypes.volumes_data as volumes_data
class VolumeScientific(volumes_data.VolumeData):
    """ This class exists to add scientific methods to VolumeData. """
    __tablename__ = None

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary_info = {"Volume type": self.__class__.__name__}
        summary_info["Origin"] = self.origin
        summary_info["Voxel size"] = self.voxel_size
        summary_info["Units"] = self.voxel_unit
        return summary_info
class ParcellationMaskScientific(volumes_data.ParcellationMaskData,
                                 VolumeScientific):
    """ This class exists to add scientific methods to ParcellationMaskData. """

    def _find_summary_info(self):
        """ Extend the base class's summary dictionary. """
        summary_info = super(ParcellationMaskScientific, self)._find_summary_info()
        # Shape of the raw data array and number of labelled regions.
        summary_info["Volume shape"] = self.get_data_shape('data')
        summary_info["Number of regions"] = self.get_data_shape('region_labels')[0]
        return summary_info
class StructuralMRIScientific(volumes_data.StructuralMRIData,
                              VolumeScientific):
    """ This class exists to add scientific methods to StructuralMRIData. """
    # No extra behaviour needed: the summary-info logic inherited from
    # VolumeScientific suffices for structural MRI volumes.
    pass
#ifndef Py_INTERNAL_AUDIT_H
#define Py_INTERNAL_AUDIT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/* Runtime audit hook state */
typedef struct _Py_AuditHookEntry {
struct _Py_AuditHookEntry *next;
Py_AuditHookFunction hookCFunction;
void *userData;
} _Py_AuditHookEntry;
extern int _PySys_Audit(
PyThreadState *tstate,
const char *event,
const char *argFormat,
...);
// _PySys_ClearAuditHooks() must not be exported: use extern rather than
// PyAPI_FUNC(). We want minimal exposure of this function.
extern void _PySys_ClearAuditHooks(PyThreadState *tstate);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AUDIT_H */ | c | github | https://github.com/python/cpython | Include/internal/pycore_audit.h |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: a single dict literal evaluated by the server
# to obtain this module's metadata.
{ 'active': False,
  'author': 'ADHOC SA.',
  'category': u'base.module_category_knowledge_management',
  'demo_xml': [],
  'depends': [u'product'],
  'description': u'Product Customer Price',
  'installable': True,
  'license': 'AGPL-3',
  'name': u'Product Customer Price',
  'test': [],
  # Views and access rules loaded on install/update.
  'data': [
      'view/product_view.xml',
      'security/ir.model.access.csv',
  ],
  'version': 'No version',
  'website': ''}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
import base64
import io
import json
import os
import platform
import signal
import subprocess
import tempfile
import zipfile
from abc import ABCMeta, abstractmethod
import mozinfo
import mozleak
import mozversion
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile, Preferences
from mozrunner import FirefoxRunner
from mozrunner.utils import test_environment, get_stack_fixer_function
from mozcrash import mozcrash
from .base import (Browser,
ExecutorBrowser,
NullBrowser,
OutputHandler,
OutputHandlerState,
browser_command,
cmd_arg,
get_free_port,
require_arg)
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executormarionette import (MarionetteTestharnessExecutor, # noqa: F401
MarionetteRefTestExecutor, # noqa: F401
MarionettePrintRefTestExecutor, # noqa: F401
MarionetteWdspecExecutor, # noqa: F401
MarionetteCrashtestExecutor) # noqa: F401
from ..webdriver_server import WebDriverServer
here = os.path.dirname(__file__)
__wptrunner__ = {"product": "firefox",
"check_args": "check_args",
"browser": {None: "FirefoxBrowser",
"wdspec": "FirefoxWdSpecBrowser"},
"executor": {"crashtest": "MarionetteCrashtestExecutor",
"testharness": "MarionetteTestharnessExecutor",
"reftest": "MarionetteRefTestExecutor",
"print-reftest": "MarionettePrintRefTestExecutor",
"wdspec": "MarionetteWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties",
"timeout_multiplier": "get_timeout_multiplier"}
def get_timeout_multiplier(test_type, run_info_data, **kwargs):
    """Compute the timeout multiplier for a test run.

    An explicit ``timeout_multiplier`` kwarg always wins; otherwise the
    multiplier is derived from the test type and the build/platform
    characteristics in ``run_info_data`` (debug/asan/tsan builds, code
    coverage, Android, Windows-on-ARM64).
    """
    explicit = kwargs["timeout_multiplier"]
    if explicit is not None:
        return explicit

    slow_build = (run_info_data["debug"] or
                  run_info_data.get("asan") or
                  run_info_data.get("tsan"))

    if test_type == "reftest":
        return 4 if slow_build else 2
    if slow_build:
        return 4 if run_info_data.get("ccov") else 3
    if run_info_data["os"] == "android":
        return 4
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1538725
    if run_info_data["os"] == "win" and run_info_data["processor"] == "aarch64":
        return 4
    if run_info_data.get("ccov"):
        return 2
    return 1
def check_args(**kwargs):
    # A Firefox binary path is mandatory for this product; fail early with
    # a clear error instead of an obscure one later.
    require_arg(kwargs, "binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
    """Build the keyword arguments used to construct the browser object.

    Mostly a straight projection of command-line kwargs plus a few derived
    values (timeout multiplier, leak checking, preload policy).
    """
    return {"binary": kwargs["binary"],
            "prefs_root": kwargs["prefs_root"],
            "extra_prefs": kwargs["extra_prefs"],
            "test_type": test_type,
            "debug_info": kwargs["debug_info"],
            "symbols_path": kwargs["symbols_path"],
            "stackwalk_binary": kwargs["stackwalk_binary"],
            "certutil_binary": kwargs["certutil_binary"],
            "ca_certificate_path": config.ssl_config["ca_cert_path"],
            "e10s": kwargs["gecko_e10s"],
            "enable_webrender": kwargs["enable_webrender"],
            "enable_fission": kwargs["enable_fission"],
            "stackfix_dir": kwargs["stackfix_dir"],
            "binary_args": kwargs["binary_args"],
            "timeout_multiplier": get_timeout_multiplier(test_type,
                                                         run_info_data,
                                                         **kwargs),
            # Leak checking is only meaningful on debug builds; an explicit
            # --no-leak-check (False) disables it.
            "leak_check": run_info_data["debug"] and (kwargs["leak_check"] is not False),
            "asan": run_info_data.get("asan"),
            "stylo_threads": kwargs["stylo_threads"],
            "chaos_mode_flags": kwargs["chaos_mode_flags"],
            "config": config,
            "browser_channel": kwargs["browser_channel"],
            "headless": kwargs["headless"],
            # Preloading a spare browser instance is pointless when pausing
            # after tests or when there is only a single test group.
            "preload_browser": kwargs["preload_browser"] and not kwargs["pause_after_test"] and not kwargs["num_test_groups"] == 1,
            "specialpowers_path": kwargs["specialpowers_path"]}
class WdSpecProfile(object):
    """Context manager that owns a browser profile and deletes it on exit."""

    def __init__(self, profile):
        self.profile = profile

    def __enter__(self):
        return self

    def __exit__(self, *exc_info, **kwargs):
        # Remove the on-disk profile regardless of whether an exception
        # occurred inside the managed block.
        self.profile.cleanup()
def executor_kwargs(logger, test_type, test_environment, run_info_data,
                    **kwargs):
    """Build the keyword arguments for the per-test-type executor.

    Starts from the generic executor kwargs and layers on Firefox-specific
    settings; the wdspec case additionally prepares a profile and the
    moz:firefoxOptions capability used by geckodriver.
    """
    executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
                                           **kwargs)
    executor_kwargs["close_after_done"] = test_type != "reftest"
    executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                   run_info_data,
                                                                   **kwargs)
    executor_kwargs["e10s"] = run_info_data["e10s"]
    capabilities = {}
    if test_type == "testharness":
        capabilities["pageLoadStrategy"] = "eager"
    if test_type in ("reftest", "print-reftest"):
        executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
        executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
    if test_type == "wdspec":
        options = {"args": []}
        if kwargs["binary"]:
            options["binary"] = kwargs["binary"]
        if kwargs["binary_args"]:
            options["args"] = kwargs["binary_args"]

        profile_creator = ProfileCreator(logger,
                                         kwargs["prefs_root"],
                                         test_environment.config,
                                         test_type,
                                         kwargs["extra_prefs"],
                                         kwargs["gecko_e10s"],
                                         kwargs["enable_fission"],
                                         kwargs["browser_channel"],
                                         kwargs["binary"],
                                         kwargs["certutil_binary"],
                                         test_environment.config.ssl_config["ca_cert_path"])

        if kwargs["processes"] > 1:
            # With multiple processes, we would need a profile directory per process, but we
            # don't have an easy way to do that, so include the profile in the capabilties
            # directly instead. This means recreating it per session, which is slow
            options["profile"] = profile_creator.create_base64()
            profile = None
        else:
            profile = profile_creator.create()
            options["args"].extend(["--profile", profile.profile])
            test_environment.env_extras_cms.append(WdSpecProfile(profile))

        capabilities["moz:firefoxOptions"] = options

        # This gets reused for firefox_android, but the environment setup
        # isn't required in that case
        if kwargs["binary"]:
            environ = get_environ(logger,
                                  kwargs["binary"],
                                  kwargs["debug_info"],
                                  kwargs["stylo_threads"],
                                  kwargs["headless"],
                                  kwargs["enable_webrender"],
                                  kwargs["chaos_mode_flags"])
            leak_report_file = setup_leak_report(kwargs["leak_check"], profile, environ)
            # This doesn't work with wdspec tests
            # In particular tests can create a session without passing in the capabilites
            # and in those cases we get the default geckodriver profile which doesn't
            # guarantee zero network access
            del environ["MOZ_DISABLE_NONLOCAL_CONNECTIONS"]
            executor_kwargs["environ"] = environ
        else:
            if kwargs["headless"] and "--headless" not in options["args"]:
                options["args"].append("--headless")
            leak_report_file = None

        # Bug fix: the original line ended with a stray trailing comma,
        # which stored a one-element tuple instead of the directory string.
        executor_kwargs["stackfix_dir"] = kwargs["stackfix_dir"]
        executor_kwargs["leak_report_file"] = leak_report_file
        executor_kwargs["asan"] = run_info_data.get("asan")

    if kwargs["certutil_binary"] is None:
        # Without certutil we cannot install the test CA, so accept
        # insecure certificates instead.
        capabilities["acceptInsecureCerts"] = True
    if capabilities:
        executor_kwargs["capabilities"] = capabilities
    executor_kwargs["debug"] = run_info_data["debug"]
    executor_kwargs["ccov"] = run_info_data.get("ccov", False)
    executor_kwargs["browser_version"] = run_info_data.get("browser_version")
    executor_kwargs["debug_test"] = kwargs["debug_test"]
    return executor_kwargs
def env_extras(**kwargs):
    """No extra environment context managers are needed for desktop Firefox."""
    return list()
def env_options():
    """Product-specific options for the test environment.

    The server host is set to 127.0.0.1 as Firefox is configured (through
    the network.dns.localDomains preference) to resolve the test domains to
    localhost without relying on the network stack.

    https://github.com/web-platform-tests/wpt/pull/9480
    """
    return dict(server_host="127.0.0.1",
                supports_debugger=True)
def run_info_extras(**kwargs):
    """Compute Firefox-specific entries for the run-info data.

    Boolean prefs supplied via --setpref (kwargs["extra_prefs"], a list of
    (name, value) pairs) can override several of the computed values.
    """
    def find_bool_pref(pref):
        # Return the first matching pref coerced to bool, or None if unset.
        for name, value in kwargs.get('extra_prefs', []):
            if name == pref:
                return value.lower() in ('true', '1')
        return None

    def bool_pref(pref):
        # Like find_bool_pref, but treat "unset" as False.
        return find_bool_pref(pref) is True

    rv = {"e10s": kwargs["gecko_e10s"],
          "wasm": kwargs.get("wasm", True),
          "verify": kwargs["verify"],
          "headless": kwargs.get("headless", False) or "MOZ_HEADLESS" in os.environ,
          "sw-e10s": True,
          "fission": kwargs.get("enable_fission") or bool_pref("fission.autostart"),
          "sessionHistoryInParent": (kwargs.get("enable_fission") or
                                     bool_pref("fission.autostart") or
                                     bool_pref("fission.sessionHistoryInParent")),
          "swgl": bool_pref("gfx.webrender.software")}

    # "sw-e10s" defaults to True for the current build, but an explicit
    # --setpref of dom.serviceWorkers.parent_intercept overrides it.
    sw_e10s_override = find_bool_pref("dom.serviceWorkers.parent_intercept")
    if sw_e10s_override is not None:
        rv["sw-e10s"] = sw_e10s_override

    rv.update(run_info_browser_version(**kwargs))
    return rv
def run_info_browser_version(**kwargs):
    """Extract version metadata from the Firefox binary via mozversion.

    Returns an empty dict when the version cannot be determined.
    """
    try:
        version_info = mozversion.get_version(kwargs["binary"])
    except mozversion.errors.VersionError:
        return {}
    if not version_info:
        return {}

    rv = {"browser_build_id": version_info.get("application_buildid", None),
          "browser_changeset": version_info.get("application_changeset", None)}
    # Respect an explicitly supplied browser_version.
    if "browser_version" not in kwargs:
        rv["browser_version"] = version_info.get("application_version")
    return rv
def update_properties():
    """Return (primary properties, dependent properties) used when updating
    test expectation metadata."""
    primary = ["os", "debug", "webrender", "fission", "e10s", "sw-e10s",
               "processor", "swgl"]
    dependents = {"os": ["version"], "processor": ["bits"]}
    return primary, dependents
def get_environ(logger, binary, debug_info, stylo_threads, headless, enable_webrender,
                chaos_mode_flags=None):
    """Build the process environment used to launch Firefox.

    Starts from mozrunner's standard test environment (XRE path, LSan
    configuration, logging) and layers on wptrunner-specific variables.
    """
    env = test_environment(xrePath=os.path.abspath(os.path.dirname(binary)),
                           debugger=debug_info is not None,
                           useLSan=True,
                           log=logger)

    env["STYLO_THREADS"] = str(stylo_threads)
    if chaos_mode_flags is not None:
        env["MOZ_CHAOSMODE"] = str(chaos_mode_flags)
    if headless:
        env["MOZ_HEADLESS"] = "1"
    # Force WebRender explicitly on or off so the browser state matches the
    # run-info "webrender" property.
    if enable_webrender:
        env["MOZ_WEBRENDER"] = "1"
        env["MOZ_ACCELERATED"] = "1"
    else:
        env["MOZ_WEBRENDER"] = "0"
    return env
def setup_leak_report(leak_check, profile, env):
    """Prepare the XPCOM bloat-log path used for leak checking.

    When leak checking is enabled, choose a pid-specific log file (inside
    the profile directory when one exists, otherwise the system temp dir),
    remove any stale file of that name, and export it via
    XPCOM_MEM_BLOAT_LOG. Returns the path, or None when disabled.
    """
    if not leak_check:
        return None

    filename = "runtests_leaks_%s.log" % os.getpid()
    base_dir = profile.profile if profile is not None else tempfile.gettempdir()
    leak_report_file = os.path.join(base_dir, filename)

    if os.path.exists(leak_report_file):
        os.remove(leak_report_file)
    env["XPCOM_MEM_BLOAT_LOG"] = leak_report_file
    return leak_report_file
class FirefoxInstanceManager:
    # NOTE(review): `__metaclass__` is the Python 2 spelling; this file
    # otherwise uses Python 3 idioms (bare super()), so the @abstractmethod
    # decorators below are not actually enforced at instantiation time —
    # confirm whether switching to `metaclass=ABCMeta` is safe.
    __metaclass__ = ABCMeta

    def __init__(self, logger, binary, binary_args, profile_creator, debug_info,
                 chaos_mode_flags, headless, enable_webrender, stylo_threads,
                 leak_check, stackfix_dir, symbols_path, asan):
        """Object that manages starting and stopping instances of Firefox."""
        self.logger = logger
        self.binary = binary
        self.binary_args = binary_args
        # A template profile; each started instance gets its own clone.
        self.base_profile = profile_creator.create()
        self.debug_info = debug_info
        self.chaos_mode_flags = chaos_mode_flags
        self.headless = headless
        self.enable_webrender = enable_webrender
        self.stylo_threads = stylo_threads
        self.leak_check = leak_check
        self.stackfix_dir = stackfix_dir
        self.symbols_path = symbols_path
        self.asan = asan

        # previous: the most recently stopped instance (kept around for
        # post-shutdown processing); current: the live instance, if any.
        self.previous = None
        self.current = None

    @abstractmethod
    def teardown(self, force=False):
        pass

    @abstractmethod
    def get(self):
        """Get a BrowserInstance for a running Firefox.

        This can only be called once per instance, and between calls stop_current()
        must be called."""
        pass

    def stop_current(self, force=False):
        """Shutdown the current instance of Firefox.

        The BrowserInstance remains available through self.previous, since some
        operations happen after shutdown."""
        if not self.current:
            return

        self.current.stop(force)

        self.previous = self.current
        self.current = None

    def start(self):
        """Start an instance of Firefox, returning a BrowserInstance handle"""
        profile = self.base_profile.clone(self.base_profile.profile)

        # Each instance listens on its own Marionette port so several can
        # coexist (e.g. the preloaded spare).
        marionette_port = get_free_port()
        profile.set_preferences({"marionette.port": marionette_port})

        env = get_environ(self.logger, self.binary, self.debug_info, self.stylo_threads,
                          self.headless, self.enable_webrender, self.chaos_mode_flags)

        args = self.binary_args[:] if self.binary_args else []
        args += [cmd_arg("marionette"), "about:blank"]

        debug_args, cmd = browser_command(self.binary,
                                          args,
                                          self.debug_info)

        leak_report_file = setup_leak_report(self.leak_check, profile, env)
        output_handler = FirefoxOutputHandler(self.logger,
                                              cmd,
                                              stackfix_dir=self.stackfix_dir,
                                              symbols_path=self.symbols_path,
                                              asan=self.asan,
                                              leak_report_file=leak_report_file)
        runner = FirefoxRunner(profile=profile,
                               binary=cmd[0],
                               cmdargs=cmd[1:],
                               env=env,
                               process_class=ProcessHandler,
                               process_args={"processOutputLine": [output_handler]})
        instance = BrowserInstance(self.logger, runner, marionette_port,
                                   output_handler, leak_report_file)

        self.logger.debug("Starting Firefox")
        runner.start(debug_args=debug_args,
                     interactive=self.debug_info and self.debug_info.interactive)
        output_handler.after_process_start(runner.process_handler.pid)
        self.logger.debug("Firefox Started")

        return instance
class SingleInstanceManager(FirefoxInstanceManager):
    """FirefoxInstanceManager that manages a single Firefox instance"""

    def get(self):
        assert not self.current, ("Tried to call get() on InstanceManager that has "
                                  "an existing instance")
        # Dispose of the previously stopped instance before starting anew.
        if self.previous:
            self.previous.cleanup()
            self.previous = None
        self.current = self.start()
        return self.current

    def teardown(self, force=False):
        for instance in (self.previous, self.current):
            if instance is not None:
                instance.stop(force)
                instance.cleanup()
        self.base_profile.cleanup()
class PreloadInstanceManager(FirefoxInstanceManager):
    def __init__(self, *args, **kwargs):
        """FirefoxInstanceManager that keeps once Firefox instance preloaded
        to allow rapid resumption after an instance shuts down."""
        super(PreloadInstanceManager, self).__init__(*args, **kwargs)
        # The spare, already-started instance handed out by the next get().
        self.pending = None

    def get(self):
        assert not self.current, ("Tried to call get() on InstanceManager that has "
                                  "an existing instance")
        if self.previous:
            self.previous.cleanup()
            self.previous = None
        # First call: nothing preloaded yet, so start the instance we are
        # about to hand out.
        if self.pending is None:
            self.pending = self.start()
        self.current = self.pending
        # Immediately preload a replacement for the next call.
        self.pending = self.start()
        return self.current

    def teardown(self, force=False):
        # The pending instance never ran tests, so skip its output
        # post-processing (unused=True).
        for instance, never_used in [(self.previous, False),
                                     (self.current, False),
                                     (self.pending, True)]:
            if instance:
                instance.stop(force, never_used)
                instance.cleanup()
        self.base_profile.cleanup()
class BrowserInstance:
    # Seconds to wait for each shutdown attempt before escalating.
    shutdown_timeout = 70

    def __init__(self, logger, runner, marionette_port, output_handler, leak_report_file):
        """Handle to a running Firefox instance"""
        self.logger = logger
        self.runner = runner
        self.marionette_port = marionette_port
        self.output_handler = output_handler
        self.leak_report_file = leak_report_file

    def stop(self, force=False, unused=False):
        """Stop Firefox

        :param force: Signal the firefox process without waiting for a clean shutdown
        :param unused: This instance was not used for running tests and so
                       doesn't have an active marionette session and doesn't require
                       output postprocessing.
        """
        is_running = self.runner is not None and self.runner.is_running()
        if is_running:
            self.logger.debug("Stopping Firefox %s" % self.pid())
            # Escalating shutdown ladder: graceful wait, then SIGTERM, then
            # SIGKILL (where available). The bool records whether the
            # attempt counts as a clean shutdown for leak-log purposes.
            shutdown_methods = [(True, lambda: self.runner.wait(self.shutdown_timeout)),
                                (False, lambda: self.runner.stop(signal.SIGTERM,
                                                                 self.shutdown_timeout))]
            if hasattr(signal, "SIGKILL"):
                shutdown_methods.append((False, lambda: self.runner.stop(signal.SIGKILL,
                                                                         self.shutdown_timeout)))
            if unused or force:
                # Don't wait for the instance to close itself
                shutdown_methods = shutdown_methods[1:]
            try:
                # For Firefox we assume that stopping the runner prompts the
                # browser to shut down. This allows the leak log to be written
                for i, (clean, stop_f) in enumerate(shutdown_methods):
                    self.logger.debug("Shutting down attempt %i/%i" % (i + 1, len(shutdown_methods)))
                    retcode = stop_f()
                    if retcode is not None:
                        self.logger.info("Browser exited with return code %s" % retcode)
                        break
            except OSError:
                # This can happen on Windows if the process is already dead
                pass
        elif self.runner:
            # The browser was already stopped, which we assume was a crash
            # TODO: Should we check the exit code here?
            clean = False
        # NOTE(review): if self.runner is None and unused is False, `clean`
        # is never assigned and the next line would raise NameError —
        # presumably stop() is only called while a runner exists; confirm.
        if not unused:
            self.output_handler.after_process_stop(clean_shutdown=clean)

    def pid(self):
        # The process handler (and its pid) may not exist yet or any more.
        if self.runner.process_handler is None:
            return None

        try:
            return self.runner.process_handler.pid
        except AttributeError:
            return None

    def is_alive(self):
        if self.runner:
            return self.runner.is_running()
        return False

    def cleanup(self):
        # Removes the cloned profile; the instance must not be reused after.
        self.runner.cleanup()
        self.runner = None
class FirefoxOutputHandler(OutputHandler):
    def __init__(self, logger, command, symbols_path=None, stackfix_dir=None, asan=False,
                 leak_report_file=None):
        """Filter for handling Firefox process output.

        This receives Firefox process output in the __call__ function, does
        any additional processing that's required, and decides whether to log
        the output. Because the Firefox process can be started before we know
        which filters are going to be required, we buffer all output until
        setup() is called. This is responsible for doing the final configuration
        of the output handlers.
        """
        super().__init__(logger, command)

        self.symbols_path = symbols_path
        if stackfix_dir:
            # We hide errors because they cause disconcerting `CRITICAL`
            # warnings in web platform test output.
            self.stack_fixer = get_stack_fixer_function(stackfix_dir,
                                                        self.symbols_path,
                                                        hideErrors=True)
        else:
            self.stack_fixer = None
        self.asan = asan
        self.leak_report_file = leak_report_file

        # These are filled in after configure_handlers() is called
        self.lsan_handler = None
        self.mozleak_allowed = None
        self.mozleak_thresholds = None
        self.group_metadata = {}

    def start(self, group_metadata=None, lsan_disabled=False, lsan_allowed=None,
              lsan_max_stack_depth=None, mozleak_allowed=None, mozleak_thresholds=None,
              **kwargs):
        """Configure the output handler"""
        if group_metadata is None:
            group_metadata = {}
        self.group_metadata = group_metadata

        self.mozleak_allowed = mozleak_allowed
        self.mozleak_thresholds = mozleak_thresholds

        # LSan output only exists on ASan builds.
        if self.asan:
            self.lsan_handler = mozleak.LSANLeaks(self.logger,
                                                  scope=group_metadata.get("scope", "/"),
                                                  allowed=lsan_allowed,
                                                  maxNumRecordedFrames=lsan_max_stack_depth,
                                                  allowAll=lsan_disabled)
        else:
            self.lsan_handler = None
        super().start()

    def after_process_stop(self, clean_shutdown=True):
        super().after_process_stop(clean_shutdown)
        if self.lsan_handler:
            self.lsan_handler.process()
        if self.leak_report_file is not None:
            if not clean_shutdown:
                # If we didn't get a clean shutdown there probably isn't a leak report file
                self.logger.warning("Firefox didn't exit cleanly, not processing leak logs")
            else:
                # We have to ignore missing leaks in the tab because it can happen that the
                # content process crashed and in that case we don't want the test to fail.
                # Ideally we would record which content process crashed and just skip those.
                self.logger.info("PROCESS LEAKS %s" % self.leak_report_file)
                mozleak.process_leak_log(
                    self.leak_report_file,
                    leak_thresholds=self.mozleak_thresholds,
                    ignore_missing_leaks=["tab", "gmplugin"],
                    log=self.logger,
                    stack_fixer=self.stack_fixer,
                    scope=self.group_metadata.get("scope"),
                    allowed=self.mozleak_allowed)
            if os.path.exists(self.leak_report_file):
                os.unlink(self.leak_report_file)

    def __call__(self, line):
        """Write a line of output from the firefox process to the log"""
        # Drop noisy GLib-GObject-CRITICAL spam entirely.
        if b"GLib-GObject-CRITICAL" in line:
            return
        if line:
            # Until start() has configured the filters, buffer raw lines.
            if self.state < OutputHandlerState.AFTER_HANDLER_START:
                self.line_buffer.append(line)
                return
            data = line.decode("utf8", "replace")
            if self.stack_fixer:
                data = self.stack_fixer(data)
            if self.lsan_handler:
                # May return None when the line was consumed by the LSan parser.
                data = self.lsan_handler.log(data)
            if data is not None:
                self.logger.process_output(self.pid,
                                           data,
                                           command=" ".join(self.command))
class ProfileCreator:
    """Factory for Firefox profiles preconfigured for web-platform-tests.

    Collects the preference files shipped under ``prefs_root``, applies the
    prefs wptrunner requires, and (optionally) provisions a certificate
    database trusting the wpt CA certificate.
    """

    def __init__(self, logger, prefs_root, config, test_type, extra_prefs, e10s,
                 enable_fission, browser_channel, binary, certutil_binary, ca_certificate_path):
        self.logger = logger
        self.prefs_root = prefs_root
        self.config = config
        self.test_type = test_type
        self.extra_prefs = extra_prefs
        self.e10s = e10s
        self.enable_fission = enable_fission
        self.browser_channel = browser_channel
        self.binary = binary
        self.certutil_binary = certutil_binary
        # Fix: ca_certificate_path was redundantly assigned twice; keep one.
        self.ca_certificate_path = ca_certificate_path

    def create(self, **kwargs):
        """Create a Firefox profile and return the mozprofile Profile object pointing at that
        profile

        :param kwargs: Additional arguments to pass into the profile constructor
        """
        preferences = self._load_prefs()
        profile = FirefoxProfile(preferences=preferences,
                                 restore=False,
                                 **kwargs)
        self._set_required_prefs(profile)
        if self.ca_certificate_path is not None:
            self._setup_ssl(profile)
        return profile

    def create_base64(self, **kwargs):
        """Create a profile, zip its contents, and return them base64-encoded.

        The on-disk profile is always cleaned up, even on error."""
        profile = self.create(**kwargs)
        try:
            with io.BytesIO() as buf:
                with zipfile.ZipFile(buf, "w", compression=zipfile.ZIP_DEFLATED) as zipf:
                    for dirpath, _, filenames in os.walk(profile.profile):
                        for filename in filenames:
                            src_path = os.path.join(dirpath, filename)
                            dest_path = os.path.relpath(src_path, profile.profile)
                            with open(src_path, "rb") as f:
                                zipf.writestr(dest_path, f.read())
                return base64.b64encode(buf.getvalue()).decode("ascii").strip()
        finally:
            profile.cleanup()

    def _load_prefs(self):
        """Collect preference dicts for the configured browser channel.

        Modern trees ship a ``profiles.json`` manifest; older trees fall back
        to legacy fixed paths. Command-line ``extra_prefs`` win over any
        file-based preferences."""
        prefs = Preferences()
        pref_paths = []
        profiles = os.path.join(self.prefs_root, 'profiles.json')
        if os.path.isfile(profiles):
            with open(profiles, 'r') as fh:
                for name in json.load(fh)['web-platform-tests']:
                    if self.browser_channel in (None, 'nightly'):
                        pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
                    elif name != 'unittest-features':
                        # Non-nightly channels skip nightly-only feature prefs.
                        pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
        else:
            # Old preference files used before the creation of profiles.json (remove when no longer supported)
            legacy_pref_paths = (
                os.path.join(self.prefs_root, 'prefs_general.js'),  # Used in Firefox 60 and below
                os.path.join(self.prefs_root, 'common', 'user.js'),  # Used in Firefox 61
            )
            for path in legacy_pref_paths:
                if os.path.isfile(path):
                    pref_paths.append(path)
        for path in pref_paths:
            if os.path.exists(path):
                prefs.add(Preferences.read_prefs(path))
            else:
                self.logger.warning("Failed to find base prefs file in %s" % path)
        # Add any custom preferences
        prefs.add(self.extra_prefs, cast=True)
        return prefs()

    def _set_required_prefs(self, profile):
        """Set preferences required for wptrunner to function.

        Note that this doesn't set the marionette port, since we don't always
        know that at profile creation time. So the caller is responsible for
        setting that once it's available."""
        profile.set_preferences({
            "network.dns.localDomains": ",".join(self.config.domains_set),
            "dom.file.createInChild": True,
            # TODO: Remove preferences once Firefox 64 is stable (Bug 905404)
            "network.proxy.type": 0,
            "places.history.enabled": False,
            "network.preload": True,
        })
        if self.e10s:
            profile.set_preferences({"browser.tabs.remote.autostart": True})
        if self.enable_fission:
            profile.set_preferences({"fission.autostart": True})
        if self.test_type in ("reftest", "print-reftest"):
            profile.set_preferences({"layout.interruptible-reflow.enabled": False})
        if self.test_type == "print-reftest":
            profile.set_preferences({"print.always_print_silent": True})
        # Bug 1262954: winxp + e10s, disable hwaccel
        if (self.e10s and platform.system() in ("Windows", "Microsoft") and
                "5.1" in platform.version()):
            # Fix: this previously wrote to `self.profile`, which does not
            # exist on ProfileCreator (AttributeError on this path); apply the
            # pref to the profile passed in as an argument instead.
            profile.set_preferences({"layers.acceleration.disabled": True})

    def _setup_ssl(self, profile):
        """Create a certificate database to use in the test profile. This is configured
        to trust the CA Certificate that has signed the web-platform.test server
        certificate."""
        if self.certutil_binary is None:
            self.logger.info("--certutil-binary not supplied; Firefox will not check certificates")
            return
        self.logger.info("Setting up ssl")
        # Make sure the certutil libraries from the source tree are loaded when using a
        # local copy of certutil
        # TODO: Maybe only set this if certutil won't launch?
        env = os.environ.copy()
        certutil_dir = os.path.dirname(self.binary or self.certutil_binary)
        if mozinfo.isMac:
            env_var = "DYLD_LIBRARY_PATH"
        elif mozinfo.isUnix:
            env_var = "LD_LIBRARY_PATH"
        else:
            env_var = "PATH"
        env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
                        if env_var in env else certutil_dir)

        def certutil(*args):
            # Run certutil once and mirror its output into the structured log.
            cmd = [self.certutil_binary] + list(args)
            self.logger.process_output("certutil",
                                       subprocess.check_output(cmd,
                                                               env=env,
                                                               stderr=subprocess.STDOUT),
                                       " ".join(cmd))

        pw_path = os.path.join(profile.profile, ".crtdbpw")
        with open(pw_path, "w") as f:
            # Use empty password for certificate db
            f.write("\n")
        cert_db_path = profile.profile
        # Create a new certificate db
        certutil("-N", "-d", cert_db_path, "-f", pw_path)
        # Add the CA certificate to the database and mark as trusted to issue server certs
        certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
                 "-n", "web-platform-tests", "-i", self.ca_certificate_path)
        # List all certs in the database
        certutil("-L", "-d", cert_db_path)
class FirefoxBrowser(Browser):
    """Browser implementation that drives desktop Firefox through an
    instance manager (optionally keeping a preloaded spare instance)."""

    init_timeout = 70

    def __init__(self, logger, binary, prefs_root, test_type, extra_prefs=None, debug_info=None,
                 symbols_path=None, stackwalk_binary=None, certutil_binary=None,
                 ca_certificate_path=None, e10s=False, enable_webrender=False, enable_fission=False,
                 stackfix_dir=None, binary_args=None, timeout_multiplier=None, leak_check=False,
                 asan=False, stylo_threads=1, chaos_mode_flags=None, config=None,
                 browser_channel="nightly", headless=None, preload_browser=False,
                 specialpowers_path=None, **kwargs):
        Browser.__init__(self, logger)
        self.logger = logger
        # Slow configurations (e.g. debug builds) get a scaled startup timeout.
        if timeout_multiplier:
            self.init_timeout = self.init_timeout * timeout_multiplier
        self.instance = None
        self._settings = None
        self.stackfix_dir = stackfix_dir
        self.symbols_path = symbols_path
        self.stackwalk_binary = stackwalk_binary
        self.asan = asan
        self.leak_check = leak_check
        self.specialpowers_path = specialpowers_path
        creator = ProfileCreator(logger, prefs_root, config, test_type, extra_prefs,
                                 e10s, enable_fission, browser_channel, binary,
                                 certutil_binary, ca_certificate_path)
        # Preloading keeps a spare instance warm so test groups start faster.
        manager_cls = PreloadInstanceManager if preload_browser else SingleInstanceManager
        self.instance_manager = manager_cls(logger, binary, binary_args, creator,
                                            debug_info, chaos_mode_flags, headless,
                                            enable_webrender, stylo_threads, leak_check,
                                            stackfix_dir, symbols_path, asan)

    def settings(self, test):
        """Compute (and cache) per-test settings; cached so that
        executor_browser() can consult them later."""
        self._settings = {
            "check_leaks": self.leak_check and not test.leaks,
            "lsan_disabled": test.lsan_disabled,
            "lsan_allowed": test.lsan_allowed,
            "lsan_max_stack_depth": test.lsan_max_stack_depth,
            "mozleak_allowed": self.leak_check and test.mozleak_allowed,
            "mozleak_thresholds": self.leak_check and test.mozleak_threshold,
            "special_powers": self.specialpowers_path and test.url_base == "/_mozilla/",
        }
        return self._settings

    def start(self, group_metadata=None, **kwargs):
        # Obtain a (possibly preloaded) instance and attach its output handler.
        self.instance = self.instance_manager.get()
        self.instance.output_handler.start(group_metadata, **kwargs)

    def stop(self, force=False):
        self.instance_manager.stop_current(force)
        self.logger.debug("stopped")

    def pid(self):
        return self.instance.pid()

    def is_alive(self):
        return self.instance and self.instance.is_alive()

    def cleanup(self, force=False):
        self.instance_manager.teardown(force)

    def executor_browser(self):
        """Return the class + kwargs an executor uses to talk to this browser."""
        assert self.instance is not None
        extensions = []
        if self._settings.get("special_powers", False):
            extensions.append(self.specialpowers_path)
        return ExecutorBrowser, {"marionette_port": self.instance.marionette_port,
                                 "extensions": extensions}

    def check_crash(self, process, test):
        """Log any minidumps from the profile; True when a crash was found."""
        dump_dir = os.path.join(self.instance.runner.profile.profile, "minidumps")
        try:
            return bool(mozcrash.log_crashes(self.logger,
                                             dump_dir,
                                             symbols_path=self.symbols_path,
                                             stackwalk_binary=self.stackwalk_binary,
                                             process=process,
                                             test=test))
        except IOError:
            self.logger.warning("Looking for crash dump files failed")
            return False
class FirefoxWdSpecBrowser(NullBrowser):
    """Null browser used for wdspec tests: geckodriver owns the actual
    Firefox process, so this object only carries per-test leak settings."""

    def __init__(self, logger, leak_check=False, **kwargs):
        super().__init__(logger, **kwargs)
        self.leak_check = leak_check

    def settings(self, test):
        check_leaks = self.leak_check and not test.leaks
        return {"check_leaks": check_leaks,
                "lsan_disabled": test.lsan_disabled,
                "lsan_allowed": test.lsan_allowed,
                "lsan_max_stack_depth": test.lsan_max_stack_depth,
                "mozleak_allowed": self.leak_check and test.mozleak_allowed,
                "mozleak_thresholds": self.leak_check and test.mozleak_threshold}
class GeckoDriverServer(WebDriverServer):
    """WebDriverServer that launches geckodriver wired to a Marionette port,
    with Rust backtraces enabled in the child process environment."""

    output_handler_cls = FirefoxOutputHandler

    def __init__(self, logger, marionette_port=2828, binary="geckodriver",
                 host="127.0.0.1", port=None, env=None, args=None):
        if env is None:
            env = os.environ.copy()
        # Make geckodriver print Rust panics with full backtraces.
        env["RUST_BACKTRACE"] = "1"
        WebDriverServer.__init__(self, logger, binary,
                                 host=host, port=port, env=env, args=args)
        self.marionette_port = marionette_port

    def make_command(self):
        base_cmd = [self.binary,
                    "--marionette-port", str(self.marionette_port),
                    "--host", self.host,
                    "--port", str(self.port)]
        return base_cmd + self._args
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file './acq4/modules/DataManager/FileInfoViewTemplate.ui'
#
# Created: Tue Dec 24 01:49:10 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims for PyQt4 API v1 vs v2: newer bindings drop QString
# and the UnicodeUTF8 encoding constant, so fall back to plain functions.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: strings are already unicode; pass through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer PyQt: translate() no longer takes an encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated layout: a resizable form holding a scroll area with a
    two-column form layout inside (presumably file-metadata label/value rows,
    judging by the source .ui name -- confirm against the DataManager module).

    Auto-generated code: regenerate from FileInfoViewTemplate.ui rather than
    editing by hand (see the warning in the file header).
    """
    def setupUi(self, Form):
        # Top-level form: a zero-margin, zero-spacing vertical layout.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 300)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Scroll area resizes its widget and never scrolls horizontally.
        self.scrollArea = QtGui.QScrollArea(Form)
        self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
        self.scrollAreaWidgetContents = QtGui.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 398, 298))
        self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
        # Inner form layout: fields expand, 10px between columns.
        self.formLayout_2 = QtGui.QFormLayout(self.scrollAreaWidgetContents)
        self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
        self.formLayout_2.setMargin(0)
        self.formLayout_2.setHorizontalSpacing(10)
        self.formLayout_2.setVerticalSpacing(0)
        self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout.addWidget(self.scrollArea)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Window title goes through Qt's translation machinery.
        Form.setWindowTitle(_translate("Form", "Form", None))
import numpy as np
import scipy.misc
import scipy.signal
import math
import draw
import ref
# =============================================================================
# General image processing functions
# =============================================================================
def get_transform(center, scale, res, rot=0):
    """Build the 3x3 homogeneous matrix mapping original-image coordinates
    into a `res`-sized crop centered on `center` at `scale`, optionally
    rotated by `rot` degrees about the crop center."""
    h = 200 * scale
    t = np.zeros((3, 3))
    t[0, 0] = float(res[1]) / h
    t[1, 1] = float(res[0]) / h
    t[0, 2] = res[1] * (-float(center[0]) / h + .5)
    t[1, 2] = res[0] * (-float(center[1]) / h + .5)
    t[2, 2] = 1
    if rot != 0:
        # Negate to match the rotation direction used when cropping.
        theta = -rot * np.pi / 180
        sn, cs = np.sin(theta), np.cos(theta)
        rot_mat = np.zeros((3, 3))
        rot_mat[0, :2] = [cs, -sn]
        rot_mat[1, :2] = [sn, cs]
        rot_mat[2, 2] = 1
        # Rotate about the crop center: shift to origin, rotate, shift back.
        shift_to = np.eye(3)
        shift_to[0, 2] = -res[1] / 2
        shift_to[1, 2] = -res[0] / 2
        shift_back = shift_to.copy()
        shift_back[:2, 2] *= -1
        t = shift_back.dot(rot_mat.dot(shift_to.dot(t)))
    return t
def transform(pt, center, scale, res, invert=0, rot=0):
    """Map a single pixel location between the original image frame and the
    cropped/rotated reference frame (inverse mapping when `invert`)."""
    mat = get_transform(center, scale, res, rot=rot)
    if invert:
        mat = np.linalg.inv(mat)
    homogeneous = np.dot(mat, np.array([pt[0], pt[1], 1.]).T)
    # Drop the homogeneous coordinate and truncate to integer pixels.
    return homogeneous[:2].astype(int)
def crop(img, center, scale, res, rot=0):
    """Crop `img` to a `res`-sized window centered on `center` at `scale`,
    optionally rotating by `rot` degrees. Regions outside the source image
    are zero-filled.

    NOTE(review): relies on scipy.misc.imrotate/imresize, which were removed
    from modern SciPy -- confirm the pinned SciPy version for this project.
    """
    # Upper left point
    ul = np.array(transform([0, 0], center, scale, res, invert=1))
    # Bottom right point
    br = np.array(transform(res, center, scale, res, invert=1))
    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        # Keep the channel dimension for color images.
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)
    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
    if not rot == 0:
        # Remove padding
        new_img = scipy.misc.imrotate(new_img, rot)
        new_img = new_img[pad:-pad, pad:-pad]
    return scipy.misc.imresize(new_img, res)
def two_pt_crop(img, scale, pt1, pt2, pad, res, chg=None):
    """Crop `img` around the segment pt1-pt2 (rotated so the segment is
    vertical) and build a 2-channel heatmap marking both endpoints.

    :param chg: optional data-augmentation spec; may contain 'flip',
        'scale', 'rotate' and 'translate' keys.
    :returns: (inp, hm) where inp is a (3, H, W) image array and hm is a
        (2, H, W) heatmap array.
    """
    center = (pt1 + pt2) / 2
    scale = max(20 * scale, np.linalg.norm(pt1 - pt2)) * .007
    scale *= pad
    angle = math.atan2(pt2[1] - pt1[1], pt2[0] - pt1[0]) * 180 / math.pi - 90
    flip = False
    # Handle data augmentation
    if chg is not None:
        # Flipping
        if 'flip' in chg:
            if np.random.rand() < .5:
                flip = True
        # Scaling
        if 'scale' in chg:
            scale *= min(1 + chg['scale'], max(1 - chg['scale'], (np.random.randn() * chg['scale']) + 1))
        # Rotation
        if 'rotate' in chg:
            angle += np.random.randint(-chg['rotate'], chg['rotate'] + 1)
        # Translation
        if 'translate' in chg:
            # Fix: xrange -> range for Python 3 compatibility (identical
            # iteration behavior on Python 2).
            for i in range(2):
                offset = np.random.randint(-chg['translate'], chg['translate'] + 1) * scale
                center[i] += offset
    # Create input image
    cropped = crop(img, center, scale, res, rot=angle)
    inp = np.zeros((3, res[0], res[1]))
    for i in range(3):
        inp[i, :, :] = cropped[:, :, i]
    # Create heatmap
    hm = np.zeros((2, res[0], res[1]))
    draw.gaussian(hm[0], transform(pt1, center, scale, res, rot=angle), 2)
    draw.gaussian(hm[1], transform(pt2, center, scale, res, rot=angle), 2)
    if flip:
        # Mirror both the image and the heatmaps horizontally.
        inp = np.array([np.fliplr(inp[i]) for i in range(len(inp))])
        hm = np.array([np.fliplr(hm[i]) for i in range(len(hm))])
    return inp, hm
def nms(img):
    """Non-maximum suppression on a 2D array: zero every element that is not
    the maximum of its 3x3 neighborhood (ties with the max are kept)."""
    win_size = 3
    footprint = np.ones((win_size, win_size))
    # Rank (win_size**2 - 1) of the sorted window selects the neighborhood max.
    local_max = scipy.signal.order_filter(img, footprint, win_size ** 2 - 1)
    suppressed = img.copy()
    suppressed[local_max - img > 0] = 0
    return suppressed
# =============================================================================
# Helpful display functions
# =============================================================================
def gauss(x, a, b, c, d=0):
    """Gaussian with amplitude `a`, mean `b`, std `c`, vertical offset `d`."""
    exponent = -((x - b) ** 2) / (2 * c ** 2)
    return a * np.exp(exponent) + d
def color_heatmap(x):
    """Map a 2D array of values (roughly in [0, 1]) to an RGB uint8 heatmap
    using three fixed Gaussian color-response curves."""
    rgb = np.zeros((x.shape[0], x.shape[1], 3))
    rgb[:, :, 0] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
    rgb[:, :, 1] = gauss(x, 1, .5, .3)
    rgb[:, :, 2] = gauss(x, 1, .2, .3)
    # Clamp anything above 1 before scaling to the uint8 range.
    rgb = np.minimum(rgb, 1)
    return (rgb * 255).astype(np.uint8)
def sample_with_heatmap(dataset, inp, out, num_rows=2, parts_to_show=None):
    """Compose a visualization: the input image on the left, per-part heatmap
    overlays tiled in a num_rows x num_cols grid on the right.

    :param inp: (3, H, W) input image array.
    :param out: (num_parts, h, w) heatmap array.
    :param parts_to_show: part indices or part names (looked up in ref.parts).
    :returns: uint8 image array of the composed figure.
    """
    img = np.zeros((inp.shape[1], inp.shape[2], inp.shape[0]))
    # Fix: xrange -> range for Python 3 compatibility.
    for i in range(3):
        img[:, :, i] = inp[i, :, :]
    if parts_to_show is None:
        parts_to_show = np.arange(out.shape[0])
    # Generate a single image to display input/output pair.
    # Fix: np.ceil returns a float and `/` is float division on Python 3;
    # array shapes must be integers, so cast/floor-divide explicitly
    # (identical results under Python 2 integer math).
    num_cols = int(np.ceil(float(len(parts_to_show)) / num_rows))
    size = img.shape[0] // num_rows
    full_img = np.zeros((img.shape[0], size * (num_cols + num_rows), 3), np.uint8)
    full_img[:img.shape[0], :img.shape[1]] = img
    inp_small = scipy.misc.imresize(img, [size, size])
    # Set up heatmap display for each part
    for i, part in enumerate(parts_to_show):
        if type(part) is str:
            # Part given by name: resolve to its index for this dataset.
            part_idx = ref.parts[dataset].index(part)
        else:
            part_idx = part
        out_resized = scipy.misc.imresize(out[part_idx], [size, size])
        out_resized = out_resized.astype(float) / 255
        # Blend 30% image with 70% colorized heatmap.
        out_img = inp_small.copy() * .3
        color_hm = color_heatmap(out_resized)
        out_img += color_hm * .7
        col_offset = (i % num_cols + num_rows) * size
        row_offset = (i // num_cols) * size
        full_img[row_offset:row_offset + size, col_offset:col_offset + size] = out_img
    return full_img
def sample_with_skeleton(annot, idx, preds, res=None):
    """Draw a predicted pose skeleton onto image `idx` from `annot` and
    return a crop around the annotated person.

    :param annot: annotation store with 'center'/'scale' datasets and a
        'name' attribute ('mpii' and 'flic' are handled below).
    :param preds: per-part predicted (x, y) coordinates, indexed via ref.parts.
    :param res: output crop resolution; defaults to [256, 256].
    """
    # Load image and basic info
    ds = annot.attrs['name']
    img = ref.loadimg(annot, idx)
    c = annot['center'][idx]
    s = annot['scale'][idx]
    if res is None:
        res = [256, 256]
    # Skeleton colors
    colors = [(255, 0, 0), # Upper arm (left)
              (255, 100, 100), # Lower arm (left)
              (0, 0, 255), # Upper arm (right)
              (100, 100, 255), # Lower arm (right)
              (100, 255, 100), # Head/neck/face
              (255, 75, 0), # Upper leg (left)
              (255, 175, 100), # Lower leg (left)
              (0, 75, 255), # Upper leg (right)
              (100, 175, 255) # Lower leg (right)
              ]
    # Draw arms (common to both datasets); line width scales with person size.
    draw.limb(img, preds[ref.parts[ds].index('lsho')], preds[ref.parts[ds].index('lelb')], colors[0], 5 * s)
    draw.limb(img, preds[ref.parts[ds].index('lwri')], preds[ref.parts[ds].index('lelb')], colors[1], 5 * s)
    draw.limb(img, preds[ref.parts[ds].index('rsho')], preds[ref.parts[ds].index('relb')], colors[2], 5 * s)
    draw.limb(img, preds[ref.parts[ds].index('rwri')], preds[ref.parts[ds].index('relb')], colors[3], 5 * s)
    if ds == 'mpii':
        # MPII
        # Draw head
        draw.circle(img, preds[ref.parts[ds].index('head')], colors[4], 5 * s)
        draw.circle(img, preds[ref.parts[ds].index('neck')], colors[4], 5 * s)
        # Draw legs
        draw.limb(img, preds[ref.parts[ds].index('lhip')], preds[ref.parts[ds].index('lkne')], colors[5], 5 * s)
        draw.limb(img, preds[ref.parts[ds].index('lank')], preds[ref.parts[ds].index('lkne')], colors[6], 5 * s)
        draw.limb(img, preds[ref.parts[ds].index('rhip')], preds[ref.parts[ds].index('rkne')], colors[7], 5 * s)
        draw.limb(img, preds[ref.parts[ds].index('rank')], preds[ref.parts[ds].index('rkne')], colors[8], 5 * s)
    elif ds == 'flic':
        # FLIC
        # Draw face
        draw.circle(img, preds[ref.parts[ds].index('leye')], colors[4], 3 * s)
        draw.circle(img, preds[ref.parts[ds].index('reye')], colors[4], 3 * s)
        draw.circle(img, preds[ref.parts[ds].index('nose')], colors[4], 3 * s)
        # Draw hips
        draw.circle(img, preds[ref.parts[ds].index('lhip')], colors[5], 5 * s)
        draw.circle(img, preds[ref.parts[ds].index('rhip')], colors[7], 5 * s)
    return crop(img, c, s, res)
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
# Load the package version; version.py defines __version__ via exec so the
# version lives in a single place.
try:
    exec(open('pyspark/version.py').read())
except IOError:
    print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
          file=sys.stderr)
    sys.exit(-1)
# Load pyspark/install.py as a standalone module so its helpers (download of a
# Spark distribution) are usable both here and in the installed package.
try:
    spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
    install_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(install_module)
except IOError:
    print("Failed to load the installing module (pyspark/install.py) which had to be "
          "packaged together.",
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
    JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
    # Release mode puts the jars in a jars directory
    JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
    print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
        JARS_PATH), file=sys.stderr)
    sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
    print(incorrect_invocation_message, file=sys.stderr)
    sys.exit(-1)
# Source locations (under SPARK_HOME) and their staging targets (under deps/).
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
            (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
    # Construct links for setup
    try:
        os.mkdir(TEMP_PATH)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to FileExistsError/OSError.
    except:
        print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
              file=sys.stderr)
        sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
class InstallCommand(install):
    # TODO(SPARK-32837) leverage pip's custom options
    def run(self):
        """Standard setuptools install, then optionally download and unpack a
        Spark distribution into the installed package when the
        PYSPARK_HADOOP_VERSION / PYSPARK_HIVE_VERSION environment variables
        request a non-default build."""
        install.run(self)
        # Make sure the destination is always clean.
        spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
        rmtree(spark_dist, ignore_errors=True)
        if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
            # Note that PYSPARK_VERSION environment is just a testing purpose.
            # PYSPARK_HIVE_VERSION environment variable is also internal for now in case
            # we support another version of Hive in the future.
            spark_version, hadoop_version, hive_version = install_module.checked_versions(
                os.environ.get("PYSPARK_VERSION", VERSION).lower(),
                os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
                os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
            if ("PYSPARK_VERSION" not in os.environ and
                ((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
                 (hadoop_version, hive_version))):
                # Do not download and install if they are same as default.
                return
            install_module.install_spark(
                dest=spark_dist,
                spark_version=spark_version,
                hadoop_version=hadoop_version,
                hive_version=hive_version)
# Main packaging flow: stage files under deps/ (symlink farm), run setup(),
# and always clean the staging area up afterwards.
try:
    # We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
    # find it where expected. The rest of the files aren't copied because they are accessed
    # using Python imports instead which will be resolved correctly.
    try:
        os.makedirs("pyspark/python/pyspark")
    except OSError:
        # Don't worry if the directory already exists.
        pass
    copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
    if (in_spark):
        # Construct the symlink farm - this is necessary since we can't refer to the path above the
        # package root and we need to copy the jars and scripts which are up above the python root.
        if _supports_symlinks():
            os.symlink(JARS_PATH, JARS_TARGET)
            os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
            os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
            os.symlink(DATA_PATH, DATA_TARGET)
            os.symlink(LICENSES_PATH, LICENSES_TARGET)
        else:
            # For windows fall back to the slower copytree
            copytree(JARS_PATH, JARS_TARGET)
            copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
            copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
            copytree(DATA_PATH, DATA_TARGET)
            copytree(LICENSES_PATH, LICENSES_TARGET)
    else:
        # If we are not inside of SPARK_HOME verify we have the required symlink farm
        if not os.path.exists(JARS_TARGET):
            print("To build packaging must be in the python directory under the SPARK_HOME.",
                  file=sys.stderr)
        if not os.path.isdir(SCRIPTS_TARGET):
            print(incorrect_invocation_message, file=sys.stderr)
            sys.exit(-1)
    # Scripts directive requires a list of each script path and does not take wild cards.
    script_names = os.listdir(SCRIPTS_TARGET)
    scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
    # We add find_spark_home.py to the bin directory we install so that pip installed PySpark
    # will search for SPARK_HOME with Python.
    scripts.append("pyspark/find_spark_home.py")
    with open('README.md') as f:
        long_description = f.read()
    setup(
        name='pyspark',
        version=VERSION,
        description='Apache Spark Python API',
        long_description=long_description,
        long_description_content_type="text/markdown",
        author='Spark Developers',
        author_email='dev@spark.apache.org',
        url='https://github.com/apache/spark/tree/master/python',
        packages=['pyspark',
                  'pyspark.cloudpickle',
                  'pyspark.mllib',
                  'pyspark.mllib.linalg',
                  'pyspark.mllib.stat',
                  'pyspark.ml',
                  'pyspark.ml.linalg',
                  'pyspark.ml.param',
                  'pyspark.sql',
                  'pyspark.sql.avro',
                  'pyspark.sql.pandas',
                  'pyspark.streaming',
                  'pyspark.bin',
                  'pyspark.sbin',
                  'pyspark.jars',
                  'pyspark.pandas',
                  'pyspark.pandas.data_type_ops',
                  'pyspark.pandas.indexes',
                  'pyspark.pandas.missing',
                  'pyspark.pandas.plot',
                  'pyspark.pandas.spark',
                  'pyspark.pandas.typedef',
                  'pyspark.pandas.usage_logging',
                  'pyspark.python.pyspark',
                  'pyspark.python.lib',
                  'pyspark.data',
                  'pyspark.licenses',
                  'pyspark.resource',
                  'pyspark.examples.src.main.python'],
        include_package_data=True,
        # Map the staged deps/ directories into package locations.
        package_dir={
            'pyspark.jars': 'deps/jars',
            'pyspark.bin': 'deps/bin',
            'pyspark.sbin': 'deps/sbin',
            'pyspark.python.lib': 'lib',
            'pyspark.data': 'deps/data',
            'pyspark.licenses': 'deps/licenses',
            'pyspark.examples.src.main.python': 'deps/examples',
        },
        package_data={
            'pyspark.jars': ['*.jar'],
            'pyspark.bin': ['*'],
            'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
                             'start-history-server.sh',
                             'stop-history-server.sh', ],
            'pyspark.python.lib': ['*.zip'],
            'pyspark.data': ['*.txt', '*.data'],
            'pyspark.licenses': ['*.txt'],
            'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
        scripts=scripts,
        license='http://www.apache.org/licenses/LICENSE-2.0',
        # Don't forget to update python/docs/source/getting_started/install.rst
        # if you're updating the versions or dependencies.
        install_requires=['py4j==0.10.9.2'],
        extras_require={
            'ml': ['numpy>=1.7'],
            'mllib': ['numpy>=1.7'],
            'sql': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
            ],
            'pandas_on_spark': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
                'numpy>=1.14',
            ],
        },
        python_requires='>=3.6',
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Typing :: Typed'],
        cmdclass={
            'install': InstallCommand,
        },
    )
finally:
    # We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
    # packaging.
    if (in_spark):
        # Depending on cleaning up the symlink farm or copied version
        if _supports_symlinks():
            os.remove(os.path.join(TEMP_PATH, "jars"))
            os.remove(os.path.join(TEMP_PATH, "bin"))
            os.remove(os.path.join(TEMP_PATH, "sbin"))
            os.remove(os.path.join(TEMP_PATH, "examples"))
            os.remove(os.path.join(TEMP_PATH, "data"))
            os.remove(os.path.join(TEMP_PATH, "licenses"))
        else:
            rmtree(os.path.join(TEMP_PATH, "jars"))
            rmtree(os.path.join(TEMP_PATH, "bin"))
            rmtree(os.path.join(TEMP_PATH, "sbin"))
            rmtree(os.path.join(TEMP_PATH, "examples"))
            rmtree(os.path.join(TEMP_PATH, "data"))
            rmtree(os.path.join(TEMP_PATH, "licenses"))
        os.rmdir(TEMP_PATH)
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2015, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#include "common.hpp"
#include "saturate_cast.hpp"
#include <vector>
namespace CAROTENE_NS {
bool isLaplacian3x3Supported(const Size2D &size, BORDER_MODE border)
{
    // A 3x3 Laplacian is supported when the platform configuration allows it,
    // the image is at least 8 columns wide (vectorized inner loop width), and
    // the border mode is one of the two handled variants.
    if (!isSupportedConfiguration() || size.width < 8)
        return false;
    return border == BORDER_MODE_CONSTANT || border == BORDER_MODE_REPLICATE;
}
/*
 * 3x3 Laplacian filter, u8 input -> u8 output (saturated).
 *
 * Effective kernel (derived from the scalar tail below):
 *     1  1  1
 *     1 -8  1
 *     1  1  1
 * computed as (sum of the 3x3 neighbourhood) - 9 * center.
 *
 * srcBase/srcStride, dstBase/dstStride: image base pointers and row strides
 * in bytes.  border selects CONSTANT or REPLICATE extrapolation; borderValue
 * is the out-of-image pixel value for CONSTANT mode.
 */
void Laplacian3x3(const Size2D &size,
                  const u8 * srcBase, ptrdiff_t srcStride,
                  u8 * dstBase, ptrdiff_t dstStride,
                  BORDER_MODE border, u8 borderValue)
{
    internal::assertSupportedConfiguration(isLaplacian3x3Supported(size, border));
#ifdef CAROTENE_NEON
    // Vertical sum of three border pixels (used when a neighbour row is outside).
    const uint16x8_t v_border_x3 = vdupq_n_u16(borderValue * 3);
    const uint16x8_t v_zero = vdupq_n_u16(0);
    const uint8x8_t v_border = vdup_n_u8(borderValue);
    // vsub holds the previous chunk's center-row pixels (to subtract 9*center).
    uint8x8_t vsub;
    // Sliding window of vertical 3-row sums: previous / current / next 8-pixel chunk.
    uint16x8_t tprev = v_zero, tcurr = v_zero, tnext = v_zero;
    uint16x8_t t0 = v_zero, t1 = v_zero, t2 = v_zero;
    ptrdiff_t width = (ptrdiff_t)size.width, height = (ptrdiff_t)size.height;
    for (ptrdiff_t y = 0; y < height; ++y)
    {
        // Rows above/below the current one; NULL marks "outside the image"
        // for BORDER_MODE_CONSTANT (REPLICATE clamps the row index instead).
        const u8 * srow0 = y == 0 && border == BORDER_MODE_CONSTANT ? NULL : internal::getRowPtr(srcBase, srcStride, std::max<ptrdiff_t>(y - 1, 0));
        const u8 * srow1 = internal::getRowPtr(srcBase, srcStride, y);
        const u8 * srow2 = y + 1 == height && border == BORDER_MODE_CONSTANT ? NULL : internal::getRowPtr(srcBase, srcStride, std::min(y + 1, height - 1));
        u8 * drow = internal::getRowPtr(dstBase, dstStride, y);
        // Scalar vertical 3-sums for the tail loop (left/center/right columns).
        s16 prevx = 0, currx = 0, nextx = 0;
        ptrdiff_t x = 0;
        // NOTE(review): on the last rows the vector loop stops one chunk early so
        // the final pixels are handled by the scalar tail — presumably to avoid
        // vector loads running past the buffer; confirm against caller layout.
        const ptrdiff_t bwidth = y + 2 < height ? width : (width - 8);
        // perform vertical convolution
        for ( ; x <= bwidth; x += 8)
        {
            internal::prefetch(srow0 + x);
            internal::prefetch(srow1 + x);
            internal::prefetch(srow2 + x);
            // Load 8 pixels per row; substitute the border vector for NULL rows.
            uint8x8_t x0 = !srow0 ? v_border : vld1_u8(srow0 + x);
            uint8x8_t x1 = vld1_u8(srow1 + x);
            uint8x8_t x2 = !srow2 ? v_border : vld1_u8(srow2 + x);
            // calculate values for plain CPU part below if needed
            if (x + 8 >= bwidth)
            {
                ptrdiff_t x3 = x == width ? width - 1 : x;
                ptrdiff_t x4 = border == BORDER_MODE_CONSTANT ? x3 - 1 : std::max<ptrdiff_t>(x3 - 1, 0);
                if (border == BORDER_MODE_CONSTANT && x4 < 0)
                    prevx = borderValue;
                else
                    prevx = (srow2 ? srow2[x4] : borderValue) + srow1[x4] + (srow0 ? srow0[x4] : borderValue);
                currx = (srow2 ? srow2[x3] : borderValue) + srow1[x3] + (srow0 ? srow0[x3] : borderValue);
            }
            // make shift
            if (x)
            {
                tprev = tcurr;
                tcurr = tnext;
            }
            // and calculate next value
            // tnext = x0 + x1 + x2 : vertical sum of the 3 rows for this chunk.
            tnext = vaddw_u8(vaddl_u8(x0, x1), x2);
            // make extrapolation for the first elements
            if (!x)
            {
                // make border
                if (border == BORDER_MODE_CONSTANT)
                    tcurr = v_border_x3;
                else if (border == BORDER_MODE_REPLICATE)
                    tcurr = vdupq_n_u16(vgetq_lane_u16(tnext, 0));
                vsub = x1;
                continue;
            }
            // combine 3 "shifted" vectors
            // t0/t1/t2 are the vertical sums at columns x-1 / x / x+1.
            t0 = vextq_u16(tprev, tcurr, 7);
            t1 = tcurr;
            t2 = vextq_u16(tcurr, tnext, 1);
            // and add them
            t0 = vqaddq_u16(t0, vqaddq_u16(t1, t2));
            // Subtract 9 * center (vsub*8 + vsub) from the 3x3 sum.
            int16x8_t tt0 = vsubq_s16(vreinterpretq_s16_u16(t0),
                                      vreinterpretq_s16_u16(vaddw_u8(vshll_n_u8(vsub, 3), vsub)));
            // Saturate to u8 and store the result for the PREVIOUS chunk.
            uint8x8_t it0 = vqmovun_s16(tt0);
            vst1_u8(drow + x - 8, it0);
            vsub = x1;
        }
        x -= 8;
        if (x == width)
            --x;
        // Scalar tail: finish remaining columns with the same sum9 - 9*center formula.
        for ( ; x < width; ++x)
        {
            // make extrapolation for the last elements
            if (x + 1 >= width)
            {
                if (border == BORDER_MODE_CONSTANT)
                    nextx = borderValue * 3;
                else if (border == BORDER_MODE_REPLICATE)
                    nextx = srow2[x] + srow1[x] + srow0[x];
            }
            else
            {
                nextx = (srow2 ? srow2[x + 1] : borderValue) +
                        srow1[x + 1] +
                        (srow0 ? srow0[x + 1] : borderValue);
            }
            s32 val = (prevx + currx + nextx) - 9 * srow1[x];
            drow[x] = internal::saturate_cast<u8>((s32)val);
            // make shift
            prevx = currx;
            currx = nextx;
        }
    }
#else
    (void)size;
    (void)srcBase;
    (void)srcStride;
    (void)dstBase;
    (void)dstStride;
    (void)border;
    (void)borderValue;
#endif
}
/* Whether the OpenCV-compatible Laplacian variants (aperture 1/3/5) can run:
 * supported configuration, at least an 8-wide, 1-tall image, and one of the
 * four implemented border modes. */
bool isLaplacianOpenCVSupported(const Size2D &size, BORDER_MODE border)
{
    if (!isSupportedConfiguration())
        return false;
    if (size.width < 8 || size.height < 1)
        return false;
    switch (border)
    {
    case BORDER_MODE_CONSTANT:
    case BORDER_MODE_REFLECT:
    case BORDER_MODE_REFLECT101:
    case BORDER_MODE_REPLICATE:
        return true;
    default:
        return false;
    }
}
/*
 * OpenCV-compatible Laplacian, aperture size 1: u8 input -> s16 output.
 *
 * Effective kernel (derived from the scalar tail below):
 *     0  1  0
 *     1 -4  1
 *     0  1  0
 *
 * For BORDER_MODE_CONSTANT a temporary row filled with borderValue stands in
 * for rows above/below the image.  Supports CONSTANT, REFLECT, REFLECT101
 * and REPLICATE borders (see isLaplacianOpenCVSupported).
 */
void Laplacian1OpenCV(const Size2D &size,
                      const u8 * srcBase, ptrdiff_t srcStride,
                      s16 * dstBase, ptrdiff_t dstStride,
                      BORDER_MODE border, u8 borderValue)
{
    internal::assertSupportedConfiguration(isLaplacianOpenCVSupported(size, border));
#ifdef CAROTENE_NEON
    ptrdiff_t rows = size.height, cols = size.width;
    // Border row for CONSTANT mode (padded by 2 on each side).
    std::vector<u8> _tmp;
    u8 *tmp = 0;
    if (border == BORDER_MODE_CONSTANT)
    {
        _tmp.assign(cols + 4,borderValue);
        tmp = &_tmp[2];
    }
    for( ptrdiff_t y = 0; y < rows; y++ )
    {
        // v0 / v1 / v2 = rows y-1 / y / y+1 with border extrapolation applied.
        const u8* v0 = 0;
        const u8* v1 = internal::getRowPtr(srcBase, srcStride, y);
        const u8* v2 = 0;
        // make border
        if (border == BORDER_MODE_REFLECT101) {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : y+1);
            v2 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 1 ? rows-2 : 0);
        } else if (border == BORDER_MODE_CONSTANT) {
            v0 = y > 0 ? internal::getRowPtr(srcBase, srcStride, y-1) : tmp;
            v2 = y < rows-1 ? internal::getRowPtr(srcBase, srcStride, y+1) : tmp;
        } else {
            // REPLICATE and REFLECT clamp to the first/last row here.
            v0 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : 0);
            v2 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 0 ? rows-1 : 0);
        }
        s16* drow = internal::getRowPtr(dstBase, dstStride, y);
        // tcurr/tnext: vertical part (v0 + v2 - 4*v1) for current/next chunk.
        int16x8_t tcurr = vmovq_n_s16(0x0);
        int16x8_t tnext = vmovq_n_s16(0x0);
        int16x8_t t0, t2;
        // xx0/xx1/xx2: sliding window of center-row chunks for horizontal taps.
        uint8x8_t xx0 = vmov_n_u8(0x0);
        uint8x8_t xx1 = vmov_n_u8(0x0);
        uint8x8_t xx2 = vmov_n_u8(0x0);
        ptrdiff_t x = 0;
        // Last rows keep their final chunk for the scalar tail below.
        const ptrdiff_t bcols = y + 2 < rows ? cols : (cols - 8);
        for( ; x <= bcols; x += 8 )
        {
            internal::prefetch(v0 + x);
            internal::prefetch(v1 + x);
            internal::prefetch(v2 + x);
            uint8x8_t x0 = vld1_u8(v0 + x);
            uint8x8_t x1 = vld1_u8(v1 + x);
            uint8x8_t x2 = vld1_u8(v2 + x);
            if(x) {
                xx0 = xx1;
                xx1 = xx2;
            } else {
                xx1 = x1;
                // make border
                // Lane 7 of xx1 acts as the "pixel left of column 0" after vext.
                if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT)
                {
                    xx1 = vset_lane_u8(vget_lane_u8(x1, 0),x1, 7);
                }
                else if (border == BORDER_MODE_CONSTANT)
                {
                    xx1 = vset_lane_u8(borderValue, x1, 7);
                }
                else if (border == BORDER_MODE_REFLECT101)
                {
                    xx1 = vset_lane_u8(vget_lane_u8(x1, 1),x1, 7);
                }
            }
            xx2 = x1;
            if(x) {
                tcurr = tnext;
            }
            // tnext = v0 + v2 - 4*v1 (vertical taps of the cross kernel).
            tnext = vsubq_s16(vreinterpretq_s16_u16(vaddl_u8(x0, x2)),
                              vreinterpretq_s16_u16(vshll_n_u8(x1, 2)));
            if(!x) {
                tcurr = tnext;
                continue;
            }
            // Horizontal taps: left neighbour + right neighbour of the center row.
            t0 = vreinterpretq_s16_u16(vmovl_u8(vext_u8(xx0, xx1, 7)));
            t2 = vreinterpretq_s16_u16(vmovl_u8(vext_u8(xx1, xx2, 1)));
            t0 = vaddq_s16(vqaddq_s16(t0, t2), tcurr);
            // Store the result for the PREVIOUS 8-pixel chunk.
            vst1q_s16(drow + x - 8, t0);
        }
        x -= 8;
        if(x == cols){
            x--;
        }
        // Scalar tail: same cross-kernel formula with per-mode column borders.
        for( ; x < cols; x++ )
        {
            s16 nextx;
            s16 prevx;
            // make border
            if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT)
            {
                prevx = x == 0 ? v1[0] : v1[x-1];
                nextx = x == cols-1 ? v1[x] : v1[x+1];
            }
            else if (border == BORDER_MODE_REFLECT101)
            {
                prevx = x == 0 ? v1[1] : v1[x-1];
                nextx = x == cols-1 ? v1[x-1] : v1[x+1];
            }
            else //if (border == BORDER_MODE_CONSTANT)
            {
                prevx = x == 0 ? borderValue : v1[x-1];
                nextx = x == cols-1 ? borderValue : v1[x+1];
            }
            *(drow+x) = prevx + nextx - 4*v1[x] + v0[x] + v2[x];
        }
    }
#else
    (void)size;
    (void)srcBase;
    (void)srcStride;
    (void)dstBase;
    (void)dstStride;
    (void)border;
    (void)borderValue;
#endif
}
/*
 * OpenCV-compatible Laplacian, aperture size 3: u8 input -> s16 output.
 *
 * Effective kernel (derived from the scalar tail: 2*(prevx + nextx - 4*v1 +
 * prevx2 + nextx2) with taps on the diagonal corners):
 *     2  0  2
 *     0 -8  0
 *     2  0  2
 *
 * Same border handling as Laplacian1OpenCV: a borderValue-filled temporary
 * row replaces out-of-image rows in CONSTANT mode.
 */
void Laplacian3OpenCV(const Size2D &size,
                      const u8 * srcBase, ptrdiff_t srcStride,
                      s16 * dstBase, ptrdiff_t dstStride,
                      BORDER_MODE border, u8 borderValue)
{
    internal::assertSupportedConfiguration(isLaplacianOpenCVSupported(size, border));
#ifdef CAROTENE_NEON
    ptrdiff_t rows = size.height, cols = size.width;
    // Border row for CONSTANT mode (padded by 2 on each side).
    std::vector<u8> _tmp;
    u8 *tmp = 0;
    if (border == BORDER_MODE_CONSTANT)
    {
        _tmp.assign(cols + 4,borderValue);
        tmp = &_tmp[2];
    }
    for( ptrdiff_t y = 0; y < rows; y++ )
    {
        // v0 / v1 / v2 = rows y-1 / y / y+1 with border extrapolation applied.
        const u8* v0 = 0;
        const u8* v1 = internal::getRowPtr(srcBase, srcStride, y);
        const u8* v2 = 0;
        // make border
        if (border == BORDER_MODE_REFLECT101) {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : y+1);
            v2 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 1 ? rows-2 : 0);
        } else if (border == BORDER_MODE_CONSTANT) {
            v0 = y > 0 ? internal::getRowPtr(srcBase, srcStride, y-1) : tmp;
            v2 = y < rows-1 ? internal::getRowPtr(srcBase, srcStride, y+1) : tmp;
        } else {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : 0);
            v2 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 0 ? rows-1 : 0);
        }
        s16* drow = internal::getRowPtr(dstBase, dstStride, y);
        // Sliding window of vertical sums (v0 + v2) and center term (4*v1).
        int16x8_t tprev = vmovq_n_s16(0x0);
        int16x8_t tcurr = vmovq_n_s16(0x0);
        int16x8_t tnext = vmovq_n_s16(0x0);
        int16x8_t tc = vmovq_n_s16(0x0);
        int16x8_t t0, t2, tcnext;
        ptrdiff_t x = 0;
        // Last rows keep their final chunk for the scalar tail below.
        const ptrdiff_t bcols = y + 2 < rows ? cols : (cols - 8);
        for( ; x <= bcols; x += 8 )
        {
            internal::prefetch(v0 + x);
            internal::prefetch(v1 + x);
            internal::prefetch(v2 + x);
            uint8x8_t x0 = vld1_u8(v0 + x);
            uint8x8_t x1 = vld1_u8(v1 + x);
            uint8x8_t x2 = vld1_u8(v2 + x);
            // tcnext = 4 * center row for this chunk.
            tcnext = vreinterpretq_s16_u16(vshll_n_u8(x1, 2));
            if(x) {
                tprev = tcurr;
                tcurr = tnext;
            }
            // tnext = top + bottom row for this chunk.
            tnext = vreinterpretq_s16_u16(vaddl_u8(x0, x2));
            if(!x) {
                tcurr = tnext;
                tc = tcnext;
                // make border
                // Lane 7 of tcurr acts as the "column left of 0" after vext.
                if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT)
                {
                    tcurr = vsetq_lane_s16(vgetq_lane_s16(tcurr, 0),tcurr, 7);
                }
                else if (border == BORDER_MODE_CONSTANT)
                {
                    tcurr = vsetq_lane_s16(borderValue, tcurr, 7);
                }
                else if (border == BORDER_MODE_REFLECT101)
                {
                    tcurr = vsetq_lane_s16(vgetq_lane_s16(tcurr, 1),tcurr, 7);
                }
                continue;
            }
            // Diagonal taps: (v0+v2) at columns x-1 and x+1, minus 4*v1 at x.
            t0 = vextq_s16(tprev, tcurr, 7);
            t2 = vextq_s16(tcurr, tnext, 1);
            t0 = vsubq_s16(vqaddq_s16(t0, t2), tc);
            tc = tcnext;
            // Final scale by 2, store result for the PREVIOUS chunk.
            t0 = vshlq_n_s16(t0, 1);
            vst1q_s16(drow + x - 8, t0);
        }
        x -= 8;
        if(x == cols){
            x--;
        }
        // Scalar tail: corner taps with per-mode column borders, scaled by 2.
        for( ; x < cols; x++ )
        {
            s16 nextx, nextx2;
            s16 prevx, prevx2;
            // make border
            if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT)
            {
                prevx = x == 0 ? v0[0] : v0[x-1];
                prevx2 = x == 0 ? v2[0] : v2[x-1];
                nextx = x == cols-1 ? v0[x] : v0[x+1];
                nextx2 = x == cols-1 ? v2[x] : v2[x+1];
            }
            else if (border == BORDER_MODE_REFLECT101)
            {
                prevx = x == 0 ? v0[1] : v0[x-1];
                prevx2 = x == 0 ? v2[1] : v2[x-1];
                nextx = x == cols-1 ? v0[x-1] : v0[x+1];
                nextx2 = x == cols-1 ? v2[x-1] : v2[x+1];
            }
            else //if (border == BORDER_MODE_CONSTANT)
            {
                prevx = x == 0 ? borderValue : v0[x-1];
                prevx2 = x == 0 ? borderValue : v2[x-1];
                nextx = x == cols-1 ? borderValue : v0[x+1];
                nextx2 = x == cols-1 ? borderValue : v2[x+1];
            }
            s16 res = prevx + nextx - 4*v1[x] + prevx2 + nextx2;
            *(drow+x) = 2*res;
        }
    }
#else
    (void)size;
    (void)srcBase;
    (void)srcStride;
    (void)dstBase;
    (void)dstStride;
    (void)border;
    (void)borderValue;
#endif
}
/*
 * OpenCV-compatible Laplacian, aperture size 5: u8 input -> s16 output.
 *
 * From the scalar tail, each output is 2 * (pprevx + prevx + currx + nextx +
 * nnextx), where per-column vertical weights over rows v0..v4 (y-2..y+2) are:
 *   column x-2 / x+2 : [1, 2, 2, 2, 1]
 *   column x-1 / x+1 : [2, 0, -4, 0, 2]
 *   column x         : [2, -4, -12, -4, 2]
 * i.e. a separately doubled 5x5 Laplacian kernel.
 *
 * The NEON loop keeps several generations of per-chunk column sums
 * ("...Old", "...OldOld", "...OldOldOld") so that taps at offsets -2..+2
 * can be reconstructed with vextq across chunk boundaries; the statement
 * order of these shift registers is essential.
 */
void Laplacian5OpenCV(const Size2D &size,
                      const u8 * srcBase, ptrdiff_t srcStride,
                      s16 * dstBase, ptrdiff_t dstStride,
                      BORDER_MODE border, u8 borderValue)
{
    internal::assertSupportedConfiguration(isLaplacianOpenCVSupported(size, border));
#ifdef CAROTENE_NEON
    ptrdiff_t rows = size.height, cols = size.width;
    // Border row for CONSTANT mode (padded by 2 on each side).
    std::vector<u8> _tmp;
    u8 *tmp = 0;
    if (border == BORDER_MODE_CONSTANT)
    {
        _tmp.assign(cols + 4,borderValue);
        tmp = &_tmp[2];
    }
    for( ptrdiff_t y = 0; y < rows; y++ )
    {
        // v0..v4 = rows y-2..y+2 with border extrapolation applied; v2 is center.
        const u8* v0 = 0;
        const u8* v1 = 0;
        const u8* v2 = internal::getRowPtr(srcBase, srcStride, y);
        const u8* v3 = 0;
        const u8* v4 = 0;
        // make border
        if (border == BORDER_MODE_REPLICATE) {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 1 ? y-2 : 0);
            v1 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : 0);
            v3 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 0 ? rows-1 : 0);
            v4 = internal::getRowPtr(srcBase, srcStride, y < rows-2 ? y+2 : rows > 0 ? rows-1 : 0);
        } else if (border == BORDER_MODE_REFLECT) {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 1 ? y-2 : rows > 1 ? 1-y : 0);
            v1 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : 0);
            v3 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 0 ? rows-1 : 0);
            v4 = internal::getRowPtr(srcBase, srcStride, y < rows-2 ? y+2 : rows > 1 ? 2*rows-(y+3) : 0);
        } else if (border == BORDER_MODE_REFLECT101) {
            v0 = internal::getRowPtr(srcBase, srcStride, y > 1 ? y-2 : rows > 2-y ? 2-y : 0); ///check
            v1 = internal::getRowPtr(srcBase, srcStride, y > 0 ? y-1 : rows > 1 ? 1 : 0);
            v3 = internal::getRowPtr(srcBase, srcStride, y < rows-1 ? y+1 : rows > 1 ? rows-2 : 0);
            v4 = internal::getRowPtr(srcBase, srcStride, y < rows-2 ? y+2 : rows > 2 ? 2*rows-(y+4) : 0);///bad if rows=2 y=1 rows - 4 + (2,1)
        } else if (border == BORDER_MODE_CONSTANT) {
            v0 = y > 1 ? internal::getRowPtr(srcBase, srcStride, y-2) : tmp;
            v1 = y > 0 ? internal::getRowPtr(srcBase, srcStride, y-1) : tmp;
            v3 = y < rows-1 ? internal::getRowPtr(srcBase, srcStride, y+1) : tmp;
            v4 = y < rows-2 ? internal::getRowPtr(srcBase, srcStride, y+2) : tmp;
        }
        s16* drow = internal::getRowPtr(dstBase, dstStride, y);
        int16x8_t tnext, tc, t0;
        int16x8_t tnext2, tnext3;
        int16x8_t tnext1Old, tnext2Old, tnext3Old;
        int16x8_t tnext4OldOldOld, tnext5OldOldOld;
        // Per-tap registers: t{pp,p,c,n}...1 correspond to offsets -2..+2.
        int16x8_t tcurr1 = vmovq_n_s16(0x0);
        int16x8_t tnext1 = vmovq_n_s16(0x0);
        int16x8_t tprev1 = vmovq_n_s16(0x0);
        int16x8_t tpprev1 = vmovq_n_s16(0x0);
        int16x8_t tppprev1 = vmovq_n_s16(0x0);
        int16x8_t tnext4Old = vmovq_n_s16(0x0);
        int16x8_t tnext5Old = vmovq_n_s16(0x0);
        int16x8_t tnext1OldOld = vmovq_n_s16(0x0);
        int16x8_t tnext2OldOld = vmovq_n_s16(0x0);
        int16x8_t tnext3OldOld = vmovq_n_s16(0x0);
        int16x8_t tnext4OldOld = vmovq_n_s16(0x0);
        int16x8_t tnext5OldOld = vmovq_n_s16(0x0);
        // do vertical convolution
        ptrdiff_t x = 0;
        // Last rows keep their final chunk for the scalar tail below.
        const ptrdiff_t bcols = y + 3 < rows ? cols : (cols - 8);
        for( ; x <= bcols; x += 8 )
        {
            internal::prefetch(v0 + x);
            internal::prefetch(v1 + x);
            internal::prefetch(v2 + x);
            internal::prefetch(v3 + x);
            internal::prefetch(v4 + x);
            uint8x8_t x0 = vld1_u8(v0 + x);
            uint8x8_t x1 = vld1_u8(v1 + x);
            uint8x8_t x2 = vld1_u8(v2 + x);
            uint8x8_t x3 = vld1_u8(v3 + x);
            uint8x8_t x4 = vld1_u8(v4 + x);
            if(x) {
                tcurr1 = tnext1;
            }
            // Rotate the multi-generation shift registers before computing
            // this chunk's column sums; order matters.
            tnext4OldOldOld = tnext4Old;
            tnext5OldOldOld = tnext5Old;
            tnext1Old = tnext1OldOld;
            tnext2Old = tnext2OldOld;
            tnext3Old = tnext3OldOld;
            tnext4Old = tnext4OldOld;
            tnext5Old = tnext5OldOld;
            // tnext3 = 2*(x1 + 2*x2 + x3), partial sum for the outer columns.
            tnext3 = vreinterpretq_s16_u16(vaddq_u16(vaddl_u8(x3, x2),vaddl_u8(x2, x1)));
            tnext3 = vshlq_n_s16(tnext3, 1);
            tc = vreinterpretq_s16_u16(vsubl_u8(x4, x2));
            tnext = vreinterpretq_s16_u16(vsubl_u8(x2, x0));
            tnext2 = vsubq_s16(tc, tnext);
            tnext1 = vaddq_s16(tnext3, tnext2);
            // tnext1 = x0 + 2*x1 + 2*x2 + 2*x3 + x4
            tnext2 = vshlq_n_s16(tnext2, 1);
            // tnext2 = 2*x4 - 4*x2 + 2*x0
            tnext3 = vsubq_s16(tnext2, vshlq_n_s16(tnext3, 1));
            // tnext3 = 2*x0 - 4*x1 - 12*x2 - 4*x3 + 2*x4
            tnext1OldOld = tnext1;
            tnext2OldOld = tnext2;
            tnext3OldOld = tnext3;
            tnext4OldOld = tnext2;
            tnext5OldOld = tnext1;
            if(x) {
                // Align each column-sum generation to its horizontal tap offset.
                tnext1 = vextq_s16(tnext1Old, tnext1, 2);
                tcurr1 = vextq_s16(tnext2Old, tnext2, 1);
                tprev1 = tnext3Old;
                if(x!=8) {
                    tpprev1 = vextq_s16(tnext4OldOldOld, tnext4Old, 7);
                    tppprev1 = vextq_s16(tnext5OldOldOld, tnext5Old, 6);
                }
            }
            if(!x) {
                // make border
                // Seed the left-of-image taps for the first chunk per mode.
                if (border == BORDER_MODE_REPLICATE) {
                    tpprev1 = vextq_s16(tnext2, tnext2, 7);
                    tpprev1 = vsetq_lane_s16(vgetq_lane_s16(tpprev1, 1),tpprev1, 0);
                    tprev1 = vextq_s16(tnext1, tnext1, 6);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 2),tprev1, 0);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 2),tprev1, 1);
                } else if (border == BORDER_MODE_REFLECT) {
                    tpprev1 = vextq_s16(tnext2, tnext2, 7);
                    tpprev1 = vsetq_lane_s16(vgetq_lane_s16(tpprev1, 1),tpprev1, 0);
                    tprev1 = vextq_s16(tnext1, tnext1, 6);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 3),tprev1, 0);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 2),tprev1, 1);
                } else if (border == BORDER_MODE_REFLECT101) {
                    tpprev1 = vextq_s16(tnext2, tnext2, 7);
                    tpprev1 = vsetq_lane_s16(vgetq_lane_s16(tpprev1, 2),tpprev1, 0);
                    tprev1 = vextq_s16(tnext1, tnext1, 6);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 3),tprev1, 1);
                    tprev1 = vsetq_lane_s16(vgetq_lane_s16(tprev1, 4),tprev1, 0);
                } else if (border == BORDER_MODE_CONSTANT) {
                    tpprev1 = vextq_s16(tnext2, tnext2, 7);
                    tpprev1 = vsetq_lane_s16(borderValue, tpprev1, 0);
                    tprev1 = vextq_s16(tnext1, tnext1, 6);
                    tprev1 = vsetq_lane_s16(borderValue, tprev1, 0);
                    tprev1 = vsetq_lane_s16(borderValue, tprev1, 1);
                }
                tppprev1 = tprev1;
                continue;
            }
            // Sum the 5 horizontal taps and apply the final scale by 2.
            t0 = vaddq_s16(vaddq_s16(vqaddq_s16(tcurr1, tprev1), vqaddq_s16(tpprev1, tppprev1)), tnext1);
            t0 = vaddq_s16(t0, t0);
            vst1q_s16(drow + x - 8, t0);
        }
        x -= 8;
        if(x >= cols - 1)
            x = cols-2;
        // Scalar tail: per-column vertical sums with explicit border cases.
        s16 pprevx = 0;
        s16 prevx = 0;
        s16 nextx = 0;
        s16 nnextx = 0;
        for( ; x < cols; x++ )
        {
            if (x == 0) {
                // make border
                if (border == BORDER_MODE_REPLICATE) {
                    pprevx = v0[0] + 2*v1[0] + 2*v2[0] + 2*v3[0] + v4[0];
                    prevx = 2*v0[0] - 4*v2[0] + 2*v4[0];
                } else if (border == BORDER_MODE_REFLECT) {
                    pprevx = v0[1] + 2*v1[1] + 2*v2[1] + 2*v3[1] + v4[1];
                    prevx = 2*v0[0] - 4*v2[0] + 2*v4[0];
                } else if (border == BORDER_MODE_REFLECT101) {
                    pprevx = v0[2] + 2*v1[2] + 2*v2[2] + 2*v3[2] + v4[2];
                    prevx = 2*v0[1] - 4*v2[1] + 2*v4[1];
                } else if (border == BORDER_MODE_CONSTANT) {
                    // 8 * borderValue = borderValue * (1+2+2+2+1); prevx column
                    // weights (2-4+2) cancel to 0 for a constant column.
                    pprevx = 8 * borderValue;
                    prevx = 0;
                }
            } else if (x == 1) {
                // make border
                if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT) {
                    pprevx = v0[0] + 2*v1[0] + 2*v2[0] + 2*v3[0] + v4[0];
                } else if (border == BORDER_MODE_REFLECT101) {
                    pprevx = v0[1] + 2*v1[1] + 2*v2[1] + 2*v3[1] + v4[1];
                } else if (border == BORDER_MODE_CONSTANT) {
                    pprevx = 8 * borderValue;
                }
                prevx = 2*v0[0] - 4*v2[0] + 2*v4[0];
            } else {
                pprevx = v0[x-2] + 2*v1[x-2] + 2*v2[x-2] + 2*v3[x-2] + v4[x-2];
                prevx = 2*v0[x-1] - 4*v2[x-1] + 2*v4[x-1];
            }
            s16 currx = 2*v0[x] - 4*v1[x] - 12*v2[x] - 4*v3[x] + 2*v4[x];
            if (x == cols-1) {
                // make border
                if (border == BORDER_MODE_REPLICATE) {
                    nextx = 2*v0[x] - 4*v2[x] + 2*v4[x];
                    nnextx = v0[x] + 2*v1[x] + 2*v2[x] + 2*v3[x] + v4[x];
                } else if (border == BORDER_MODE_REFLECT) {
                    nextx = 2*v0[x] - 4*v2[x] + 2*v4[x];
                    nnextx = v0[x-1] + 2*v1[x-1] + 2*v2[x-1] + 2*v3[x-1] + v4[x-1];
                } else if (border == BORDER_MODE_REFLECT101) {
                    nextx = 2*v0[x-1] - 4*v2[x-1] + 2*v4[x-1];
                    nnextx = v0[x-2] + 2*v1[x-2] + 2*v2[x-2] + 2*v3[x-2] + v4[x-2];
                } else if (border == BORDER_MODE_CONSTANT) {
                    nextx = 0;
                    nnextx = 8 * borderValue;
                }
            } else if (x == cols-2) {
                // make border
                if (border == BORDER_MODE_REPLICATE || border == BORDER_MODE_REFLECT) {
                    nnextx = v0[x+1] + 2*v1[x+1] + 2*v2[x+1] + 2*v3[x+1] + v4[x+1];
                } else if (border == BORDER_MODE_REFLECT101) {
                    nnextx = v0[x] + 2*v1[x] + 2*v2[x] + 2*v3[x] + v4[x];
                } else if (border == BORDER_MODE_CONSTANT) {
                    nnextx = 8 * borderValue;
                }
                nextx = 2*v0[x+1] - 4*v2[x+1] + 2*v4[x+1];
            } else {
                nextx = 2*v0[x+1] - 4*v2[x+1] + 2*v4[x+1];
                nnextx = v0[x+2] + 2*v1[x+2] + 2*v2[x+2] + 2*v3[x+2] + v4[x+2];
            }
            s16 res = pprevx + prevx + currx + nextx + nnextx;
            *(drow+x) = 2*res;
        }
    }
#else
    (void)size;
    (void)srcBase;
    (void)srcStride;
    (void)dstBase;
    (void)dstStride;
    (void)border;
    (void)borderValue;
#endif
}
} // namespace CAROTENE_NS | cpp | github | https://github.com/opencv/opencv | hal/carotene/src/laplacian.cpp |
#ifndef Py_SSL_H
#define Py_SSL_H
/* OpenSSL header files */
#include "openssl/evp.h"
#include "openssl/x509.h"
/*
* ssl module state
*/
/* Per-module state for the _ssl extension: heap types, exception objects,
 * interned strings, and the keylog lock.  Allocated by the import machinery
 * and retrieved with PyModule_GetState() (see get_ssl_state below). */
typedef struct {
    /* Types */
    PyTypeObject *PySSLContext_Type;
    PyTypeObject *PySSLSocket_Type;
    PyTypeObject *PySSLMemoryBIO_Type;
    PyTypeObject *PySSLSession_Type;
    PyTypeObject *PySSLCertificate_Type;
    /* SSL error object */
    PyObject *PySSLErrorObject;
    PyObject *PySSLCertVerificationErrorObject;
    PyObject *PySSLZeroReturnErrorObject;
    PyObject *PySSLWantReadErrorObject;
    PyObject *PySSLWantWriteErrorObject;
    PyObject *PySSLSyscallErrorObject;
    PyObject *PySSLEOFErrorObject;
    /* Error mappings */
    PyObject *err_codes_to_names;
    PyObject *lib_codes_to_names;
    /* socket type from module CAPI */
    PyTypeObject *Sock_Type;
    /* Interned strings */
    PyObject *str_library;
    PyObject *str_reason;
    PyObject *str_verify_code;
    PyObject *str_verify_message;
    /* keylog lock */
    PyThread_type_lock keylog_lock;
} _sslmodulestate;
/* Forward declaration so helpers below can look the module up by its def. */
static struct PyModuleDef _sslmodule_def;
/* Return the _ssl module state attached to the given module object.
 * The state is allocated at module creation time, so it must be non-NULL. */
Py_LOCAL_INLINE(_sslmodulestate*)
get_ssl_state(PyObject *module)
{
    _sslmodulestate *state = (_sslmodulestate *)PyModule_GetState(module);
    assert(state != NULL);
    return state;
}
/* Convenience accessors for reaching the module state from various objects:
 * a heap type, a context, a socket, or any object whose type holds the
 * state (memory BIOs and certificates). */
#define get_state_type(type) \
    (get_ssl_state(PyType_GetModuleByDef(type, &_sslmodule_def)))
#define get_state_ctx(c) (((PySSLContext *)(c))->state)
#define get_state_sock(s) (((PySSLSocket *)(s))->ctx->state)
#define get_state_obj(o) ((_sslmodulestate *)PyType_GetModuleState(Py_TYPE(o)))
#define get_state_mbio(b) get_state_obj(b)
#define get_state_cert(c) get_state_obj(c)
/* ************************************************************************
 * certificate
 */
/* Certificate input/output encodings; the first two reuse OpenSSL's
 * X509_FILETYPE constants.  PEM_AUX is offset by 0x100 to stay outside the
 * X509_FILETYPE range — presumably the "aux"/trusted-certificate PEM
 * variant; confirm against the encoder implementation. */
enum py_ssl_encoding {
    PY_SSL_ENCODING_PEM=X509_FILETYPE_PEM,
    PY_SSL_ENCODING_DER=X509_FILETYPE_ASN1,
    PY_SSL_ENCODING_PEM_AUX=X509_FILETYPE_PEM + 0x100,
};
/* Python wrapper around an OpenSSL X509 certificate; hash is cached. */
typedef struct {
    PyObject_HEAD
    X509 *cert;
    Py_hash_t hash;
} PySSLCertificate;
/* ************************************************************************
 * helpers and utils
 */
/* Drain a BIO into a bytes / str object (error is the message context). */
static PyObject *_PySSL_BytesFromBIO(_sslmodulestate *state, BIO *bio);
static PyObject *_PySSL_UnicodeFromBIO(_sslmodulestate *state, BIO *bio, const char *error);
#endif /* Py_SSL_H */ | c | github | https://github.com/python/cpython | Modules/_ssl.h |
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"testing"
_ "unsafe"
)
// heapObjectsCanMove is linked to the runtime-internal flag of the same name.
//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove
func heapObjectsCanMove() bool

// TestHeapObjectsCanMove verifies the runtime still promises a non-moving GC,
// a property external packages depend on.
func TestHeapObjectsCanMove(t *testing.T) {
	if heapObjectsCanMove() {
		// If this happens (or this test stops building),
		// it will break go4.org/unsafe/assume-no-moving-gc.
		t.Fatalf("heap objects can move!")
	}
}
import copy
import json
from geowatchutil.buffer.base import GeoWatchBuffer
from geowatchutil.channel.base import GeoWatchChannelTopic, GeoWatchChannelError
class GeoWatchChannelSlack(GeoWatchChannelTopic):
    """GeoWatch channel backed by Slack.

    Outgoing messages go through either the authenticated API client
    (``chat.postMessage``) or a plain incoming webhook; incoming messages are
    read from the RTM stream and cached in a local buffer.
    """

    # Public
    message_templates = None  # optional templates used to render outgoing messages

    # Private
    _buffer = None  # Used for temporarily caching messages locally since rtm returns all messages

    @classmethod
    def encode(cls, message):
        # Slack messages are already plain dicts; no transformation needed.
        return message

    @classmethod
    def decode(cls, message):
        # Symmetric with encode(): messages pass through unchanged.
        return message

    def _render_message_attachments(self, m, t):
        """Render message dict ``m`` into a deep copy of attachment template ``t``."""
        r = copy.deepcopy(t)
        for i in range(len(r["attachments"])):
            r["attachments"][i] = self._render_message_attachment(m, r["attachments"][i])
        return r

    def _render_message_attachment(self, m, a):
        """Format a single attachment template ``a`` with values from ``m``."""
        r = copy.deepcopy(a)
        for k in ["title", "title_link", "fallback", "text", "thumb_url"]:
            if k in r:
                r[k] = r[k].format(** m)
        if "fields" in r:
            for j in range(len(r["fields"])):
                f = r["fields"][j]
                if "title" in f:
                    f["title"] = f["title"].format(** m)
                if "value" in f:
                    f["value"] = f["value"].format(** m)
                r["fields"][j].update(f)
        return r

    def _render_message_plain(self, m, t):
        """Format a plain (non-attachment) template; returns None on failure."""
        r = None
        try:
            r = {}
            if "text" in t:
                r["text"] = t["text"].format(** m)
            if "icon_url" in t:
                r["icon_url"] = t["icon_url"].format(** m)
        except Exception:
            # Best-effort rendering: a malformed template must not abort the caller.
            print("Could not build plain slack message for resource")
            r = None
        return r

    def send_message(self, message, **kwargs):
        """Send one message; returns the API or webhook response."""
        if self._client.authtoken:
            # https://api.slack.com/methods/chat.postMessage
            return self._client._client.api_call(
                "chat.postMessage",
                channel=kwargs.pop('topic', '#'+self.topic),
                attachments=json.dumps(message["attachments"]))
        else:
            return self._client._post(self._client.url_webhook, message)

    def send_messages(self, messages, **kwargs):
        """Send every message in ``messages``; returns the list of responses.

        Bug fix: the previous implementation returned inside the loop, so
        only the first message was ever sent.
        """
        responses = []
        if self._client.authtoken:
            topic = kwargs.pop('topic', '#'+self.topic)
            for message in messages:
                # https://api.slack.com/methods/chat.postMessage
                responses.append(self._client._client.api_call(
                    "chat.postMessage",
                    channel=topic,
                    attachments=json.dumps(message["attachments"])))
        else:
            for message in messages:
                responses.append(self._client._post(self._client.url_webhook, message))
        return responses

    def get_messages_raw(self, count, block=True, timeout=5):
        """Pull pending RTM messages into the buffer and pop up to ``count``."""
        if self._client:
            self._buffer.add_messages(self._client._client.rtm_read())
            return self._buffer.pop_messages(count=count)
        else:
            raise GeoWatchChannelError("Client has not been initialized for GeoWatch Slack channel")

    def __init__(self, client, topic, mode, num_procs=1, message_templates=None):
        super(GeoWatchChannelSlack, self).__init__(
            client,
            topic,
            mode,
            num_procs=num_procs)
        self.message_templates = message_templates
        # Only consumers need the RTM websocket connection.
        if mode == "duplex" or mode == "consumer":
            self._client._client.rtm_connect()
        self._buffer = GeoWatchBuffer()
data = (
'Xi ', # 0x00
'Kao ', # 0x01
'Lang ', # 0x02
'Fu ', # 0x03
'Ze ', # 0x04
'Shui ', # 0x05
'Lu ', # 0x06
'Kun ', # 0x07
'Gan ', # 0x08
'Geng ', # 0x09
'Ti ', # 0x0a
'Cheng ', # 0x0b
'Tu ', # 0x0c
'Shao ', # 0x0d
'Shui ', # 0x0e
'Ya ', # 0x0f
'Lun ', # 0x10
'Lu ', # 0x11
'Gu ', # 0x12
'Zuo ', # 0x13
'Ren ', # 0x14
'Zhun ', # 0x15
'Bang ', # 0x16
'Bai ', # 0x17
'Ji ', # 0x18
'Zhi ', # 0x19
'Zhi ', # 0x1a
'Kun ', # 0x1b
'Leng ', # 0x1c
'Peng ', # 0x1d
'Ke ', # 0x1e
'Bing ', # 0x1f
'Chou ', # 0x20
'Zu ', # 0x21
'Yu ', # 0x22
'Su ', # 0x23
'Lue ', # 0x24
'[?] ', # 0x25
'Yi ', # 0x26
'Xi ', # 0x27
'Bian ', # 0x28
'Ji ', # 0x29
'Fu ', # 0x2a
'Bi ', # 0x2b
'Nuo ', # 0x2c
'Jie ', # 0x2d
'Zhong ', # 0x2e
'Zong ', # 0x2f
'Xu ', # 0x30
'Cheng ', # 0x31
'Dao ', # 0x32
'Wen ', # 0x33
'Lian ', # 0x34
'Zi ', # 0x35
'Yu ', # 0x36
'Ji ', # 0x37
'Xu ', # 0x38
'Zhen ', # 0x39
'Zhi ', # 0x3a
'Dao ', # 0x3b
'Jia ', # 0x3c
'Ji ', # 0x3d
'Gao ', # 0x3e
'Gao ', # 0x3f
'Gu ', # 0x40
'Rong ', # 0x41
'Sui ', # 0x42
'You ', # 0x43
'Ji ', # 0x44
'Kang ', # 0x45
'Mu ', # 0x46
'Shan ', # 0x47
'Men ', # 0x48
'Zhi ', # 0x49
'Ji ', # 0x4a
'Lu ', # 0x4b
'Su ', # 0x4c
'Ji ', # 0x4d
'Ying ', # 0x4e
'Wen ', # 0x4f
'Qiu ', # 0x50
'Se ', # 0x51
'[?] ', # 0x52
'Yi ', # 0x53
'Huang ', # 0x54
'Qie ', # 0x55
'Ji ', # 0x56
'Sui ', # 0x57
'Xiao ', # 0x58
'Pu ', # 0x59
'Jiao ', # 0x5a
'Zhuo ', # 0x5b
'Tong ', # 0x5c
'Sai ', # 0x5d
'Lu ', # 0x5e
'Sui ', # 0x5f
'Nong ', # 0x60
'Se ', # 0x61
'Hui ', # 0x62
'Rang ', # 0x63
'Nuo ', # 0x64
'Yu ', # 0x65
'Bin ', # 0x66
'Ji ', # 0x67
'Tui ', # 0x68
'Wen ', # 0x69
'Cheng ', # 0x6a
'Huo ', # 0x6b
'Gong ', # 0x6c
'Lu ', # 0x6d
'Biao ', # 0x6e
'[?] ', # 0x6f
'Rang ', # 0x70
'Zhuo ', # 0x71
'Li ', # 0x72
'Zan ', # 0x73
'Xue ', # 0x74
'Wa ', # 0x75
'Jiu ', # 0x76
'Qiong ', # 0x77
'Xi ', # 0x78
'Qiong ', # 0x79
'Kong ', # 0x7a
'Yu ', # 0x7b
'Sen ', # 0x7c
'Jing ', # 0x7d
'Yao ', # 0x7e
'Chuan ', # 0x7f
'Zhun ', # 0x80
'Tu ', # 0x81
'Lao ', # 0x82
'Qie ', # 0x83
'Zhai ', # 0x84
'Yao ', # 0x85
'Bian ', # 0x86
'Bao ', # 0x87
'Yao ', # 0x88
'Bing ', # 0x89
'Wa ', # 0x8a
'Zhu ', # 0x8b
'Jiao ', # 0x8c
'Qiao ', # 0x8d
'Diao ', # 0x8e
'Wu ', # 0x8f
'Gui ', # 0x90
'Yao ', # 0x91
'Zhi ', # 0x92
'Chuang ', # 0x93
'Yao ', # 0x94
'Tiao ', # 0x95
'Jiao ', # 0x96
'Chuang ', # 0x97
'Jiong ', # 0x98
'Xiao ', # 0x99
'Cheng ', # 0x9a
'Kou ', # 0x9b
'Cuan ', # 0x9c
'Wo ', # 0x9d
'Dan ', # 0x9e
'Ku ', # 0x9f
'Ke ', # 0xa0
'Zhui ', # 0xa1
'Xu ', # 0xa2
'Su ', # 0xa3
'Guan ', # 0xa4
'Kui ', # 0xa5
'Dou ', # 0xa6
'[?] ', # 0xa7
'Yin ', # 0xa8
'Wo ', # 0xa9
'Wa ', # 0xaa
'Ya ', # 0xab
'Yu ', # 0xac
'Ju ', # 0xad
'Qiong ', # 0xae
'Yao ', # 0xaf
'Yao ', # 0xb0
'Tiao ', # 0xb1
'Chao ', # 0xb2
'Yu ', # 0xb3
'Tian ', # 0xb4
'Diao ', # 0xb5
'Ju ', # 0xb6
'Liao ', # 0xb7
'Xi ', # 0xb8
'Wu ', # 0xb9
'Kui ', # 0xba
'Chuang ', # 0xbb
'Zhao ', # 0xbc
'[?] ', # 0xbd
'Kuan ', # 0xbe
'Long ', # 0xbf
'Cheng ', # 0xc0
'Cui ', # 0xc1
'Piao ', # 0xc2
'Zao ', # 0xc3
'Cuan ', # 0xc4
'Qiao ', # 0xc5
'Qiong ', # 0xc6
'Dou ', # 0xc7
'Zao ', # 0xc8
'Long ', # 0xc9
'Qie ', # 0xca
'Li ', # 0xcb
'Chu ', # 0xcc
'Shi ', # 0xcd
'Fou ', # 0xce
'Qian ', # 0xcf
'Chu ', # 0xd0
'Hong ', # 0xd1
'Qi ', # 0xd2
'Qian ', # 0xd3
'Gong ', # 0xd4
'Shi ', # 0xd5
'Shu ', # 0xd6
'Miao ', # 0xd7
'Ju ', # 0xd8
'Zhan ', # 0xd9
'Zhu ', # 0xda
'Ling ', # 0xdb
'Long ', # 0xdc
'Bing ', # 0xdd
'Jing ', # 0xde
'Jing ', # 0xdf
'Zhang ', # 0xe0
'Yi ', # 0xe1
'Si ', # 0xe2
'Jun ', # 0xe3
'Hong ', # 0xe4
'Tong ', # 0xe5
'Song ', # 0xe6
'Jing ', # 0xe7
'Diao ', # 0xe8
'Yi ', # 0xe9
'Shu ', # 0xea
'Jing ', # 0xeb
'Qu ', # 0xec
'Jie ', # 0xed
'Ping ', # 0xee
'Duan ', # 0xef
'Shao ', # 0xf0
'Zhuan ', # 0xf1
'Ceng ', # 0xf2
'Deng ', # 0xf3
'Cui ', # 0xf4
'Huai ', # 0xf5
'Jing ', # 0xf6
'Kan ', # 0xf7
'Jing ', # 0xf8
'Zhu ', # 0xf9
'Zhu ', # 0xfa
'Le ', # 0xfb
'Peng ', # 0xfc
'Yu ', # 0xfd
'Chi ', # 0xfe
'Gan ', # 0xff
) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para documentaristreaming
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# by dentaku65, DrZ3r0
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
# Channel registration metadata consumed by the pelisalacarta framework.
__channel__ = "documentaristreaming"
__category__ = "F,D"
__type__ = "generic"
__title__ = "documentaristreaming (TV)"
__language__ = "IT"
# Base URL of the scraped site.
sito = "http://documentaristreaming.net/"
# Extra logging when the user enables debug in settings.
DEBUG = config.get_setting("debug")
def isGeneric():
    # Marks this channel as compatible with the generic channel handler.
    return True
def mainlist(item):
    """Build the channel's root menu: latest additions, categories and search."""
    logger.info("pelisalacarta.documentaristreaming mainlist")
    itemlist = []
    itemlist.append(Item(channel=__channel__,
                         title="[COLOR azure]Aggiornamenti[/COLOR]",
                         action="peliculas",
                         url=sito,
                         thumbnail="http://dc584.4shared.com/img/XImgcB94/s7/13feaf0b538/saquinho_de_pipoca_01"))
    itemlist.append(Item(channel=__channel__,
                         title="[COLOR azure]Categorie[/COLOR]",
                         action="categorias",
                         url=sito,
                         thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"))
    itemlist.append(Item(channel=__channel__,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
    return itemlist
def peliculas(item):
    """Scrape a listing page into Item entries plus an optional 'next page' item.

    The regex below is tied to the site's exact HTML layout; any markup change
    breaks it, so it is left untouched.
    """
    logger.info("pelisalacarta.documentaristreaming peliculas")
    itemlist = []
    # Descarga la pagina
    data = scrapertools.cache_page(item.url)
    # Extrae las entradas (carpetas)
    # Captures: thumbnail URL, entry URL, title, plot.
    patron = '<img[^s]+src="(.*?)"[^>]+>[^<]+<[^<]+<[^<]+<[^<]+<[^<]+</a>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<div[^<]+<[^<]+<[^<]+</div>\s*'
    patron += '<h3[^>]+>\s*'
    patron += '<a href="(.*?)"[^>]+>\s*'
    patron += '(.*?)</a>\s*'
    patron += '</h3>\s*'
    patron += '<div[^>]+>\s*'
    patron += '<span[^>]+>\s*'
    patron += '<a[^<]+<[^<]+</a>\s*'
    patron += '<a[^<]+</a>\s*'
    patron += '</span>\s*'
    patron += '<span[^<]+</span>\s*'
    patron += '<a[^<]+<[^<]+<[^<]+<[^<]+<[^<]+</a>\s*'
    patron += '</div>\s*'
    patron += '<div[^>]+><p>(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot in matches:
        # Decode HTML entities before showing titles/plots in the UI.
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))
    # Extrae el paginador
    patronvideos = '<a class="next page-numbers" href="(.*?)"><i class="icon-iconic-right"></i></a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        # Resolve the next-page link relative to the current page URL.
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    return itemlist
def categorias(item):
    """Scrape the category submenu into Item entries pointing at peliculas()."""
    logger.info("pelisalacarta.documentaristreaming categorias")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    logger.info(data)
    # Narrow search by selecting only the combo
    start = data.find('<ul class="sub-menu menu-odd menu-depth-1">')
    end = data.find('</ul>', start)
    bloque = data[start:end]
    # The categories are the options for the combo
    patron = '<li class=[^>]+><a.*?href="(.*?)"[^>]+><span>(.*?)</span></a></li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)
    for url, titulo in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(titulo)
        # Category links can be relative; resolve against the page URL.
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))
    return itemlist
def search(item,texto):
logger.info("[documentaristreaming.py] "+item.url+" search "+texto)
item.url = "http://documentaristreaming.net/?s="+texto
try:
return peliculas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def play(item):
logger.info("[documentaristreaming.py] play")
itemlist = []
video_url = ""
server = None
data = scrapertools.cache_page(item.url)
url = scrapertools.find_single_match(data, '<iframe\s+(?:width="[^"]*"\s*height="[^"]*"\s*)?src="([^"]+)"')
if 'youtu' in url:
data = scrapertools.cache_page(url)
vid = scrapertools.find_single_match(data, '\'VIDEO_ID\'\s*:\s*"([^"]+)')
if vid != "":
video_url = "http://www.youtube.com/watch?v=%s" % vid
server = 'youtube'
elif 'rai.tv' in url:
data = scrapertools.cache_page(url)
video_url = scrapertools.find_single_match(data, '<meta\s+name="videourl_m3u8"\s*content="([^"]+)"')
if video_url != "":
item.url = video_url
item.server = server
itemlist.append(item)
return itemlist | unknown | codeparrot/codeparrot-clean | ||
#pragma once
// The legacy mechanism for dispatching operators in ATen is a Type
// object, which is essentially a giant virtual dispatch table
// for every operation we support dynamically dispatching over.
//
// This has been deprecated in favor of ATenDispatch, and in the future,
// c10 dispatcher.
// TODO: Clean up what remains here
#include <c10/core/impl/LocalDispatchKeySet.h>
namespace at {
// A RAII, thread local (!) guard that will disable dispatch to variable
// handler.
//
// NOTE [ Treating Variables as non-Variables in type dispatch ]
//
// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes
// dispatches on ATen functions to go to the non-variable implementation,
// bypassing autograd handling (and also profiling and tracing).
//
// To understand why this guard exists, it's helpful to understand the history
// behind how Variable was implemented. Previously, Variables were implemented
// as a wrapper on Tensors; so the act of processing a Variable involved
// unwrapping the underlying Tensor, and then calling the underlying base
// operation on /that/ operation
//
// However, after the Variable/Tensor merge, there is no concept of unwrapping
// a tensor anymore. If you just call the operation on the same variable
// again inside your VariableType handler, you'll dispatch back to
// VariableType, which is not what we want.
//
// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which
// when enabled will cause `legacyTensorType()` and `getType()` to always return
// non-Variable type, even if the tensor being called on is a variable.
/* Note [AutoDispatchBelowAutograd]
* AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used
* for kernel implementations and customized C++ kernels.
* If you are looking for a guard to run workload in inference mode, please use
* c10::InferenceMode RAII which is user facing API.
* In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode)
* was used in the user code for inference-only workload, this was under risk of
* producing wrong results silently in some edge cases. For example:
* ```
* torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
* torch::Tensor out = s * s;
* {
* at::AutoDispatchBelowAutograd guard;
* s.add_(1); // Skips version bump on `s`.
* }
* // WRONG GRADIENT! s.grad() are now computed using `s` value after the
* // inplace update.
* out.backward(torch::ones_like(out));
* ```
* Users should use `c10::InferenceMode` here so that it'll properly throw an
* error saying "one of the variables needed for gradient computation has be modified."
*/
struct TORCH_API AutoDispatchBelowAutograd {
AutoDispatchBelowAutograd() :
autograd_guard_(c10::autograd_dispatch_keyset) {
}
// disable all autograd dispatch keys
c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
};
// TODO: AutoNonVariableTypeMode should be removed in release 1.10.
struct TORCH_API AutoNonVariableTypeMode {
AutoNonVariableTypeMode(bool enabled = true) :
autograd_guard_(c10::autograd_dispatch_keyset) {
TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. "
"For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, "
"If you are looking for a user facing API to enable running your inference-only "
"workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code "
"is under risk of producing silent wrong result in some edge cases. "
"See Note [AutoDispatchBelowAutograd] for more details.");
TORCH_INTERNAL_ASSERT(enabled);
}
// disable all autograd dispatch keys
c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
};
struct TORCH_API AutoDispatchSkipFunctionalize {
AutoDispatchSkipFunctionalize() :
dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) {
}
c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
};
/* Note [AutoDispatchBelowADInplaceOrView]
* AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode
* before we split inplace & view ops out of VariableType kernel.
* Note this guard is used in VariableType kernels for functional ops
* as well as ADInplaceOrView kernels for inplace/view ops to enforce the
* Invariant:
* Once you are in VariableType/ADInplaceOrView kernel for an op,
* you never go back to a kernel on same dispatch key until
* you finish the current op.
*/
struct TORCH_API AutoDispatchBelowADInplaceOrView {
AutoDispatchBelowADInplaceOrView() :
dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) {
}
// disable Autograd & ADInplaceOrView dispatch keys
c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
};
} // namespace at | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/core/LegacyTypeDispatch.h |
#!/usr/bin/env python2
#!---coding:utf-8---
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
import logging
from base import Base, initPhantomjs
class Weibo(Base):
INDEX_URL = "http://weibo.com"
LOGIN_URL = "http://weibo.com/login.php"
def __init__(self, user, driver):
self.driver = driver
self.user = user
def login(self):
cookie = self.user["cookie"]
if cookie:
self.driver.add_cookie(cookie)
self.driver.get(self.LOGIN_URL)
logging.debug(self.driver.current_url)
return
self.driver.get(self.LOGIN_URL)
if self.driver.current_url != self.LOGIN_URL:
logging.info(self.driver.get_cookies())
logging.info("already logined.")
return
username_xpath = '''//*[@id="pl_login_form"]/div[1]/div/input'''
pass_xpath = '''//*[@id="pl_login_form"]/div[2]/div/input'''
savestate_xpath = '''//*[@id="login_form_savestate"]'''
submit_xpath = '''//*[@id="pl_login_form"]/div[6]/div[1]/a'''
username_element = self.driver.find_element_by_xpath(username_xpath)
username_element.send_keys(self.user["username"])
pass_element = self.driver.find_element_by_xpath(pass_xpath)
pass_element.send_keys(self.user["password"])
savestate_element = self.driver.find_element_by_xpath(savestate_xpath)
#print savestate_element.is_selected()
submit_element = self.driver.find_element_by_xpath(submit_xpath)
submit_element.click()
#TODO handle cookie
#self.user["cookie"] = self.driver.get_cookies()
logging.debug(self.driver.current_url)
def post(self,meg):
self.driver.get(self.INDEX_URL)
input_xpath = '''//*[@id="pl_content_publisherTop"]/div/div[2]/textarea'''
send_xpath = '''//*[@id="pl_content_publisherTop"]/div/div[3]/div[1]/a'''
input_element = self.driver.find_element_by_xpath(input_xpath)
input_element.clear()
input_element.click()
input_element.send_keys("")
input_element.send_keys(meg)
send_element = self.driver.find_element_by_xpath(send_xpath)
logging.warning(send_element.get_attribute("class"))
send_element.click()
def post_with_pic(self, meg, pic):
"not work"
raise Exception("Not support")
image_button_xpath = '''//*[@id="pl_content_publisherTop"]/div/div[3]/div[2]/span/a[2]'''
image_button = self.driver.find_element_by_xpath(image_button_xpath)
image_button.click()
image_upload_class = "layer_send_pic_v2"
image_upload_element = self.driver.find_element_by_class_name(image_upload_class)
image_upload_element.click() | unknown | codeparrot/codeparrot-clean | ||
---
name: updatecli-compose
on:
workflow_dispatch:
schedule:
- cron: "0 6 * * *"
permissions:
contents: read
jobs:
setup-matrix:
if: github.repository == 'elastic/elasticsearch'
runs-on: ubuntu-latest
outputs:
branches: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v4
- id: set-matrix
run: |
BRANCHES=$(jq -c '[.branches[].branch]' branches.json)
echo "matrix=$BRANCHES" >> $GITHUB_OUTPUT
compose:
needs: setup-matrix
if: github.repository == 'elastic/elasticsearch'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
branch: ${{ fromJson(needs.setup-matrix.outputs.branches) }}
permissions:
contents: write
packages: read
pull-requests: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ matrix.branch }}
- name: Update branch in SCM values
run: |
yq eval '.scm.branch = "${{ matrix.branch }}"' -i .github/updatecli/values.d/scm.yml
- uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- uses: elastic/oblt-actions/updatecli/run@v1
with:
# Runs in "--debug" mode to provide logs if the PR creation fails
command: --experimental compose apply --debug
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | unknown | github | https://github.com/elastic/elasticsearch | .github/workflows/updatecli-compose.yml |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deck', '0010_create_activities_from_proposals'),
]
operations = [
migrations.RemoveField(
model_name='proposal',
name='author',
),
migrations.RemoveField(
model_name='proposal',
name='created_at',
),
migrations.RemoveField(
model_name='proposal',
name='description',
),
migrations.RemoveField(
model_name='proposal',
name='id',
),
migrations.RemoveField(
model_name='proposal',
name='is_published',
),
migrations.RemoveField(
model_name='proposal',
name='slug',
),
migrations.RemoveField(
model_name='proposal',
name='title',
),
migrations.AddField(
model_name='proposal',
name='activity_ptr',
field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=None, serialize=False, to='deck.Activity'),
preserve_default=False,
),
] | unknown | codeparrot/codeparrot-clean | ||
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception as exc
from heat.common import template_format
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareComponentTest(common.HeatTestCase):
def setUp(self):
super(SoftwareComponentTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = '''
heat_template_version: 2013-05-23
resources:
mysql_component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo "Create MySQL"
tool: script
- actions: [UPDATE]
config: |
#!/bin/bash
echo "Update MySQL"
tool: script
inputs:
- name: mysql_port
outputs:
- name: root_password
'''
self.template = template_format.parse(tpl)
self.stack = stack.Stack(
self.ctx, 'software_component_test_stack',
template.Template(self.template))
self.component = self.stack['mysql_component']
self.rpc_client = mock.MagicMock()
self.component._rpc_client = self.rpc_client
def test_handle_create(self):
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
value = {'id': config_id}
self.rpc_client.create_software_config.return_value = value
props = dict(self.component.properties)
self.component.handle_create()
self.rpc_client.create_software_config.assert_called_with(
self.ctx,
group='component',
name=None,
inputs=props['inputs'],
outputs=props['outputs'],
config={'configs': props['configs']},
options=None)
self.assertEqual(config_id, self.component.resource_id)
def test_handle_delete(self):
self.resource_id = None
self.assertIsNone(self.component.handle_delete())
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
self.component.resource_id = config_id
self.rpc_client.delete_software_config.return_value = None
self.assertIsNone(self.component.handle_delete())
self.rpc_client.delete_software_config.side_effect = exc.NotFound
self.assertIsNone(self.component.handle_delete())
def test_resolve_attribute(self):
self.assertIsNone(self.component._resolve_attribute('others'))
self.component.resource_id = None
self.assertIsNone(self.component._resolve_attribute('configs'))
self.component.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
configs = self.template['resources']['mysql_component'
]['properties']['configs']
# configs list is stored in 'config' property of SoftwareConfig
value = {'config': {'configs': configs}}
self.rpc_client.show_software_config.return_value = value
self.assertEqual(configs, self.component._resolve_attribute('configs'))
self.rpc_client.show_software_config.side_effect = exc.NotFound
self.assertIsNone(self.component._resolve_attribute('configs'))
class SoftwareComponentValidationTest(common.HeatTestCase):
scenarios = [
(
'component_full',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
inputs:
- name: foo
outputs:
- name: bar
options:
opt1: blah
''',
err=None,
err_msg=None)
),
(
'no_input_output_options',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
''',
err=None,
err_msg=None)
),
(
'wrong_property_config',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
config: #!/bin/bash
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Unknown Property config')
),
(
'missing_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
inputs:
- name: foo
''',
err=exc.StackValidationFailed,
err_msg='Property configs not assigned')
),
(
'empty_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
''',
err=exc.StackValidationFailed,
err_msg='resources.component.properties.configs: '
'length (0) is out of range (min: 1, max: None)')
),
(
'invalid_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
actions: [CREATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='is not a list')
),
(
'config_empty_actions',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: []
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='component.properties.configs[0].actions: '
'length (0) is out of range (min: 1, max: None)')
),
(
'multiple_configs_per_action_single',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: #!/bin/bash
tool: script
- actions: [CREATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Defining more than one configuration for the same '
'action in SoftwareComponent "component" is not '
'allowed.')
),
(
'multiple_configs_per_action_overlapping_list',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE, UPDATE, RESUME]
config: #!/bin/bash
tool: script
- actions: [UPDATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Defining more than one configuration for the same '
'action in SoftwareComponent "component" is not '
'allowed.')
),
]
def setUp(self):
super(SoftwareComponentValidationTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = '''
heat_template_version: 2013-05-23
resources:
%s
''' % self.snippet
self.template = template_format.parse(tpl)
self.stack = stack.Stack(
self.ctx, 'software_component_test_stack',
template.Template(self.template))
self.component = self.stack['component']
self.component._rpc_client = mock.MagicMock()
def test_properties_schema(self):
if self.err:
err = self.assertRaises(self.err, self.stack.validate)
if self.err_msg:
self.assertIn(self.err_msg, six.text_type(err))
else:
self.assertIsNone(self.stack.validate()) | unknown | codeparrot/codeparrot-clean | ||
"""Unit tests for buildscripts.patch_builds patckage.""" | python | github | https://github.com/mongodb/mongo | buildscripts/tests/patch_builds/__init__.py |
#########################################################################
# #
# # ## # # # # #
# # # # # # # # # # #
# ##### # # # # ##### ## ### # # ## ### ### #
# # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # #
# # # ## # ## # # ### ### ### ## ### # # #
# # # # #
# ## # # #
# #
#########################################################################
#
# This file is part of AQUA-gpusph, a free CFD program based on SPH.
# Copyright (C) 2012 Jose Luis Cercos Pita <jl.cercos@upm.es>
#
# AQUA-gpusph is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AQUA-gpusph is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AQUA-gpusph. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import numpy as np
import os.path as path
import aquagpusph as aqua
# Read the experimental data
f = open(path.join('@EXAMPLE_DEST_DIR@/doc', 'Motion_Body.dat'), 'r')
lines = f.readlines()
f.close()
T = []
X = []
U = []
DUDT = []
for l in lines[2:]:
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
if l == '':
continue
t, dudt, u, x = map(float, l.split(' '))
T.append(t)
X.append(x)
U.append(u)
DUDT.append(dudt)
del f, lines
T = np.asarray(T)
X = np.asarray(X)
U = np.asarray(U)
DUDT = np.asarray(DUDT)
F = open('Motion.dat', 'w')
def main():
# Get the time instant
t = aqua.get("t")
# Interpolate the data
r = np.zeros(2, dtype=np.float32)
r[0] = np.interp(t, T, X)
u = np.zeros(2, dtype=np.float32)
u[0] = np.interp(t, T, U)
dudt = np.zeros(2, dtype=np.float32)
dudt[0] = np.interp(t, T, DUDT)
# Send it to AQUAgpusph
aqua.set("motion_r", r)
aqua.set("motion_drdt", u)
aqua.set("motion_ddrddt", dudt)
# Write output
F.write('{}\t{}\t{}\t{}\n'.format(t, r[0], u[0], dudt[0]))
F.flush()
return True | unknown | codeparrot/codeparrot-clean | ||
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Viennarna(AutotoolsPackage):
"""The ViennaRNA Package consists of a C code library and several
stand-alone programs for the prediction and comparison of RNA secondary
structures.
"""
homepage = "https://www.tbi.univie.ac.at/RNA/"
url = "https://www.tbi.univie.ac.at/RNA/download/sourcecode/2_3_x/ViennaRNA-2.3.5.tar.gz"
version('2.3.5', '4542120adae9b7abb605e2304c2a1326')
variant('sse', default=True, description='Enable SSE in order to substantially speed up execution')
variant('perl', default=True, description='Build ViennaRNA with Perl interface')
variant('python', default=True, description='Build ViennaRNA with Python interface')
depends_on('perl', type=('build', 'run'))
depends_on('python', type=('build', 'run'))
depends_on('libsvm')
depends_on('gsl')
def url_for_version(self, version):
url = 'https://www.tbi.univie.ac.at/RNA/download/sourcecode/{0}_x/ViennaRNA-{1}.tar.gz'
return url.format(version.up_to(2).underscored, version)
def configure_args(self):
args = self.enable_or_disable('sse')
args += self.with_or_without('python')
args += self.with_or_without('perl')
if 'python@3:' in self.spec:
args.append('--with-python3')
return args | unknown | codeparrot/codeparrot-clean | ||
from langchain_classic.schema.cache import __all__
EXPECTED_ALL = ["BaseCache", "RETURN_VAL_TYPE"]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL) | python | github | https://github.com/langchain-ai/langchain | libs/langchain/tests/unit_tests/schema/test_cache.py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import keyword_plan_campaign
from google.ads.googleads.v7.services.types import keyword_plan_campaign_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class KeywordPlanCampaignServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for KeywordPlanCampaignService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_keyword_plan_campaign: gapic_v1.method.wrap_method(
self.get_keyword_plan_campaign,
default_timeout=None,
client_info=client_info,
),
self.mutate_keyword_plan_campaigns: gapic_v1.method.wrap_method(
self.mutate_keyword_plan_campaigns,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_keyword_plan_campaign(self) -> typing.Callable[
[keyword_plan_campaign_service.GetKeywordPlanCampaignRequest],
keyword_plan_campaign.KeywordPlanCampaign]:
raise NotImplementedError
@property
def mutate_keyword_plan_campaigns(self) -> typing.Callable[
[keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest],
keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse]:
raise NotImplementedError
__all__ = (
'KeywordPlanCampaignServiceTransport',
) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base',
'version': '1.3',
'category': 'Hidden',
'description': """The kernel of OpenERP, needed for all installation.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': [],
'init_xml': [
'base_data.xml',
'security/base_security.xml',
'base_menu.xml',
'res/res_security.xml',
'res/res_config.xml',
'data/res.country.state.csv'
],
'update_xml': [
'base_update.xml',
'ir/wizard/wizard_menu_view.xml',
'ir/ir.xml',
'ir/ir_config_parameter_view.xml',
'ir/workflow/workflow_view.xml',
'ir/report/ir_report.xml',
'module/module_view.xml',
'module/module_data.xml',
'module/module_report.xml',
'module/wizard/base_module_import_view.xml',
'module/wizard/base_module_update_view.xml',
'module/wizard/base_language_install_view.xml',
'module/wizard/base_import_language_view.xml',
'module/wizard/base_module_upgrade_view.xml',
'module/wizard/base_module_configuration_view.xml',
'module/wizard/base_export_language_view.xml',
'module/wizard/base_update_translations_view.xml',
'res/res_request_view.xml',
'res/res_lang_view.xml',
'res/res_log_view.xml',
'res/res_partner_report.xml',
'res/res_partner_view.xml',
'res/res_partner_shortcut_data.xml',
'res/res_bank_view.xml',
'res/res_country_view.xml',
'res/res_currency_view.xml',
'res/res_partner_event_view.xml',
#'res/wizard/partner_sms_send_view.xml',
#'res/wizard/partner_wizard_massmail_view.xml',
'res/wizard/partner_clear_ids_view.xml',
'res/wizard/partner_wizard_ean_check_view.xml',
'res/res_partner_data.xml',
'res/ir_property_view.xml',
'security/base_security.xml',
'publisher_warranty/publisher_warranty_view.xml',
'security/ir.model.access.csv',
'res/res_widget_view.xml',
'res/res_widget_data.xml',
'publisher_warranty/publisher_warranty_data.xml',
],
'demo_xml': [
'base_demo.xml',
'res/res_partner_demo.xml',
'res/res_widget_demo.xml',
],
'test': [
'test/base_test.xml',
'test/base_test.yml',
'test/test_context.xml',
'test/bug_lp541545.xml',
'test/test_osv_expression.yml',
'test/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules.
'test/test_ir_values.yml',
# Commented because this takes some time.
# This must be (un)commented with the corresponding import statement
# in test/__init__.py.
# 'test/test_ir_cron.yml', # <-- These tests perform a roolback.
],
'installable': True,
'auto_install': True,
'certificate': '0076807797149',
"css": [ 'static/src/css/modules.css' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
###################################################################################
## MODULE : spell.__init__
## DATE : Mar 18, 2011
## PROJECT : SPELL
## DESCRIPTION: Module initialization
## --------------------------------------------------------------------------------
##
## Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.A.R.L.
##
## This file is part of SPELL.
##
## This component is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SPELL. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################### | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.contrib.auth.handlers.modwsgi import (
check_password, groups_for_user,
)
from django.contrib.auth.models import Group, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.test import TransactionTestCase, override_settings
# This must be a TransactionTestCase because the WSGI auth handler performs
# its own transaction management.
class ModWsgiHandlerTestCase(TransactionTestCase):
    """
    Tests for the mod_wsgi authentication handler.

    check_password() and groups_for_user() are the hooks Apache/mod_wsgi
    invokes for HTTP authentication and group authorisation; these tests
    pin their documented contract (None for unknown users, True/False for
    password checks, a list of bytestring group names).
    """
    available_apps = [
        'django.contrib.auth',
        'django.contrib.contenttypes',
    ]

    def test_check_password(self):
        """
        Verify that check_password returns the correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider
        """
        User.objects.create_user('test', 'test@example.com', 'test')
        # User not in database
        self.assertIsNone(check_password({}, 'unknown', ''))
        # Valid user with correct password
        self.assertTrue(check_password({}, 'test', 'test'))
        # correct password, but user is inactive
        User.objects.filter(username='test').update(is_active=False)
        self.assertFalse(check_password({}, 'test', 'test'))
        # Valid user with incorrect password
        self.assertFalse(check_password({}, 'test', 'incorrect'))

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_check_password_custom_user(self):
        """
        Verify that check_password returns the correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider
        with custom user installed
        """
        CustomUser._default_manager.create_user('test@example.com', '1990-01-01', 'test')
        # User not in database
        self.assertIsNone(check_password({}, 'unknown', ''))
        # Valid user with correct password
        self.assertTrue(check_password({}, 'test@example.com', 'test'))
        # Valid user with incorrect password
        self.assertFalse(check_password({}, 'test@example.com', 'incorrect'))

    def test_groups_for_user(self):
        """
        Check that groups_for_user returns correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Group_Authorisation
        """
        user1 = User.objects.create_user('test', 'test@example.com', 'test')
        User.objects.create_user('test1', 'test1@example.com', 'test1')
        group = Group.objects.create(name='test_group')
        user1.groups.add(group)
        # User not in database
        self.assertEqual(groups_for_user({}, 'unknown'), [])
        # group names are returned as bytestrings, per the mod_wsgi protocol
        self.assertEqual(groups_for_user({}, 'test'), [b'test_group'])
        self.assertEqual(groups_for_user({}, 'test1'), [])
# This is a component of AXIS, a front-end for emc
# Copyright 2007 Anders Wallin <anders.wallin@helsinki.fi>
#
# TJP 12 04 2007
# Rugludallur saw that spinbuttons had no initial value until after thumbs inc'd or de'c
# TJP saw that if xml prescribed <value>1234</value> the spinbutton locked up after the inc/dec
# it seems a new term in the __init__ may fix this
# end TJP 12 04 2007
#
# Added initval to checkbutton/scale for initial values, Dallur 15 April 2007 (Jarl Stefansson)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" A widget library for pyVCP
The layout and composition of a Python Virtual Control Panel is specified
with an XML file. The file must begin with <pyvcp>, and end with </pyvcp>
In the documentation for each widget, optional tags are shown bracketed:
[ <option>Something</option> ]
such a tag is not required for pyVCP to work, but may add functionality or
modify the behaviour of a widget.
Example XML file:
<pyvcp>
<led>
<size>40</size>
<halpin>"my-led"</halpin>
</led>
</pyvcp>
This will create a VCP with a single LED widget which indicates the value
of HAL pin compname.my-led
"""
from Tkinter import *
from hal import *
import math
import bwidget
import time
# -------------------------------------------
class pyvcp_dial(Canvas):
    # Dial widget by tomp
    """ A dial that outputs a HAL_FLOAT
        reacts to both mouse-wheel and mouse dragging
        <dial>
            [ <size>376</size> ]
            [ <dialcolor>"grey"</dialcolor> ]
            [ <edgecolor>"pink"</edgecolor> ]
            [ <dotcolor>"white"</dotcolor> ]
            [ <cpr>100</cpr> ]   number of changes per rev, is # of dial tick marks, beware hi values)
            [ <min_>-33.123456</min_> ]
            [ <max_>3.3</max_> ]
            [ <text>"Gallons per Hour"</text> ]   (knob label)
            [ <initval>123</initval> ]   (initial value a whole number must end in '.')
            [ <resolution>.001</resolution> ]   (scale value a whole number must end in '.')
            [ <halpin>"anaout"</halpin> ]
        </dial>

        key bindings:
            <Button-4>             untested no wheel mouse
            <Button-5>             untested no wheel mouse
            <Button1-Motion>       used internally during drag
            <ButtonPress>          used internally to record beginning of drag
            <ButtonRelease>        used internally at end of drag
            <Double-1>             divides scale by 10
            <Double-2>             resets scale to original value
            <Double-3>             multiplies scale by 10
            <Shift-1>              shift-click resets original analog value

        features:
            text autoscales
    """
    # FIXME:
    # -jogging should be enabled only when the circle has focus
    #   TJP nocando: only widgets have events, not thier 'items', the circle is an item
    # -circle should maintain focus when mouse over dot
    #   TJP nocando: ditto, the circle is an item, so focus & event are not aligned to it
    # -jogging by dragging with the mouse could work better
    # -add a scaled output, scale changes when alt/ctrl/shift is held down
    #   TJP dblLeftClick divides scale by 10 , dblRightClcik muxs by 10

    # class-wide counter used to auto-name the HAL pin ("dial.N.out")
    n=0

    #TJP TODO: let some artists look at it, butt ugly!
    #TJP cpr is overloaded, now it means "chgs per rev" not "counts per rev"
    #TJP the tik marks could get very fine, avoid high cpr to size ratios (easily seen)
    def __init__(self,root,pycomp,halpin=None,size=200,cpr=40,dialcolor="", \
            edgecolor="",dotcolor="grey",min_=-1e20,max_=1e20, \
            text=None,initval=0,resolution=0.1, \
            **kw):
        # NOTE: this file is Python 2 — size/10, size/2 etc. are integer
        # divisions when size is an int (the usual case from the XML).
        pad=size/10
        # snap the initial value onto the resolution grid
        counts = int(round(initval/resolution))
        self.out = counts * resolution            # float output out
        self.origValue=initval                    # in case user wants to reset the pot/valve/thingy
        #self.text3=resolution
        Canvas.__init__(self,root,width=size,height=size)
        pad2=pad-size/15
        self.circle2=self.create_oval(pad2,pad2,size-pad2,size-pad2,width=3)# edge circle
        self.itemconfig(self.circle2,fill=edgecolor,activefill=edgecolor)
        self.circle=self.create_oval(pad,pad,size-pad,size-pad)             # dial circle
        self.itemconfig(self.circle,fill=dialcolor,activefill=dialcolor)
        self.itemconfig(self.circle)
        self.mid=size/2                  # canvas center (dial axis)
        self.r=(size-2*pad)/2            # dial radius in pixels
        self.alfa=0                      # current pointer angle in radians
        self.d_alfa=2*math.pi/cpr        # angle step per count
        self.size=size
        self.funit=resolution            # current scale factor (value per count)
        self.origFunit=self.funit        # allow restoration
        self.mymin=min_
        self.mymax=max_
        self.dot = self.create_oval(self.dot_coords())
        self.itemconfig(self.dot,fill=dotcolor,activefill="black")
        # pointer arrow from the rim outward, rotated with self.alfa
        self.line = self.create_line( self.mid+(self.r*1)*math.cos(self.alfa), \
                self.mid+(self.r*1)*math.sin(self.alfa), \
                self.mid+(self.r*1.1)*math.cos(self.alfa), \
                self.mid+(self.r*1.1)*math.sin(self.alfa))
        self.itemconfig(self.line,arrow="last",arrowshape=(10,10,10))
        self.itemconfig(self.line,width=10)
        #TJP items get rendered in order of creation, so the knob will be behind these texts
        #TJP the font can be described with pixel size by using negative value
        self.txtroom=size/6
        # a title, if the user has supplied one
        if text!=None:
            self.title=self.create_text([self.mid,self.mid-self.txtroom],
                text=text,font=('Arial',-self.txtroom))
        # the output (digital readout of self.out)
        self.dro=self.create_text([self.mid,self.mid],
                text=str(self.out),font=('Arial',-self.txtroom))
        # the scale (current value-per-count multiplier)
        self.delta=self.create_text([self.mid,self.mid+self.txtroom],
                text='x '+ str(self.funit),font=('Arial',-self.txtroom))
        self.bind('<Button-4>',self.wheel_up)            # untested no wheel mouse
        self.bind('<Button-5>',self.wheel_down)          # untested no wheel mouse
        self.bind('<Button1-Motion>',self.motion)        #during drag
        self.bind('<ButtonPress>',self.bdown)            #begin of drag
        self.bind('<ButtonRelease>',self.bup)            #end of drag
        self.bind('<Double-1>',self.chgScaleDn)          # doubleclick scales down
        self.bind('<Double-2>',self.resetScale)          # doubleclick resets scale
        self.bind('<Double-3>',self.chgScaleUp)          # doubleclick scales up
        self.bind('<Shift-1>',self.resetValue)           # shift resets value
        self.draw_ticks(cpr)
        self.dragstartx=0
        self.dragstarty=0
        self.dragstart=0
        self.dotcolor=dotcolor
        # create the hal pin
        if halpin == None:
            halpin = "dial."+str(pyvcp_dial.n)+".out"
            pyvcp_dial.n += 1
        pycomp.newpin(halpin, HAL_FLOAT, HAL_OUT)
        self.halpin=halpin
        self.pycomp=pycomp

    def chgScaleDn(self,event):
        # reduces the scale by 10x
        self.funit=self.funit/10.0
        self.update_scale()
        self.update_dot()

    def chgScaleUp(self,event):
        # increases the scale by 10x
        self.funit=self.funit*10.0
        self.update_scale()
        self.update_dot()

    def resetScale(self,event):
        # reset scale to original value
        self.funit=self.origFunit
        self.update_scale()

    def resetValue(self,event):
        # reset output to orifinal value, re-snapped to the current scale
        counts = int(round(self.origValue / self.funit))
        self.out = counts * self.funit
        self.update_dot()
        self.update_dro()

    def dot_coords(self):
        # calculate the bounding-box coordinates for the dot at angle self.alfa
        DOTR=0.04*self.size
        DOTPOS=0.85
        midx = self.mid+DOTPOS*self.r*math.cos(self.alfa)
        midy = self.mid+DOTPOS*self.r*math.sin(self.alfa)
        return midx-DOTR, midy-DOTR,midx+DOTR,midy+DOTR

    def bdown(self,event):
        # record drag origin (angle relative to dial center)
        self.dragstartx=event.x
        self.dragstarty=event.y
        self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))
        self.itemconfig(self.dot,fill="black",activefill="black")

    def bup(self,event):
        self.itemconfig(self.dot,fill=self.dotcolor)

    def motion(self,event):
        # emit one up()/down() step each time the drag angle advances by d_alfa
        dragstop = math.atan2((event.y-self.mid),(event.x-self.mid))
        delta = dragstop - self.dragstart
        if delta>=self.d_alfa:
            self.up()
            self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))
        elif delta<=-self.d_alfa:
            self.down()
            self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))
        self.itemconfig(self.dot,fill="black",activefill="black")

    def wheel_up(self,event):
        self.up()

    def wheel_down(self,event):
        self.down()

    def down(self):
        # step output one count down, clamped at mymin; the epsilon term
        # guards against float rounding keeping the value off-grid
        old_out = self.out
        counts = math.ceil(self.out / self.funit - self.funit*1e-6) - 1
        self.out = counts * self.funit
        if self.out < self.mymin:
            self.out = self.mymin
        if self.out != old_out:
            self.alfa-=self.d_alfa
        self.update_dot()
        self.update_dro()

    def up(self):
        # step output one count up, clamped at mymax
        old_out = self.out
        counts = math.floor(self.out / self.funit + self.funit*1e-6) + 1
        self.out = counts * self.funit
        if self.out > self.mymax:
            self.out = self.mymax
        if self.out != old_out:
            self.alfa+=self.d_alfa
        self.update_dot()
        self.update_dro()

    def update_dot(self):
        # move the dot and the pointer arrow to the current angle
        self.coords(self.dot, self.dot_coords() )
        self.coords(self.line, self.mid+(self.r*1)*math.cos(self.alfa),self.mid+(self.r*1)*math.sin(self.alfa), \
                self.mid+(self.r*1.1)*math.cos(self.alfa), \
                self.mid+(self.r*1.1)*math.sin(self.alfa))

    def update_dro(self):
        valtext = str(self.out)
        self.itemconfig(self.dro,text=valtext)

    def update_scale(self):
        valtext = str(self.funit)
        valtext = 'x ' + valtext
        self.itemconfig(self.delta,text=valtext)

    def draw_ticks(self,cpr):
        # alternate long/wide and short/narrow tick marks around the rim
        for n in range(0,cpr,2):
            for i in range(0,2):
                startx=self.mid+self.r*math.cos((n+i)*self.d_alfa)
                starty=self.mid+self.r*math.sin((n+i)*self.d_alfa)
                if i == 0:
                    length = 1.15
                    width = 2
                else:
                    length = 1.1
                    width = 1
                stopx=self.mid+length*self.r*math.cos((n+i)*self.d_alfa)
                stopy=self.mid+length*self.r*math.sin((n+i)*self.d_alfa)
                self.create_line(startx,starty,stopx,stopy,width=width)

    def update(self,pycomp):
        # NOTE(review): writes through the pycomp saved in __init__, the
        # argument is ignored — presumably they are the same object; verify.
        self.pycomp[self.halpin] = self.out
# -------------------------------------------
class pyvcp_meter(Canvas):
    """ Meter - shows the value of a FLOAT with an analog meter
        <meter>
            [ <size>300</size> ]
            [ <halpin>"mymeter"</halpin> ]
            [ <text>"My Voltage"</text> ]
            [ <subtext>"Volts"</subtext>
            [ <min_>-22</min_> ]
            [ <max_>123</max_> ]
            [ <majorscale>10</majorscale> ]
            [ <minorscale>5</minorscale> ]
            [ <region1>(70,80,"green")</region1> ]
            [ <region2>(80,100,"orange")</region2> ]
            [ <region3>(100,123,"red")</region3> ]
        </meter>
    """
    # FIXME: logarithmic scale option

    # class-wide counter used to auto-name the HAL pin ("meter.N.value")
    n=0

    def __init__(self,root,pycomp,halpin=None, size=200,text=None,subtext=None,min_=0,max_=100,majorscale=None, minorscale=None,region1=None,region2=None,region3=None,**kw):
        self.size = size
        self.pad=10
        Canvas.__init__(self,root,width=size,height=size)
        self.halpin=halpin
        self.min_=min_
        self.max_=max_
        # needle sweeps +/- 2.5 rad around straight-down (-pi/2)
        range_=2.5
        self.min_alfa=-math.pi/2-range_
        self.max_alfa=-math.pi/2+range_
        self.circle=self.create_oval(self.pad,self.pad,size-self.pad,size-self.pad, width=2)
        self.itemconfig(self.circle,fill="white")
        self.mid=size/2
        self.r=(size-2*self.pad)/2
        self.alfa=0
        if minorscale==None:
            self.minorscale=0
        else:
            self.minorscale=minorscale
        if majorscale==None:
            # Default: one major tick per tenth of the span. Use float
            # division: the old float((max_-min_)/10) truncated under
            # Python 2 integer division before converting (e.g. a span of
            # 25 gave 2.0 instead of 2.5).
            self.majorscale=(self.max_-self.min_)/10.0
        else:
            self.majorscale=majorscale
        if text!=None: t=self.create_text([self.mid,self.mid-size/12],font="Arial %d bold" % (size/10),text=text)
        if subtext!=None: t=self.create_text([self.mid,self.mid+size/12],font="Arial %d" % (size/30+5),text=subtext)
        # colored arcs are drawn before ticks/needle so they stay behind them
        if region1!=None: self.draw_region(region1)
        if region2!=None: self.draw_region(region2)
        if region3!=None: self.draw_region(region3)
        self.draw_ticks()
        self.line = self.create_line([self.mid,self.mid, self.mid+self.r*math.cos(self.alfa), self.mid+self.r*math.sin(self.alfa)],fill="red", arrow="last", arrowshape=(0.9*self.r,self.r,self.r/20))
        self.itemconfig(self.line,width=3)
        # create the hal pin
        if halpin == None:
            self.halpin = "meter."+str(pyvcp_meter.n)+".value"
            pyvcp_meter.n += 1
        pycomp.newpin(self.halpin, HAL_FLOAT, HAL_IN)
        self.value = pycomp[self.halpin]

    def rad2deg(self, rad): return rad*180/math.pi

    def value2angle(self, value):
        # returns the needle angle (radians) for a given value,
        # clamped to the dial's sweep range
        scale = (self.max_-self.min_)/(self.max_alfa-self.min_alfa)
        alfa = self.min_alfa + (value-self.min_)/scale
        if alfa > self.max_alfa:
            alfa = self.max_alfa
        elif alfa < self.min_alfa:
            alfa = self.min_alfa
        return alfa

    def p2c(self, radius, angle):
        # returns the cartesian coordinates (x,y) for given polar coordinates
        # radius in percent of self.r; angle in radians
        return self.mid+radius*self.r*math.cos(angle), self.mid+radius*self.r*math.sin(angle)

    def update(self,pycomp):
        # follow the HAL pin: rotate the needle to the current value
        self.value = pycomp[self.halpin]
        self.alfa = self.value2angle(self.value)
        x,y = self.p2c(0.8, self.alfa)
        self.coords(self.line,self.mid,self.mid,x,y)

    def draw_region(self, region):
        # Draws a colored arc on the canvas between start and end values.
        # region is a (start, end, color) tuple; unpacked explicitly instead
        # of the old Python-2-only tuple parameter (same call convention).
        start, end, color = region
        start = self.value2angle(start)
        start = -self.rad2deg(start)       # Tk arcs use degrees, CCW positive
        end = self.value2angle(end)
        end = -self.rad2deg(end)
        extent = end-start
        halfwidth = math.floor(0.1*self.r/2)+1
        xy = self.pad+halfwidth, self.pad+halfwidth, self.size-self.pad-halfwidth, self.size-self.pad-halfwidth
        self.create_arc(xy, start=start, extent=extent, outline=color, width=(halfwidth-1)*2, style="arc")

    def draw_ticks(self):
        # major ticks with value labels
        value = self.min_
        while value <= self.max_:
            alfa = self.value2angle(value)
            xy1 = self.p2c(1,alfa)
            xy2 = self.p2c(0.85,alfa)
            xytext = self.p2c(0.75,alfa)
            self.create_text(xytext,font="Arial %d" % (self.size/30+5), text="%g" % value)
            self.create_line(xy1, xy2, width=2)
            value = value + self.majorscale
        # minor ticks (skip positions already holding a major tick)
        # NOTE(review): float modulo may skip/duplicate ticks for
        # non-integer scales — inherited behavior, verify if it matters.
        value = self.min_
        if self.minorscale > 0:
            while value <= self.max_:
                if (value % self.majorscale) != 0:
                    alfa = self.value2angle(value)
                    xy1 = self.p2c(1,alfa)
                    xy2 = self.p2c(0.9,alfa)
                    self.create_line(xy1, xy2)
                value = value + self.minorscale
# -------------------------------------------
class pyvcp_jogwheel(Canvas):
    """" A jogwheel that outputs a HAL_FLOAT count
        reacts to both mouse-wheel and mouse dragging
        <jogwheel>
            [ <cpr>33</cpr> ]            (counts per revolution)
            [ <halpin>"myjogwheel"</halpin> ]
            [ <size>300</size> ]
        </jogwheel>
    """
    # FIXME:
    # -jogging should be enabled only when the circle has focus
    # -circle should maintain focus when mouse over dot
    # -jogging by dragging with the mouse could work better
    # -add a scaled output, scale changes when alt/ctrl/shift is held down

    # class-wide counter used to auto-name the HAL pin ("jogwheel.N.count")
    n=0

    def __init__(self,root,pycomp,halpin=None,size=200,cpr=40,**kw):
        # NOTE: Python 2 integer division throughout (size/10, size/2, ...)
        pad=size/10
        self.count=0                     # cumulative count written to the pin
        Canvas.__init__(self,root,width=size,height=size)
        pad2=pad-size/15
        self.circle2=self.create_oval(pad2,pad2,size-pad2,size-pad2,width=3)# edge circle
        self.circle=self.create_oval(pad,pad,size-pad,size-pad)
        self.itemconfig(self.circle,fill="lightgrey",activefill="lightgrey")
        self.mid=size/2                  # wheel center
        self.r=(size-2*pad)/2            # wheel radius in pixels
        self.alfa=0                      # current pointer angle in radians
        self.d_alfa=2*math.pi/cpr        # angle step per count
        self.size=size
        self.dot = self.create_oval(self.dot_coords())
        self.itemconfig(self.dot,fill="black")
        # pointer arrow from the rim outward, rotated with self.alfa
        self.line = self.create_line( self.mid+(self.r*1)*math.cos(self.alfa), \
                self.mid+(self.r*1)*math.sin(self.alfa), \
                self.mid+(self.r*1.1)*math.cos(self.alfa), \
                self.mid+(self.r*1.1)*math.sin(self.alfa))
        self.itemconfig(self.line,arrow="last",arrowshape=(10,10,10))
        self.itemconfig(self.line,width=8)
        self.bind('<Button-4>',self.wheel_up)
        self.bind('<Button-5>',self.wheel_down)
        self.bind('<Button1-Motion>',self.motion)
        self.bind('<ButtonPress>',self.bdown)
        self.draw_ticks(cpr)
        self.dragstartx=0
        self.dragstarty=0
        self.dragstart=0
        # create the hal pin
        if halpin == None:
            halpin = "jogwheel."+str(pyvcp_jogwheel.n)+".count"
            pyvcp_jogwheel.n += 1
        pycomp.newpin(halpin, HAL_FLOAT, HAL_OUT)
        self.halpin=halpin
        pycomp[self.halpin] = self.count
        self.pycomp=pycomp

    def dot_coords(self):
        # bounding box for the dot at the current angle
        DOTR=0.06*self.size
        DOTPOS=0.85
        midx = self.mid+DOTPOS*self.r*math.cos(self.alfa)
        midy = self.mid+DOTPOS*self.r*math.sin(self.alfa)
        return midx-DOTR, midy-DOTR,midx+DOTR,midy+DOTR

    def bdown(self,event):
        # record drag origin (angle relative to wheel center)
        self.dragstartx=event.x
        self.dragstarty=event.y
        self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))

    def motion(self,event):
        # emit one count each time the drag angle advances by d_alfa
        dragstop = math.atan2((event.y-self.mid),(event.x-self.mid))
        delta = dragstop - self.dragstart
        if delta>=self.d_alfa:
            self.up()
            self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))
        elif delta<=-self.d_alfa:
            self.down()
            self.dragstart=math.atan2((event.y-self.mid),(event.x-self.mid))

    def wheel_up(self,event):
        self.up()

    def wheel_down(self,event):
        self.down()

    def down(self):
        # one count down: rotate pointer and write the pin immediately
        self.alfa-=self.d_alfa
        self.count-=1
        self.pycomp[self.halpin] = self.count
        self.update_dot()

    def up(self):
        # one count up: rotate pointer and write the pin immediately
        self.alfa+=self.d_alfa
        self.count+=1
        self.pycomp[self.halpin] = self.count
        self.update_dot()

    def update_dot(self):
        self.coords(self.dot, self.dot_coords() )
        self.coords(self.line, self.mid+(self.r*1)*math.cos(self.alfa),self.mid+(self.r*1)*math.sin(self.alfa), \
                self.mid+(self.r*1.1)*math.cos(self.alfa), \
                self.mid+(self.r*1.1)*math.sin(self.alfa))

    def draw_ticks(self,cpr):
        # one tick mark per count position around the rim
        for n in range(0,cpr):
            startx=self.mid+self.r*math.cos(n*self.d_alfa)
            starty=self.mid+self.r*math.sin(n*self.d_alfa)
            stopx=self.mid+1.15*self.r*math.cos(n*self.d_alfa)
            stopy=self.mid+1.15*self.r*math.sin(n*self.d_alfa)
            self.create_line([startx,starty,stopx,stopy])

    def update(self,pycomp):
        # this is stupid, but required for updating pin
        # when first connected to a signal
        self.pycomp[self.halpin] = self.count
# -------------------------------------------
class pyvcp_radiobutton(Frame):
    """One-of-many selector: exports one HAL_BIT output pin per choice.

    Exactly one pin ("<halpin>.<choice>") is 1 at any time; the Tk variable
    holds 2**index of the selected choice so the active index is recovered
    with log2 in update().
    """
    # class-wide counter used to auto-name the pin group ("radiobutton.N")
    n=0

    def __init__(self,master,pycomp,halpin=None,initval=0,choices=[],**kw):
        # NOTE(review): Frame.__init__ returns None, so f is always None and
        # is passed to Radiobutton as an (ignored) empty cnf — inherited quirk.
        f=Frame.__init__(self,master,bd=2,relief=GROOVE)
        self.v = IntVar()
        self.v.set(1)
        self.choices=choices
        if halpin == None:
            halpin = "radiobutton."+str(pyvcp_radiobutton.n)
            pyvcp_radiobutton.n += 1
        self.halpins=[]
        n=0
        for c in choices:
            # value=2**n makes the shared IntVar a one-hot encoding
            b=Radiobutton(self,f, text=str(c)
                        ,variable=self.v, value=pow(2,n))
            b.pack()
            if n==initval:
                b.select()
            c_halpin=halpin+"."+str(c)
            pycomp.newpin(c_halpin, HAL_BIT, HAL_OUT)
            self.halpins.append(c_halpin)
            n+=1

    # FIXME
    # this is a fairly stupid way of updating the pins
    # since the calculation is done every 100ms wether a change
    # has happened or not. see below.
    def update(self,pycomp):
        # recover the selected index from the one-hot IntVar and drive
        # exactly one pin high
        index=math.log(self.v.get(),2)
        index=int(index)
        for pin in self.halpins:
            pycomp[pin]=0;
        pycomp[self.halpins[index]]=1;

    # FIXME
    # this would be a much better way of updating the
    # pins, but at the moment I can't get it to work
    # this is never called even if I set command=self.update()
    # in the call to Radiobutton above
    def changed(self):
        index=math.log(self.v.get(),2)
        index=int(index)
        print "active:",self.halpins[index]
# -------------------------------------------
class pyvcp_label(Label):
    """ Static text label
        <label>
            <text>"My Label:"</text>
            <halpin>"name"</halpin>
            <disable_pin>True</disable_pin>
        </label>
    """
    # class-wide counter used to auto-name the optional disable pin
    n=0

    def __init__(self, master, pycomp, halpin=None, disable_pin=False, **kw):
        Label.__init__(self, master, **kw)
        self.disable_pin = disable_pin
        # A plain label exports no HAL pins; only the optional
        # "<name>.disable" input pin is ever created.
        if disable_pin:
            if halpin == None:
                halpin = "label." + str(pyvcp_label.n)
                pyvcp_label.n += 1
            self.halpin_disable = halpin + ".disable"
            pycomp.newpin(self.halpin_disable, HAL_BIT, HAL_IN)

    def update(self, pycomp):
        # Grey the label out while the disable pin is driven high.
        if not self.disable_pin:
            return
        if pycomp[self.halpin_disable] == 1:
            Label.config(self, state=DISABLED)
        else:
            Label.config(self, state=NORMAL)
# -------------------------------------------
class pyvcp_vbox(Frame):
    """ Box in which widgets are packed vertically
        <vbox>
            <relief>GROOVE</relief>   (FLAT, SUNKEN, RAISED, GROOVE, RIDGE)
            <bd>3</bd>                (border width)
            place widgets here
        </vbox>
    """
    def __init__(self, master, pycomp, bd=0, relief=FLAT):
        Frame.__init__(self, master, bd=bd, relief=relief)
        # pack() defaults for children; overridable via the box* pseudo-widgets
        self.fill = 'x'
        self.side = 'top'
        self.anchor = 'center'
        self.expand = 'yes'

    def update(self, pycomp):
        # containers own no HAL pins
        pass

    def add(self, container, widget):
        # The box* pseudo-widgets are packing directives, not real children:
        # record their setting and do not pack them.
        if isinstance(widget, pyvcp_boxexpand):
            self.expand = widget.expand
        elif isinstance(widget, pyvcp_boxfill):
            self.fill = widget.fill
        elif isinstance(widget, pyvcp_boxanchor):
            self.anchor = widget.anchor
        else:
            widget.pack(side=self.side, anchor=self.anchor,
                        fill=self.fill, expand=self.expand)
class pyvcp_boxfill:
    """Pseudo-widget: sets the pack 'fill' option of the enclosing box."""
    def __init__(self, master, pycomp, fill):
        # the enclosing box reads this attribute in its add() method
        self.fill = fill

    def update(self, pycomp):
        # no HAL pins to service
        pass
class pyvcp_boxanchor:
    """Pseudo-widget: sets the pack 'anchor' option of the enclosing box."""
    def __init__(self, master, pycomp, anchor):
        # the enclosing box reads this attribute in its add() method
        self.anchor = anchor

    def update(self, pycomp):
        # no HAL pins to service
        pass
class pyvcp_boxexpand:
    """Pseudo-widget: sets the pack 'expand' option of the enclosing box."""
    def __init__(self, master, pycomp, expand):
        # the enclosing box reads this attribute in its add() method
        self.expand = expand

    def update(self, pycomp):
        # no HAL pins to service
        pass
# -------------------------------------------
class pyvcp_hbox(Frame):
    """ Box in which widgets are packed horizontally
        <hbox>
            <relief>GROOVE</relief>   (FLAT, SUNKEN, RAISED, GROOVE, RIDGE)
            <bd>3</bd>                (border width)
            place widgets here
        </hbox>
    """
    def __init__(self, master, pycomp, bd=0, relief=FLAT):
        Frame.__init__(self, master, bd=bd, relief=relief)
        # pack() defaults for children; overridable via the box* pseudo-widgets
        self.fill = 'y'
        self.side = 'left'
        self.anchor = 'center'
        self.expand = 'yes'

    def update(self, pycomp):
        # containers own no HAL pins
        pass

    def add(self, container, widget):
        # The box* pseudo-widgets are packing directives, not real children:
        # record their setting and do not pack them.
        if isinstance(widget, pyvcp_boxexpand):
            self.expand = widget.expand
        elif isinstance(widget, pyvcp_boxfill):
            self.fill = widget.fill
        elif isinstance(widget, pyvcp_boxanchor):
            self.anchor = widget.anchor
        else:
            # note: unlike vbox, hbox packs without the expand option
            widget.pack(side=self.side, anchor=self.anchor, fill=self.fill)
class pyvcp_labelframe(LabelFrame):
    """
    frame with a title
    """
    def __init__(self, master, pycomp, **kw):
        LabelFrame.__init__(self, master, **kw)
        # fill the space offered by the parent container
        self.pack(expand=1, fill=BOTH)

    def update(self, pycomp):
        # containers own no HAL pins
        pass

    def add(self, container, widget):
        # children stack top-to-bottom and stretch in both directions
        widget.pack(side="top", fill="both", expand="yes")
class pyvcp_tabs(bwidget.NoteBook):
    """Notebook container: each child becomes one tab page.

    Tab captions may be supplied via the "names" keyword; any tab beyond
    the supplied names gets an auto-generated "Tab-<i>" caption.
    """
    def __init__(self, master, pycomp, cnf={}, **kw):
        self.names = kw.pop("names", [])
        self.idx = 0
        self._require(master)
        Widget.__init__(self, master, "NoteBook", cnf, kw)

    def update(self, pycomp):
        # containers own no HAL pins
        pass

    def add(self, container, child):
        child.pack(side="top", fill="both", anchor="ne")
        # once a second tab exists, make the first one the visible page
        if self.idx == 1:
            self.raise_page(self.names[0])

    def getcontainer(self):
        # Auto-name this tab when the user supplied fewer names than tabs.
        # (The original test was "<", which missed the len == idx case and
        # raised IndexError on self.names[self.idx] when no/few names were
        # given; "<=" appends the default name exactly when it is missing.)
        if len(self.names) <= self.idx:
            self.names.append("Tab-%d" % self.idx)
        name = self.names[self.idx]
        self.idx += 1
        return self.insert("end", name, text=name)
# -------------------------------------------
class pyvcp_spinbox(Spinbox):
    """ (control) controls a float, also shown as text
        reacts to the mouse wheel
        <spinbox>
            [ <halpin>"my-spinbox"</halpin> ]
            [ <min_>55</min_> ]        sets the minimum value to 55
            [ <max_>123</max_> ]       sets the maximum value to 123
            [ <initval>100</initval> ] sets intial value to 100  TJP 12 04 2007
        </spinbox>
    """
    # FIXME: scale resolution when shift/ctrl/alt is held down?

    # class-wide counter used to auto-name the HAL pin ("spinbox.N")
    n=0

    def __init__(self,master,pycomp,halpin=None,
                min_=0,max_=100,initval=0,resolution=1,format="2.1f",**kw):
        self.v = DoubleVar()
        # forward spinbox config through kw unless the caller set it explicitly
        if 'increment' not in kw: kw['increment'] = resolution
        if 'from' not in kw: kw['from'] = min_
        if 'to' not in kw: kw['to'] = max_
        if 'format' not in kw: kw['format'] = "%" + format
        kw['command'] = self.command
        Spinbox.__init__(self,master,textvariable=self.v,**kw)
        if halpin == None:
            halpin = "spinbox."+str(pyvcp_spinbox.n)
            pyvcp_spinbox.n += 1
        self.halpin=halpin
        # clamp the initial value into [min_, max_]
        if initval < min_:
            self.value=min_
        elif initval > max_:
            self.value=max_
        else:
            self.value=initval
        self.oldvalue=min_
        self.format = "%(b)"+format
        self.max_=max_
        self.min_=min_
        self.resolution=resolution
        self.v.set( str( self.format % {'b':self.value} ) )
        pycomp.newpin(halpin, HAL_FLOAT, HAL_OUT)
        self.bind('<Button-4>',self.wheel_up)
        self.bind('<Button-5>',self.wheel_down)

    def command(self):
        # Tk callback when the spinbox arrows change the entry.
        # NOTE(review): a value typed directly into the entry is taken
        # as-is here, without clamping to min_/max_ — verify intended.
        self.value = self.v.get()

    def update(self,pycomp):
        # drive the pin every cycle; refresh the display only on change
        pycomp[self.halpin] = self.value
        if self.value != self.oldvalue:
            self.v.set( str( self.format % {'b':self.value} ) )
            self.oldvalue=self.value

    def wheel_up(self,event):
        # one resolution step up, clamped at max_
        self.value += self.resolution
        if self.value > self.max_:
            self.value = self.max_

    def wheel_down(self,event):
        # one resolution step down, clamped at min_
        self.value -= self.resolution
        if self.value < self.min_:
            self.value = self.min_
# -------------------------------------------
class pyvcp_number(Label):
    """ (indicator) shows a float as text """
    # class-wide counter used to auto-name the HAL pin ("number.N")
    n=0

    def __init__(self, master, pycomp, halpin=None, format="2.1f", **kw):
        self.v = StringVar()
        self.format = format
        Label.__init__(self, master, textvariable=self.v, **kw)
        if halpin == None:
            halpin = "number." + str(pyvcp_number.n)
            pyvcp_number.n += 1
        self.halpin = halpin
        self.value = 0.0
        # render the initial value with the user-supplied printf-style format
        template = "%(b)" + self.format
        self.v.set(str(template % {'b': self.value}))
        pycomp.newpin(halpin, HAL_FLOAT, HAL_IN)

    def update(self, pycomp):
        # refresh the displayed text only when the pin value changed
        newvalue = pycomp[self.halpin]
        if newvalue != self.value:
            self.value = newvalue
            template = "%(b)" + self.format
            self.v.set(str(template % {'b': newvalue}))
class pyvcp_u32(Label):
    """ (indicator) shows a u32 as text """
    # NOTE: this class-level counter is unused; pin naming below borrows
    # pyvcp_number's counter instead.
    n=0

    def __init__(self,master,pycomp,halpin=None,format="d",**kw):
        self.v = StringVar()
        self.format=format
        Label.__init__(self,master,textvariable=self.v,**kw)
        if halpin == None:
            # NOTE(review): deliberately(?) reuses the "number." prefix and
            # pyvcp_number.n counter — this avoids pin-name collisions with
            # pyvcp_number instances, so it is left unchanged; verify intent.
            halpin = "number."+str(pyvcp_number.n)
            pyvcp_number.n += 1
        self.halpin=halpin
        self.value=0.0
        # render the initial value with the user-supplied printf-style format
        dummy = "%(b)"+self.format
        self.v.set( str( dummy % {'b':self.value} ) )
        pycomp.newpin(halpin, HAL_U32, HAL_IN)

    def update(self,pycomp):
        # refresh the displayed text only when the pin value changed
        newvalue = pycomp[self.halpin]
        if newvalue != self.value:
            self.value=newvalue
            dummy = "%(b)"+self.format
            self.v.set( str( dummy % {'b':newvalue} ) )
class pyvcp_s32(Label):
    """ (indicator) shows a s32 as text """
    # NOTE: this class-level counter is unused; pin naming below borrows
    # pyvcp_number's counter instead.
    n=0

    def __init__(self,master,pycomp,halpin=None,format="d",**kw):
        self.v = StringVar()
        self.format=format
        Label.__init__(self,master,textvariable=self.v,**kw)
        if halpin == None:
            # NOTE(review): deliberately(?) reuses the "number." prefix and
            # pyvcp_number.n counter — this avoids pin-name collisions with
            # pyvcp_number instances, so it is left unchanged; verify intent.
            halpin = "number."+str(pyvcp_number.n)
            pyvcp_number.n += 1
        self.halpin=halpin
        self.value=0.0
        # render the initial value with the user-supplied printf-style format
        dummy = "%(b)"+self.format
        self.v.set( str( dummy % {'b':self.value} ) )
        pycomp.newpin(halpin, HAL_S32, HAL_IN)

    def update(self,pycomp):
        # refresh the displayed text only when the pin value changed
        newvalue = pycomp[self.halpin]
        if newvalue != self.value:
            self.value=newvalue
            dummy = "%(b)"+self.format
            self.v.set( str( dummy % {'b':newvalue} ) )
class pyvcp_timer(Label):
    """ (indicator) shows elapsed time as HH:MM:SS
        two pins - run and reset
        time advances whenever run is true
        time holds whenever run is false
        time resets to zero on a rising edge of reset
    """
    # class-wide counter used to auto-name the HAL pin group ("timer.N")
    n=0

    def __init__(self,master,pycomp,halpin=None,**kw):
        self.v = StringVar()
        Label.__init__(self,master,textvariable=self.v,**kw)
        if halpin == None:
            halpin = "timer."+str(pyvcp_timer.n)
            pyvcp_timer.n += 1
        # halpins[0] = "<name>.reset", halpins[1] = "<name>.run"
        self.halpins=[]
        c_halpin=halpin+".reset"
        pycomp.newpin(c_halpin, HAL_BIT, HAL_IN)
        self.halpins.append(c_halpin)
        c_halpin=halpin+".run"
        pycomp.newpin(c_halpin, HAL_BIT, HAL_IN)
        self.halpins.append(c_halpin)
        # last sampled pin values, for edge detection in update()
        self.resetvalue=0
        self.runvalue=0
        # starttime is the time of the last rising edge of 'run'
        self.starttime=0
        # basetime is the sum of all prior 'run=1' periods
        self.basetime=0
        self.currtime=0
        self.v.set( "00:00:00")

    def update(self,pycomp):
        # edge-detecting state machine driven by wall-clock time
        resetvalue = pycomp[self.halpins[0]]
        runvalue = pycomp[self.halpins[1]]
        if resetvalue != self.resetvalue:
            self.resetvalue=resetvalue
            if resetvalue == 1:
                # rising edge on reset: discard accumulated time and
                # restart the current period from now
                self.basetime=0
                self.starttime=time.time()
        if runvalue != self.runvalue:
            self.runvalue=runvalue
            if runvalue == 1:
                # rising edge
                self.starttime = time.time()
            else:
                # falling edge: bank the period that just ended
                self.basetime += time.time() - self.starttime
        if runvalue == 1:
            total=self.basetime + time.time() - self.starttime
        else:
            total=self.basetime
        # split total seconds into HH:MM:SS for display
        hr = int(total / 3600)
        remainder = total - hr*3600
        mn = int(remainder / 60)
        sec = int(remainder - mn*60)
        self.v.set( str( "%02d:%02d:%02d" % (hr,mn,sec) ) )
# -------------------------------------------
class pyvcp_bar(Canvas):
    """ (indicator) a bar-indicator for a float"""
    # class-wide counter used to auto-name the HAL pin ("bar.N")
    n=0
    # FIXME logarithmic scale?

    def __init__(self, master, pycomp,
                 fillcolor="green", bgcolor="grey",
                 halpin=None, min_=0.0, max_=100.0, **kw):
        # fixed geometry: 200x50 canvas holding a 150x30 bar, centered
        self.cw = 200    # canvas width
        self.ch = 50     # canvas height
        self.bh = 30     # bar height
        self.bw = 150    # bar width
        self.pad = ((self.cw - self.bw) / 2)
        Canvas.__init__(self, master, width=self.cw, height=self.ch)
        if halpin == None:
            halpin = "bar." + str(pyvcp_bar.n)
            pyvcp_bar.n += 1
        self.halpin = halpin
        self.endval = max_
        self.startval = min_
        self.value = 0.0    # dummy value until the pin is first read
        pycomp.newpin(halpin, HAL_FLOAT, HAL_IN)
        # the border
        border = self.create_rectangle(self.pad, 1, self.pad + self.bw, self.bh)
        self.itemconfig(border, fill=bgcolor)
        # the bar itself
        start, end = self.bar_coords()
        self.bar = self.create_rectangle(start, 2, end, self.bh - 1)
        self.itemconfig(self.bar, fill=fillcolor)
        # min/max scale labels below the bar
        start_text = self.create_text(self.pad, self.bh + 10, text=str(self.startval))
        end_text = self.create_text(self.pad + self.bw, self.bh + 10, text=str(self.endval))
        # numeric readout centered on the bar
        self.val_text = self.create_text(self.pad + self.bw / 2,
                                         self.bh / 2, text=str(self.value))

    def bar_coords(self):
        """ calculates the coordinates in pixels for the bar """
        # the bar spans from value==0 to value==self.value, clamped to the
        # box reserved for it at construction time
        min_pixels = self.pad
        max_pixels = self.pad + self.bw
        scale = float(max_pixels - min_pixels) / float(self.endval - self.startval)
        bar_end = min_pixels + scale * (self.value - self.startval)
        if bar_end > max_pixels:
            bar_end = max_pixels
        elif bar_end < min_pixels:
            bar_end = min_pixels
        bar_start = min_pixels + scale * (0 - self.startval)
        if bar_start < min_pixels:    # don't know if this is really needed
            bar_start = min_pixels
        return [bar_start, bar_end]

    def update(self, pycomp):
        # redraw text and bar only when the pin value changed
        newvalue = pycomp[self.halpin]
        if newvalue != self.value:
            self.value = newvalue
            valtext = str("%(b)3.1f" % {'b': self.value})
            self.itemconfig(self.val_text, text=valtext)
            start, end = self.bar_coords()
            self.coords(self.bar, start, 2, end, self.bh - 1)
# -------------------------------------------
class pyvcp_led(Canvas):
    """ (indicator) a LED
        <led>
            <on_color>"colorname"</on_color>            Default color red
            <off_color>"colorname"</off_color>          Default color green
            <disable_pin>True</disable_pin>             Optional halpin sets led to disable_color
            <disable_color>"colorname"</disable_color>  Default color gray80
        </led>"""
    # class-wide counter used to auto-name the HAL pin ("led.N"),
    # shared with pyvcp_rectled
    n=0

    def __init__(self, master, pycomp, halpin=None, disable_pin=False,
                 off_color="red", on_color="green", disabled_color="gray80", size=20, **kw):
        Canvas.__init__(self, master, width=size, height=size, bd=0)
        self.off_color = off_color
        self.on_color = on_color
        self.disabled_color = disabled_color
        self.disable_pin = disable_pin
        # the LED is a single filled circle
        self.oh = self.create_oval(1, 1, size, size)
        self.state = 0
        self.itemconfig(self.oh, fill=off_color)
        if halpin == None:
            halpin = "led." + str(pyvcp_led.n)
            pyvcp_led.n += 1
        self.halpin = halpin
        pycomp.newpin(halpin, HAL_BIT, HAL_IN)
        if disable_pin:
            # extra input pin that forces the disabled color when high
            self.halpin_disable = halpin + ".disable"
            pycomp.newpin(self.halpin_disable, HAL_BIT, HAL_IN)

    def update(self, pycomp):
        # follow the input pin; the disable pin overrides both colors
        if pycomp[self.halpin] == 1:
            self.itemconfig(self.oh, fill=self.on_color)
            self.state = 1
        else:
            self.itemconfig(self.oh, fill=self.off_color)
            self.state = 0
        if self.disable_pin and pycomp[self.halpin_disable] == 1:
            self.itemconfig(self.oh, fill=self.disabled_color)
# -------------------------------------------
class pyvcp_rectled(Canvas):
    """ (indicator) a LED
        <rectled>
            <on_color>"colorname"</on_color>           Default color red
            <off_color>"colorname"</off_color>         Default color green
            <disable_pin>True</disable_pin>            Optional halpin sets led to disable_color
            <disable_color>"somecolor"</disable_color> Default color light gray
        </rectled>"""
    n=0

    def __init__(self, master, pycomp, halpin=None, disable_pin=False,
                 off_color="red", on_color="green", disabled_color="gray80", height=10, width=30, **kw):
        Canvas.__init__(self, master, width=width, height=height, bd=2)
        self.off_color = off_color
        self.on_color = on_color
        self.disabled_color = disabled_color
        self.disable_pin = disable_pin
        # the LED is a single filled rectangle
        self.oh = self.create_rectangle(1, 1, width, height)
        self.state = 0
        self.itemconfig(self.oh, fill=off_color)
        if halpin == None:
            # auto-names share pyvcp_led's "led." prefix and counter so
            # round and rectangular LEDs never collide on pin names
            halpin = "led." + str(pyvcp_led.n)
            pyvcp_led.n += 1
        self.halpin = halpin
        pycomp.newpin(halpin, HAL_BIT, HAL_IN)
        if disable_pin:
            # extra input pin that forces the disabled color when high
            self.halpin_disable = halpin + ".disable"
            pycomp.newpin(self.halpin_disable, HAL_BIT, HAL_IN)

    def update(self, pycomp):
        # follow the input pin; the disable pin overrides both colors
        if pycomp[self.halpin] == 1:
            self.itemconfig(self.oh, fill=self.on_color)
            self.state = 1
        else:
            self.itemconfig(self.oh, fill=self.off_color)
            self.state = 0
        if self.disable_pin and pycomp[self.halpin_disable] == 1:
            self.itemconfig(self.oh, fill=self.disabled_color)
# -------------------------------------------
class pyvcp_checkbutton(Checkbutton):
    """ (control) a check button
        halpin is 1 when button checked, 0 otherwise
        <checkbutton>
        [ <halpin>"my-checkbutton"</halpin> ]
        [ <initval>1</initval> ] sets initial value to 1, all values >=0.5 are assumed to be 1
        </checkbutton>
    """
    n=0   # class-wide counter used to generate unique default pin names
    def __init__(self,master,pycomp,halpin=None,initval=0,**kw):
        # BooleanVar mirrors the check state between Tk and the HAL pin
        self.v = BooleanVar(master)
        Checkbutton.__init__(self,master,variable=self.v,onvalue=1, offvalue=0,**kw)
        if halpin == None:
            halpin = "checkbutton."+str(pyvcp_checkbutton.n)
            pyvcp_checkbutton.n += 1
        self.halpin=halpin
        # threshold the initial value: >= 0.5 counts as checked
        if initval >= 0.5:
            self.value=1
        else:
            self.value=0
        self.v.set(self.value)
        pycomp.newpin(halpin, HAL_BIT, HAL_OUT)
    def update(self,pycomp):
        # push the current check state out to the HAL pin every cycle
        pycomp[self.halpin]=self.v.get()
# -------------------------------------------
class pyvcp_button(Button):
    """ (control) a button
        halpin is 1 when button pressed, 0 otherwise
        optional halpin.disable disables the button
        <button>
        <halpin>"name"</halpin>
        <disablepin>True</disablepin>
        </button>"""
    n=0   # class-wide counter used to generate unique default pin names
    def __init__(self,master,pycomp,halpin=None,disable_pin=False,**kw):
        Button.__init__(self,master,**kw)
        if halpin == None:
            halpin = "button."+str(pyvcp_button.n)
            pyvcp_button.n += 1
        self.halpin=halpin
        pycomp.newpin(halpin, HAL_BIT, HAL_OUT)
        self.disable_pin = disable_pin
        # NOTE(review): `not disable_pin == False` is True for any value
        # other than False/0, including non-empty strings from the XML --
        # possibly intentional, so left as-is
        if not disable_pin == False:
            halpin_disable = halpin + ".disable"
            pycomp.newpin(halpin_disable, HAL_BIT, HAL_IN)
            self.halpin_disable=halpin_disable
        self.state=0;
        self.bind("<ButtonPress>", self.pressed)
        self.bind("<ButtonRelease>", self.released)
        self.pycomp = pycomp
    def pressed(self,event):
        # ignore presses while the disable pin is asserted
        if self.disable_pin:
            is_disabled = self.pycomp[self.halpin_disable]
            if is_disabled == 1: return
        self.pycomp[self.halpin]=1
    def released(self,event):
        # ignore releases while the disable pin is asserted
        if self.disable_pin:
            is_disabled = self.pycomp[self.halpin_disable]
            if is_disabled == 1: return
        self.pycomp[self.halpin]=0
    def update(self,pycomp):
        # grey out / re-enable the Tk widget to track the disable pin
        if self.disable_pin:
            is_disabled = pycomp[self.halpin_disable]
            if is_disabled == 1: Button.config(self,state=DISABLED)
            else: Button.config(self,state=NORMAL)
        else:pass
# -------------------------------------------
class pyvcp_scale(Scale):
    """ (control) a slider
        halpin-i is integer output
        halpin-f is float output
        <scale>
        [ <halpin>"my-scale"</halpin> ]
        [ <resolution>0.1</resolution> ] scale value a whole number must end in '.'
        [ <orient>HORIZONTAL</orient> ] aligns the scale horizontal
        [ <min_>-33</min_> ] sets the minimum value to -33
        [ <max_>26</max_> ] sets the maximum value to 26
        [ <initval>10</initval> ] sets initial value to 10
        </scale>
    """
    # FIXME scale resolution when ctrl/alt/shift is held down?
    # FIXME allow user to specify size
    n=0   # class-wide counter used to generate unique default pin names
    def __init__(self,master,pycomp,
                    resolution=1,halpin=None,min_=0,max_=10,initval=0,**kw):
        self.resolution=resolution
        Scale.__init__(self,master,resolution=self.resolution,
                        from_=min_,to=max_,**kw)
        if halpin == None:
            halpin = "scale."+str(pyvcp_scale.n)
            pyvcp_scale.n += 1
        self.halpin=halpin
        # two output pins: "<name>-i" (integer) and "<name>-f" (float)
        pycomp.newpin(halpin+"-i", HAL_S32, HAL_OUT)
        pycomp.newpin(halpin+"-f", HAL_FLOAT, HAL_OUT)
        # X11 mouse wheel events: Button-4 = scroll up, Button-5 = scroll down
        self.bind('<Button-4>',self.wheel_up)
        self.bind('<Button-5>',self.wheel_down)
        # clamp the initial value into [min_, max_]
        if initval < min_:
            self.value=min_
        elif initval > max_:
            self.value=max_
        else:
            self.value=initval
        self.set(self.value)
    def update(self,pycomp):
        # publish the slider position on both pins; the -i pin truncates
        pycomp[self.halpin+"-f"]=self.get()
        pycomp[self.halpin+"-i"]=int(self.get())
    def wheel_up(self,event):
        self.set(self.get()+self.resolution)
    def wheel_down(self,event):
        self.set(self.get()-self.resolution)
class pyvcp_table(Frame):
    """(container) grid layout.

    Children are laid out on a grid.  pyvcp_tablerow markers start a new
    row, pyvcp_tablespan markers set the row/column span of the next real
    child, and pyvcp_tablesticky markers set its sticky flags.
    flexible_rows/flexible_columns are given grid weight 1;
    uniform_rows/uniform_columns place rows/columns into uniform-size
    groups (one group per character, 1-based indices).
    """
    def __init__(self, master, pycomp, flexible_rows=[], flexible_columns=[], uniform_columns="", uniform_rows=""):
        Frame.__init__(self, master)
        for r in flexible_rows:
            self.grid_rowconfigure(r, weight=1)
        for c in flexible_columns:
            self.grid_columnconfigure(c, weight=1)
        # rows/columns are addressed 1-based in this widget
        for i, r in enumerate(uniform_rows):
            self.grid_rowconfigure(i+1, uniform=r)
        for i, c in enumerate(uniform_columns):
            self.grid_columnconfigure(i+1, uniform=c)
        self._r = self._c = 0    # current grid cursor (row, column)
        self.occupied = {}       # (row, col) -> True for cells already taken
        self.span = (1,1)        # one-shot span applied to the next child
        self.sticky = "ne"       # sticky flags for subsequent children
    def add(self, container, child):
        # Marker children adjust layout state instead of being gridded.
        if isinstance(child, pyvcp_tablerow):
            self._r += 1
            self._c = 1
            return
        elif isinstance(child, pyvcp_tablespan):
            self.span = child.span
            return
        elif isinstance(child, pyvcp_tablesticky):
            self.sticky = child.sticky
            return
        r, c = self._r, self._c
        # Skip cells claimed by earlier spanning children.
        # (dict.has_key() was removed in Python 3; the `in` operator is
        # equivalent and works identically in Python 2.)
        while (r, c) in self.occupied:
            c = c + 1
        rs, cs = self.span
        child.grid(row=r, column=c, rowspan=rs, columnspan=cs,
                   sticky=self.sticky)
        # mark every cell covered by this child as occupied
        for ri in range(r, r+rs):
            for ci in range(c, c+cs):
                self.occupied[ri,ci] = True
        self.span = 1,1          # span is one-shot: reset after use
        self._c = c+cs
    def update(self, pycomp): pass
class pyvcp_tablerow:
    """Layout marker: tells an enclosing pyvcp_table to begin a new row.

    Carries no state; the table reacts purely to this object's type when
    it is added.
    """
    def __init__(self, master, pycomp):
        pass
    def update(self, pycomp):
        # Markers have no HAL pins to refresh.
        pass
class pyvcp_tablespan:
    """Layout marker: the next widget added to a pyvcp_table spans the
    given number of rows and columns.
    """
    def __init__(self, master, pycomp, rows=1, columns=1):
        # Stored as a (rows, columns) tuple, consumed by pyvcp_table.add().
        self.span = (rows, columns)
    def update(self, pycomp):
        # Markers have no HAL pins to refresh.
        pass
class pyvcp_tablesticky:
    """Layout marker: sets the grid 'sticky' flags used for subsequent
    widgets added to an enclosing pyvcp_table.
    """
    def __init__(self, master, pycomp, sticky):
        self.sticky = sticky
    def update(self, pycomp):
        # Markers have no HAL pins to refresh.
        pass
class pyvcp_include(Frame):
    """(container) include another pyvcp XML panel file into this panel.

    src is the file name of the XML file to parse; its widgets are packed
    into this frame.  An optional prefix is pushed onto the HAL component
    while the included file's pins are created, then restored.
    """
    def __init__(self, master, pycomp, src, expand="yes", fill="both", anchor="center", prefix=None, **kw):
        Frame.__init__(self,master,**kw)
        self.master = master
        self.fill = fill
        self.anchor = anchor
        self.expand = expand
        if prefix is not None:
            # temporarily re-prefix pins created while parsing the include
            oldprefix = pycomp.getprefix()
            pycomp.setprefix(prefix)
        import vcpparse, xml.dom.minidom, xml.parsers.expat
        try:
            doc = xml.dom.minidom.parse(src)
        except xml.parsers.expat.ExpatError, detail:
            print "Error: could not open",src,"!"
            print detail
            sys.exit(1)
        # find the pyvcp root element
        for e in doc.childNodes:
            if e.nodeType == e.ELEMENT_NODE and e.localName == "pyvcp":
                break
        if e.localName != "pyvcp":
            print "Error: no pyvcp element in file!"
            sys.exit()
        pyvcproot=e
        # recursively instantiate the included file's widgets into self
        vcpparse.nodeiterator(pyvcproot,self)
        if prefix is not None:
            # restore the original pin prefix
            pycomp.setprefix(oldprefix)
    def update(self, pycomp): pass
    def add(self, container, widget):
        # children of the include are packed, not gridded
        widget.pack(fill=self.fill, anchor=self.anchor, expand=self.expand)
class _pyvcp_dummy:
    """Base for non-widget panel elements: satisfies the widget interface
    (add/update/pack) without creating any Tk window."""
    def add(self, container, widget): pass
    def update(self, pycomp): pass
    def pack(self, *args, **kw): pass
class pyvcp_title(_pyvcp_dummy):
    """(non-widget) sets the toplevel window title and, optionally, its icon name."""
    def __init__(self, master, pycomp, title, iconname=None):
        master.wm_title(title)
        if iconname: master.wm_iconname(iconname)
class pyvcp_axisoptions(_pyvcp_dummy):
    """(non-widget) installs the standard AXIS GUI Tk options on the toplevel."""
    def __init__(self, master, pycomp):
        import rs274.options
        rs274.options.install(master)
class pyvcp_option(_pyvcp_dummy):
    """(non-widget) adds one Tk option-database entry (pattern, value, priority)."""
    def __init__(self, master, pycomp, pattern, value, priority=None):
        master.option_add(pattern, value, priority)
class pyvcp_image(_pyvcp_dummy):
    """(non-widget) loads a named PhotoImage for later use by image widgets."""
    all_images = {}   # shared registry: image name -> PhotoImage
    def __init__(self, master, pycomp, name, **kw):
        self.all_images[name] = PhotoImage(name, kw, master)
class _pyvcp_image(Label):
    """(indicator) base class for image selectors.

    Displays images[pin value]; subclasses define `pintype`, the HAL pin
    type of the index input pin.
    """
    def __init__(self, master, pycomp, images, halpin=None, **kw):
        Label.__init__(self, master, **kw)
        # accept either a list of image names or one space-separated string
        if isinstance(images, basestring): images = images.split()
        self.images = images
        if halpin == None:
            # NOTE(review): default name reuses pyvcp_number's counter
            # ("number.N") rather than an image-specific one -- confirm intended
            halpin = "number."+str(pyvcp_number.n)
            pyvcp_number.n += 1
        self.halpin = halpin
        self.value = 0
        self.last = None   # last pin value shown, to avoid redundant reconfigures
        pycomp.newpin(halpin, self.pintype, HAL_IN)
    def update(self, pycomp):
        # swap the displayed image only when the pin value changes
        l = pycomp[self.halpin]
        if l != self.last:
            try:
                self.configure(image=self.images[l])
            except (IndexError, KeyError):
                print >>sys.stderr, "Unknown image #%d on %s" % (l, self.halpin)
            self.last = l
class pyvcp_image_bit(_pyvcp_image):
    # two-image selector driven by a HAL_BIT pin (0 or 1)
    pintype = HAL_BIT
class pyvcp_image_u32(_pyvcp_image):
    # multi-image selector indexed by a HAL_U32 pin
    pintype = HAL_U32
# This must come after all the pyvcp_xxx classes
# Build the XML element-name list from every pyvcp_* class defined above;
# `elements` holds the names with the "pyvcp_" prefix stripped, while
# `__all__` exports the full class names.
elements = []
__all__ = []
for _key in globals().keys():
    if _key.startswith("pyvcp_"):
        elements.append(_key[6:])
        __all__.append(_key)
if __name__ == '__main__':
    # this module only provides widgets; it is not a runnable program
    print "You can't run pyvcp_widgets.py by itself..."
# vim:sts=4:sw=4:et: | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of Beam TFX runner."""
import datetime
from typing import Any, Iterable, List, Optional, Text, Union
from absl import logging
import apache_beam as beam
from tfx.dsl.compiler import compiler
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_component
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as pipeline_py
from tfx.orchestration.beam.legacy import beam_dag_runner as legacy_beam_dag_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.orchestration.portable import tfx_runner
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils
from google.protobuf import any_pb2
from google.protobuf import message
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class PipelineNodeAsDoFn(beam.DoFn):
  """Wrap node as beam DoFn.

  Each pipeline node is executed as a single DoFn invocation; upstream
  completion is signalled through empty side-input PCollections.
  """
  def __init__(self, pipeline_node: pipeline_pb2.PipelineNode,
               mlmd_connection_config: metadata.ConnectionConfigType,
               pipeline_info: pipeline_pb2.PipelineInfo,
               pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
               executor_spec: Optional[message.Message],
               custom_driver_spec: Optional[message.Message],
               deployment_config: Optional[message.Message]):
    """Initializes the PipelineNodeAsDoFn.
    Args:
      pipeline_node: The specification of the node that this launcher launches.
      mlmd_connection_config: ML metadata connection config.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.
      executor_spec: Specification for the executor of the node. This is
        expected for all nodes. This will be used to determine the specific
        ExecutorOperator class to be used to execute and will be passed into
        ExecutorOperator.
      custom_driver_spec: Specification for custom driver. This is expected only
        for advanced use cases.
      deployment_config: Deployment Config for the pipeline.
    """
    self._pipeline_node = pipeline_node
    self._mlmd_connection_config = mlmd_connection_config
    self._pipeline_info = pipeline_info
    self._pipeline_runtime_spec = pipeline_runtime_spec
    self._executor_spec = executor_spec
    self._custom_driver_spec = custom_driver_spec
    self._node_id = pipeline_node.node_info.id
    self._deployment_config = deployment_config
  def process(self, element: Any, *signals: Iterable[Any]) -> None:
    """Executes node based on signals.
    Args:
      element: a signal element to trigger the node.
      *signals: side input signals indicate completeness of upstream nodes.
    """
    # Upstream-completion signals are empty PCollections; materializing them
    # (AsIter) forces Beam to wait until the upstream nodes finished.
    for signal in signals:
      assert not list(signal), 'Signal PCollection should be empty.'
    logging.info('node %s is running.', self._node_id)
    self._run_node()
    logging.info('node %s is finished.', self._node_id)
  def _run_node(self) -> None:
    # Builds a portable Launcher for this node and runs it synchronously.
    platform_config = self._extract_platform_config(self._deployment_config,
                                                    self._node_id)
    launcher.Launcher(
        pipeline_node=self._pipeline_node,
        mlmd_connection=metadata.Metadata(self._mlmd_connection_config),
        pipeline_info=self._pipeline_info,
        pipeline_runtime_spec=self._pipeline_runtime_spec,
        executor_spec=self._executor_spec,
        platform_config=platform_config,
        custom_driver_spec=self._custom_driver_spec).launch()
  def _extract_platform_config(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    # Node-level platform config is optional; unpack the set oneof field,
    # or return None when this node has no entry.
    platform_config = deployment_config.node_level_platform_configs.get(node_id)
    return (getattr(platform_config, platform_config.WhichOneof('config'))
            if platform_config else None)
class BeamDagRunner(tfx_runner.TfxRunner):
  """Tfx runner on Beam.

  Executes a compiled (IR) TFX pipeline by mapping every pipeline node to a
  Beam ParDo and wiring upstream/downstream dependencies through empty
  signal PCollections.
  """
  # Hook that lets subclasses substitute their own per-node DoFn class.
  _PIPELINE_NODE_DO_FN_CLS = PipelineNodeAsDoFn
  def __new__(
      cls,
      beam_orchestrator_args: Optional[List[Text]] = None,
      config: Optional[pipeline_config.PipelineConfig] = None):
    """Initializes BeamDagRunner as a TFX orchestrator.
    Create the legacy BeamDagRunner object if any of the legacy
    `beam_orchestrator_args` or `config` arguments are passed. A migration
    guide will be provided in a future TFX version for users of these arguments.
    Args:
      beam_orchestrator_args: Deprecated beam args for the beam orchestrator.
        Note that this is different from the beam_pipeline_args within
        additional_pipeline_args, which is for beam pipelines in components. If
        this option is used, the legacy non-IR-based BeamDagRunner will be
        constructed.
      config: Deprecated optional pipeline config for customizing the launching
        of each component. Defaults to pipeline config that supports
        InProcessComponentLauncher and DockerComponentLauncher. If this option
        is used, the legacy non-IR-based BeamDagRunner will be constructed.
    Returns:
      Legacy or IR-based BeamDagRunner object.
    """
    if beam_orchestrator_args or config:
      logging.info(
          'Using the legacy BeamDagRunner since `beam_orchestrator_args` or '
          '`config` argument was passed.')
      return legacy_beam_dag_runner.BeamDagRunner(
          beam_orchestrator_args=beam_orchestrator_args, config=config)
    else:
      return super(BeamDagRunner, cls).__new__(cls)
  def _extract_platform_config(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    # Node-level platform config is optional; unpack the set oneof field,
    # or return None when this node has no entry.
    platform_config = deployment_config.node_level_platform_configs.get(node_id)
    return (getattr(platform_config, platform_config.WhichOneof('config'))
            if platform_config else None)
  def _build_local_platform_config(
      self, node_id: str,
      spec: any_pb2.Any) -> local_deployment_config_pb2.LocalPlatformConfig:
    """Builds LocalPlatformConfig given the any proto from IntermediateDeploymentConfig."""
    result = local_deployment_config_pb2.LocalPlatformConfig()
    if spec.Is(result.docker_platform_config.DESCRIPTOR):
      spec.Unpack(result.docker_platform_config)
    else:
      # Only docker platform configs are supported locally.
      raise ValueError(
          'Platform config of {} is expected to be of one of the '
          'types of tfx.orchestration.deployment_config.LocalPlatformConfig.config '
          'but got type {}'.format(node_id, spec.type_url))
    return result
  def _extract_deployment_config(
      self, pipeline: pipeline_pb2.Pipeline
  ) -> local_deployment_config_pb2.LocalDeploymentConfig:
    """Extracts the proto.Any pipeline.deployment_config to LocalDeploymentConfig."""
    return runner_utils.extract_local_deployment_config(pipeline)
  def _extract_executor_spec(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    # Delegates to runner_utils; returns the node's executor spec if any.
    return runner_utils.extract_executor_spec(deployment_config, node_id)
  def _extract_custom_driver_spec(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str
  ) -> Optional[message.Message]:
    # Delegates to runner_utils; returns the node's custom driver spec if any.
    return runner_utils.extract_custom_driver_spec(deployment_config, node_id)
  def _connection_config_from_deployment_config(self,
                                                deployment_config: Any) -> Any:
    # The MLMD connection config is carried inside the deployment config.
    return deployment_config.metadata_connection_config
  def run(self, pipeline: Union[pipeline_pb2.Pipeline,
                                pipeline_py.Pipeline]) -> None:
    """Deploys given logical pipeline on Beam.
    Args:
      pipeline: Logical pipeline in IR format (a DSL pipeline object is
        compiled to IR first).
    """
    if isinstance(pipeline, pipeline_py.Pipeline):
      for component in pipeline.components:
        # TODO(b/187122662): Pass through pip dependencies as a first-class
        # component flag.
        if isinstance(component, base_component.BaseComponent):
          component._resolve_pip_dependencies(  # pylint: disable=protected-access
              pipeline.pipeline_info.pipeline_root)
    if isinstance(pipeline, pipeline_py.Pipeline):
      # Compile the DSL pipeline down to its IR representation.
      c = compiler.Compiler()
      pipeline = c.compile(pipeline)
    run_id = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    # Substitute the runtime parameter to be a concrete run_id
    runtime_parameter_utils.substitute_runtime_parameter(
        pipeline, {
            constants.PIPELINE_RUN_ID_PARAMETER_NAME: run_id,
        })
    deployment_config = self._extract_deployment_config(pipeline)
    connection_config = self._connection_config_from_deployment_config(
        deployment_config)
    logging.info('Running pipeline:\n %s', pipeline)
    logging.info('Using deployment config:\n %s', deployment_config)
    logging.info('Using connection config:\n %s', connection_config)
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
      with beam.Pipeline() as p:
        # Uses for triggering the node DoFns.
        root = p | 'CreateRoot' >> beam.Create([None])
        # Stores mapping of node to its signal.
        signal_map = {}
        # pipeline.nodes are in topological order.
        for node in pipeline.nodes:
          # TODO(b/160882349): Support subpipeline
          pipeline_node = node.pipeline_node
          node_id = pipeline_node.node_info.id
          executor_spec = self._extract_executor_spec(deployment_config,
                                                      node_id)
          custom_driver_spec = self._extract_custom_driver_spec(
              deployment_config, node_id)
          # Signals from upstream nodes.
          signals_to_wait = []
          for upstream_node in pipeline_node.upstream_nodes:
            assert upstream_node in signal_map, ('Nodes are not in '
                                                 'topological order')
            signals_to_wait.append(signal_map[upstream_node])
          logging.info('Node %s depends on %s.', node_id,
                       [s.producer.full_label for s in signals_to_wait])
          # Each signal is an empty PCollection. AsIter ensures a node will
          # be triggered after upstream nodes are finished.
          signal_map[node_id] = (
              root
              | 'Run[%s]' % node_id >> beam.ParDo(
                  self._PIPELINE_NODE_DO_FN_CLS(
                      pipeline_node=pipeline_node,
                      mlmd_connection_config=connection_config,
                      pipeline_info=pipeline.pipeline_info,
                      pipeline_runtime_spec=pipeline.runtime_spec,
                      executor_spec=executor_spec,
                      custom_driver_spec=custom_driver_spec,
                      deployment_config=deployment_config),
                  *[beam.pvalue.AsIter(s) for s in signals_to_wait]))
logging.info('Node %s is scheduled.', node_id) | unknown | codeparrot/codeparrot-clean | ||
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class SWAReaderTest( unittest.TestCase ) :
	"""Unit tests for IECore.SWAReader (SpeedTree .swa point files).

	The deprecated failUnless/failIf assertion aliases were removed in
	Python 3.12; assertTrue/assertFalse are the supported equivalents.
	"""

	def testConstruction( self ) :
		# default construction leaves fileName empty; the one-argument
		# form stores the passed file name
		r = IECore.SWAReader()
		self.assertEqual( r["fileName"].getTypedValue(), "" )
		r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
		self.assertEqual( r["fileName"].getTypedValue(), "test/IECore/data/swaFiles/test.swa" )

	def testReading( self ) :
		r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
		o = r.read()
		# NOTE(review): debug dump to /tmp left in place -- consider removing
		IECore.ObjectWriter( o, "/tmp/trees4.cob" ).write()
		# the file holds 5 + 6 instanced points across two tree types
		self.assertTrue( o.isInstanceOf( IECore.PointsPrimitive.staticTypeId() ) )
		self.assertEqual( o.numPoints, 5 + 6 )
		self.assertTrue( o.arePrimitiveVariablesValid() )
		# all expected primitive variables are present
		self.assertTrue( "P" in o )
		self.assertTrue( "xAxis" in o )
		self.assertTrue( "yAxis" in o )
		self.assertTrue( "zAxis" in o )
		self.assertTrue( "scale" in o )
		self.assertTrue( "treeName" in o )
		self.assertTrue( "treeNameIndices" in o )
		# per-point data is Vertex interpolated; the name table is Constant
		self.assertEqual( o["P"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["xAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["yAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["zAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["scale"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["treeNameIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
		self.assertEqual( o["treeName"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
		self.assertTrue( isinstance( o["P"].data, IECore.V3fVectorData ) )
		self.assertTrue( isinstance( o["xAxis"].data, IECore.V3fVectorData ) )
		self.assertTrue( isinstance( o["yAxis"].data, IECore.V3fVectorData ) )
		self.assertTrue( isinstance( o["zAxis"].data, IECore.V3fVectorData ) )
		self.assertTrue( isinstance( o["scale"].data, IECore.FloatVectorData ) )
		self.assertTrue( isinstance( o["treeNameIndices"].data, IECore.IntVectorData ) )
		self.assertTrue( isinstance( o["treeName"].data, IECore.StringVectorData ) )
		self.assertEqual( o["treeName"].data, IECore.StringVectorData( [ "Acacia_RT", "BroadLeaf_HighDetail" ] ) )
		# spot-check the first point's frame; zAxis is derived as x cross y
		self.assertEqual( o["P"].data[0], IECore.V3f( 3750.05, 1556.86, -2149.22 ) )
		self.assertEqual( o["yAxis"].data[0], IECore.V3f( 0.0176831, 0.998519, 0.0514542 ) )
		self.assertEqual( o["xAxis"].data[0], IECore.V3f( 0.0179192, -0.0517705, 0.998498 ) )
		self.assertEqual( o["zAxis"].data[0], o["xAxis"].data[0].cross( o["yAxis"].data[0] ) )
		self.assertAlmostEqual( o["scale"].data[0], 6.4516, 6 )
		self.assertAlmostEqual( o["scale"].data[1], 6.7, 6 )
		self.assertEqual( o["treeNameIndices"].data, IECore.IntVectorData( [ 0 ] * 5 + [ 1 ] * 6 ) )

	def testCanRead( self ) :
		# SWAReader accepts only .swa content; unrelated formats are rejected
		self.assertTrue( IECore.SWAReader.canRead( "test/IECore/data/swaFiles/test.swa" ) )
		self.assertFalse( IECore.IDXReader.canRead( "test/IECore/data/exrFiles/carPark.exr" ) )
		self.assertFalse( IECore.SWAReader.canRead( "test/IECore/data/idxFiles/test.idx" ) )
		self.assertFalse( IECore.SWAReader.canRead( "test/IECore/data/empty" ) )

	def testRegistration( self ) :
		# the generic Reader factory must dispatch .swa files to SWAReader
		r = IECore.Reader.create( "test/IECore/data/swaFiles/test.swa" )
		self.assertTrue( isinstance( r, IECore.SWAReader ) )
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef SRC_BASE_OBJECT_H_
#define SRC_BASE_OBJECT_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include <type_traits> // std::remove_reference
#include "base_object_types.h"
#include "memory_tracker.h"
#include "node_v8_embedder.h"
#include "util.h"
#include "v8.h"
namespace node {
class Environment;
class IsolateData;
class Realm;
template <typename T, bool kIsWeak>
class BaseObjectPtrImpl;
namespace worker {
class TransferData;
}
class BaseObject : public MemoryRetainer {
 public:
  // Internal-field layout of the wrapped JS object: a tag identifying it
  // as a Node.js BaseObject, plus the slot holding the BaseObject* itself.
  enum InternalFields { kEmbedderType, kSlot, kInternalFieldCount };
  // Associates this object with `object`. It uses the 1st internal field for
  // that, and in particular aborts if there is no such field.
  // This is the designated constructor.
  BaseObject(Realm* realm, v8::Local<v8::Object> object);
  // Convenient constructor for constructing BaseObject in the principal realm.
  inline BaseObject(Environment* env, v8::Local<v8::Object> object);
  ~BaseObject() override;
  BaseObject() = delete;
  // Returns the wrapped object. Returns an empty handle when
  // persistent.IsEmpty() is true.
  inline v8::Local<v8::Object> object() const;
  // Same as the above, except it additionally verifies that this object
  // is associated with the passed Isolate in debug mode.
  inline v8::Local<v8::Object> object(v8::Isolate* isolate) const;
  inline v8::Global<v8::Object>& persistent();
  inline Environment* env() const;
  inline Realm* realm() const;
  // Get a BaseObject* pointer, or subclass pointer, for the JS object that
  // was also passed to the `BaseObject()` constructor initially.
  // This may return `nullptr` if the C++ object has not been constructed yet,
  // e.g. when the JS object used `MakeLazilyInitializedJSTemplate`.
  static inline void SetInternalFields(IsolateData* isolate_data,
                                       v8::Local<v8::Object> object,
                                       void* slot);
  static inline bool IsBaseObject(IsolateData* isolate_data,
                                  v8::Local<v8::Object> object);
  static inline void TagBaseObject(IsolateData* isolate_data,
                                   v8::Local<v8::Object> object);
  static void LazilyInitializedJSTemplateConstructor(
      const v8::FunctionCallbackInfo<v8::Value>& args);
  static inline BaseObject* FromJSObject(v8::Local<v8::Value> object);
  template <typename T>
  static inline T* FromJSObject(v8::Local<v8::Value> object);
  // Global alias for FromJSObject() to avoid churn.
  template <typename T>
  static inline T* Unwrap(v8::Local<v8::Value> obj) {
    return BaseObject::FromJSObject<T>(obj);
  }
  // Make the `v8::Global` a weak reference and, `delete` this object once
  // the JS object has been garbage collected and there are no (strong)
  // BaseObjectPtr references to it.
  void MakeWeak();
  // Undo `MakeWeak()`, i.e. turn this into a strong reference that is a GC
  // root and will not be touched by the garbage collector.
  inline void ClearWeak();
  // Reports whether this BaseObject is using a weak reference or detached,
  // i.e. whether is can be deleted by GC once no strong BaseObjectPtrs refer
  // to it anymore.
  inline bool IsWeakOrDetached() const;
  // Utility to create a FunctionTemplate with one internal field (used for
  // the `BaseObject*` pointer) and a constructor that initializes that field
  // to `nullptr`.
  static v8::Local<v8::FunctionTemplate> MakeLazilyInitializedJSTemplate(
      IsolateData* isolate);
  static v8::Local<v8::FunctionTemplate> MakeLazilyInitializedJSTemplate(
      Environment* env);
  // Setter/Getter pair for internal fields that can be passed to SetAccessor.
  template <int Field>
  static void InternalFieldGet(const v8::FunctionCallbackInfo<v8::Value>& args);
  template <int Field, bool (v8::Value::*typecheck)() const>
  static void InternalFieldSet(const v8::FunctionCallbackInfo<v8::Value>& args);
  // This is a bit of a hack. See the override in async_wrap.cc for details.
  virtual bool IsDoneInitializing() const;
  // Can be used to avoid this object keeping itself alive as a GC root
  // indefinitely, for example when this object is owned and deleted by another
  // BaseObject once that is torn down. This can only be called when there is
  // a BaseObjectPtr to this object.
  inline void Detach();
  // Interface for transferring BaseObject instances using the .postMessage()
  // method of MessagePorts (and, by extension, Workers).
  // GetTransferMode() returns a transfer mode that indicates how to deal with
  // the current object:
  // - kDisallowCloneAndTransfer:
  //   No transfer or clone is possible, either because this type of
  //   BaseObject does not know how to be transferred, or because it is not
  //   in a state in which it is possible to do so (e.g. because it has
  //   already been transferred).
  // - kTransferable:
  //   This object can be transferred in a destructive fashion, i.e. will be
  //   rendered unusable on the sending side of the channel in the process
  //   of being transferred. (In C++ this would be referred to as movable but
  //   not copyable.) Objects of this type need to be listed in the
  //   `transferList` argument of the relevant postMessage() call in order to
  //   make sure that they are not accidentally destroyed on the sending side.
  //   TransferForMessaging() will be called to get a representation of the
  //   object that is used for subsequent deserialization.
  //   The NestedTransferables() method can be used to transfer other objects
  //   along with this one, if a situation requires it.
  // - kCloneable:
  //   This object can be cloned without being modified.
  //   CloneForMessaging() will be called to get a representation of the
  //   object that is used for subsequent deserialization, unless the
  //   object is listed in transferList and is kTransferable, in which case
  //   TransferForMessaging() is attempted first.
  // - kTransferableAndCloneable:
  //   This object can be transferred or cloned.
  // After a successful clone, FinalizeTransferRead() is called on the receiving
  // end, and can read deserialize JS data possibly serialized by a previous
  // FinalizeTransferWrite() call.
  // By default, a BaseObject is kDisallowCloneAndTransfer and a JS Object is
  // kCloneable unless otherwise specified.
  enum TransferMode : uint32_t {
    kDisallowCloneAndTransfer = 0,
    kTransferable = 1 << 0,
    kCloneable = 1 << 1,
    kTransferableAndCloneable = kTransferable | kCloneable,
  };
  virtual TransferMode GetTransferMode() const;
  virtual std::unique_ptr<worker::TransferData> TransferForMessaging();
  virtual std::unique_ptr<worker::TransferData> CloneForMessaging() const;
  virtual v8::Maybe<std::vector<BaseObjectPtrImpl<BaseObject, false>>>
  NestedTransferables() const;
  virtual v8::Maybe<void> FinalizeTransferRead(
      v8::Local<v8::Context> context, v8::ValueDeserializer* deserializer);
  // Indicates whether this object is expected to use a strong reference during
  // a clean process exit (due to an empty event loop).
  virtual bool IsNotIndicativeOfMemoryLeakAtExit() const;
  virtual inline void OnGCCollect();
  virtual inline bool is_snapshotable() const { return false; }
 private:
  v8::Local<v8::Object> WrappedObject() const override;
  // Destroys this object (see implementation for the exact teardown order).
  void DeleteMe();
  // persistent_handle_ needs to be at a fixed offset from the start of the
  // class because it is used by src/node_postmortem_metadata.cc to calculate
  // offsets and generate debug symbols for BaseObject, which assumes that the
  // position of members in memory are predictable. For more information please
  // refer to `doc/contributing/node-postmortem-support.md`
  friend int GenDebugSymbols();
  friend class CleanupQueue;
  template <typename T, bool kIsWeak>
  friend class BaseObjectPtrImpl;
  v8::Global<v8::Object> persistent_handle_;
  // Metadata that is associated with this BaseObject if there are BaseObjectPtr
  // or BaseObjectWeakPtr references to it.
  // This object is deleted when the BaseObject itself is destroyed, and there
  // are no weak references to it.
  struct PointerData {
    // Number of BaseObjectPtr instances that refer to this object. If this
    // is non-zero, the BaseObject is always a GC root and will not be destroyed
    // during cleanup until the count drops to zero again.
    unsigned int strong_ptr_count = 0;
    // Number of BaseObjectWeakPtr instances that refer to this object.
    unsigned int weak_ptr_count = 0;
    // Indicates whether MakeWeak() has been called.
    bool wants_weak_jsobj = false;
    // Indicates whether Detach() has been called. If that is the case, this
    // object will be destroyed once the strong pointer count drops to zero.
    bool is_detached = false;
    // Reference to the original BaseObject. This is used by weak pointers.
    BaseObject* self = nullptr;
  };
  inline bool has_pointer_data() const;
  // This creates a PointerData struct if none was associated with this
  // BaseObject before.
  PointerData* pointer_data();
  // Functions that adjust the strong pointer count.
  void decrease_refcount();
  void increase_refcount();
  // The realm this object belongs to.
  Realm* realm_;
  PointerData* pointer_data_ = nullptr;
  // Hook for membership in the per-realm BaseObjectList below.
  ListNode<BaseObject> base_object_list_node_;
  friend class BaseObjectList;
};
class BaseObjectList
: public ListHead<BaseObject, &BaseObject::base_object_list_node_>,
public MemoryRetainer {
public:
void Cleanup();
SET_MEMORY_INFO_NAME(BaseObjectList)
SET_SELF_SIZE(BaseObjectList)
void MemoryInfo(node::MemoryTracker* tracker) const override;
};
#define ASSIGN_OR_RETURN_UNWRAP(ptr, obj, ...) \
do { \
*ptr = static_cast<typename std::remove_reference<decltype(*ptr)>::type>( \
BaseObject::FromJSObject(obj)); \
if (*ptr == nullptr) return __VA_ARGS__; \
} while (0)
// Implementation of a generic strong or weak pointer to a BaseObject.
// If strong, this will keep the target BaseObject alive regardless of other
// circumstances such as the GC or Environment cleanup.
// If weak, destruction behaviour is not affected, but the pointer will be
// reset to nullptr once the BaseObject is destroyed.
// The API matches std::shared_ptr closely. However, this class is not thread
// safe, that is, we can't have different BaseObjectPtrImpl instances in
// different threads referring to the same BaseObject instance.
template <typename T, bool kIsWeak>
class BaseObjectPtrImpl final {
public:
inline BaseObjectPtrImpl();
inline ~BaseObjectPtrImpl();
inline explicit BaseObjectPtrImpl(T* target);
// Copy and move constructors. Note that the templated version is not a copy
// or move constructor in the C++ sense of the word, so an identical
// untemplated version is provided.
template <typename U, bool kW>
inline BaseObjectPtrImpl(const BaseObjectPtrImpl<U, kW>& other);
inline BaseObjectPtrImpl(const BaseObjectPtrImpl& other);
template <typename U, bool kW>
inline BaseObjectPtrImpl& operator=(const BaseObjectPtrImpl<U, kW>& other);
inline BaseObjectPtrImpl& operator=(const BaseObjectPtrImpl& other);
inline BaseObjectPtrImpl(BaseObjectPtrImpl&& other);
inline BaseObjectPtrImpl& operator=(BaseObjectPtrImpl&& other);
inline BaseObjectPtrImpl(std::nullptr_t);
inline BaseObjectPtrImpl& operator=(std::nullptr_t);
inline void reset(T* ptr = nullptr);
inline T* get() const;
inline T& operator*() const;
inline T* operator->() const;
inline operator bool() const;
template <typename U, bool kW>
inline bool operator ==(const BaseObjectPtrImpl<U, kW>& other) const;
template <typename U, bool kW>
inline bool operator !=(const BaseObjectPtrImpl<U, kW>& other) const;
private:
union {
BaseObject* target; // Used for strong pointers.
BaseObject::PointerData* pointer_data; // Used for weak pointers.
} data_;
inline BaseObject* get_base_object() const;
inline BaseObject::PointerData* pointer_data() const;
};
template <typename T, bool kIsWeak>
inline static bool operator==(const BaseObjectPtrImpl<T, kIsWeak>,
const std::nullptr_t);
template <typename T, bool kIsWeak>
inline static bool operator==(const std::nullptr_t,
const BaseObjectPtrImpl<T, kIsWeak>);
template <typename T>
using BaseObjectPtr = BaseObjectPtrImpl<T, false>;
template <typename T>
using BaseObjectWeakPtr = BaseObjectPtrImpl<T, true>;
// Create a BaseObject instance and return a pointer to it.
// This variant leaves the object as a GC root by default.
template <typename T, typename... Args>
inline BaseObjectPtr<T> MakeBaseObject(Args&&... args);
// Create a BaseObject instance and return a pointer to it.
// This variant makes the object a weak GC root by default.
template <typename T, typename... Args>
inline BaseObjectWeakPtr<T> MakeWeakBaseObject(Args&&... args);
// Create a BaseObject instance and return a pointer to it.
// This variant detaches the object by default, meaning that the caller fully
// owns it, and once the last BaseObjectPtr to it is destroyed, the object
// itself is also destroyed.
template <typename T, typename... Args>
inline BaseObjectPtr<T> MakeDetachedBaseObject(Args&&... args);
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_BASE_OBJECT_H_ | c | github | https://github.com/nodejs/node | src/base_object.h |
from django.core.checks.compatibility.django_1_8_0 import \
check_duplicate_template_settings
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckDuplicateTemplateSettingsTest(SimpleTestCase):
def test_not_raised_if_no_templates_setting(self):
self.assertEqual(check_duplicate_template_settings(None), [])
@override_settings(
TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates'}],
TEMPLATE_DIRS=['/path/to/dirs'],
)
def test_duplicate_setting(self):
result = check_duplicate_template_settings(None)
self.assertEqual(result[0].id, '1_8.W001')
@override_settings(
TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates'}],
TEMPLATE_DIRS=['/path/to/dirs'],
TEMPLATE_DEBUG=True,
)
def test_multiple_duplicate_settings(self):
result = check_duplicate_template_settings(None)
self.assertEqual(len(result), 1)
self.assertIn('TEMPLATE_DIRS', result[0].msg)
self.assertIn('TEMPLATE_DEBUG', result[0].msg) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
from slicc.symbols import Type
class LiteralExprAST(ExprAST):
def __init__(self, slicc, literal, type):
super(LiteralExprAST, self).__init__(slicc)
self.literal = literal
self.type = type
def __repr__(self):
return "[Literal: %s]" % self.literal
def generate(self, code):
fix = code.nofix()
if self.type == "std::string":
code('("${{self.literal}}")')
elif self.type == "bool":
code('(${{str(self.literal).lower()}})')
else:
code('(${{self.literal}})')
code.fix(fix)
type = self.symtab.find(self.type, Type)
if type is None:
# Can't find the type
self.error("Internal: can't primitive type '%s'" % self.type)
return type | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# Authors: Nicolas Bessi, Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Financial Reports - Webkit',
'description': """
Financial Reports - Webkit
==========================
This module adds or replaces the following standard OpenERP financial reports:
- General ledger
- Trial Balance (simple or comparative view)
- Partner ledger
- Partner balance
- Open invoices report
- Aged Partner Balance
Main improvements per report:
-----------------------------
The General ledger: details of all entries posted in your books sorted by
account.
* Filter by account is available in the wizard (no need to go to the
Chart of Accounts to do this anymore) or by View account (the report
will display all regular children accounts) i.e. you can select all
P&L accounts.
* The report only prints accounts with moves OR with a non
null balance. No more endless report with empty accounts (field:
display account is hidden)
* initial balance computation on the fly if no open entry posted
* Thanks to a new checkbox in the account form, you will have the
possibility to centralize any account you like. This means you do
not want to see all entries posted under the account ‘VAT on sales’;
you will only see aggregated amounts by periods.
* Counterpart account is displayed for each transaction (3 accounts max.)
to ease searching.
* Better ergonomy on the wizard: important information is displayed in
the top part, filters are in the middle, and options are in the
bottom or on a separate tab. There is more specific filtering on
separate tabs. No more unique wizard layout for all financial
reports (we have removed the journal tab for the GL report)
* improved report style
The partner ledger: details of entries relative to payable &
receivable accounts posted in your books sorted by account and
partner.
* Filter by partner now available
* Now you can see Accounts then Partner with subtotals for each
account allowing you to check you data with trial balance and
partner balance for instance. Accounts are ordered in the same way as
in the Chart of account
* Period have been added (date only is not filled in since date can be
outside period)
* Reconciliation code added
* Subtotal by account
* Alphabetical sorting (same as in partner balance)
Open invoice report : other version of the partner ledger showing
unreconciled / partially reconciled entries.
* Possibility to print unreconciled transactions only at any date in
the past (thanks to the new field: `last_rec_date` which computes
the last move line reconciliation date). No more pain to get open
invoices at the last closing date.
* no initial balance computed because the report shows open invoices
from previous years.
The Trial balance: list of accounts with balances
* You can either see the columns: initial balance, debit, credit,
end balance or compare balances over 4 periods of your choice
* You can select the "opening" filter to get the opening trial balance
only
* If you create an extra virtual chart (using consolidated account) of
accounts for your P&L and your balance sheet, you can print your
statutory accounts (with comparison over years for instance)
* If you compare 2 periods, you will get the differences in values and
in percent
The Partner balance: list of account with balances
* Subtotal by account and partner
* Alphabetical sorting (same as in partner balance)
Aged Partner Balance: Summary of aged open amount per partner
This report is an accounting tool helping in various tasks.
You can credit control or partner balance provisions computation for instance.
The aged balance report allows you to print balances per partner
like the trial balance but add an extra information :
* It will split balances into due amounts
(due date not reached à the end date of the report) and overdue amounts
Overdue data are also split by period.
* For each partner following columns will be displayed:
* Total balance (all figures must match with same date partner balance
report).
This column equals the sum of all following columns)
* Due
* Overdue <= 30 days
* Overdue <= 60 days
* Overdue <= 90 days
* Overdue <= 120 days
* Older
Hypothesis / Contraints of aged partner balance
* Overdues columns will be by default be based on 30 days range fix number of
days. This can be changed by changes the RANGES constraint
* All data will be displayed in company currency
* When partial payments, the payment must appear in the same colums than the
invoice (Except if multiple payment terms)
* Data granularity: partner (will not display figures at invoices level)
* The report aggregate data per account with sub-totals
* Initial balance must be calculated the same way that
the partner balance / Ignoring the opening entry
in special period (idem open invoice report)
* Only accounts with internal type payable or receivable are considered
(idem open invoice report)
* If maturity date is null then use move line date
Limitations:
------------
In order to run properly this module makes sure you have installed the
library `wkhtmltopdf` for the pdf rendering (the library path must be
set in a System Parameter `webkit_path`).
Initial balances in these reports are based either on opening entry
posted in the opening period or computed on the fly. So make sure
that your past accounting opening entries are in an opening period.
Initials balances are not computed when using the Date filter (since a
date can be outside its logical period and the initial balance could
be different when computed by data or by initial balance for the
period). The opening period is assumed to be the Jan. 1st of the year
with an opening flag and the first period of the year must start also
on Jan 1st.
Totals for amounts in currencies are effective if the partner belongs to
an account with a secondary currency.
HTML headers and footers are deactivated for these reports because of
an issue in wkhtmltopdf
(http://code.google.com/p/wkhtmltopdf/issues/detail?id=656) Instead,
the header and footer are created as text with arguments passed to
wkhtmltopdf. The texts are defined inside the report classes.
""",
'version': '1.1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'category': 'Finance',
'website': 'http://www.camptocamp.com',
'images': [
'images/ledger.png', ],
'depends': ['account',
'report_webkit'],
'demo': [],
'data': ['account_view.xml',
'data/financial_webkit_header.xml',
'report/report.xml',
'wizard/wizard.xml',
'wizard/balance_common_view.xml',
'wizard/general_ledger_wizard_view.xml',
'wizard/partners_ledger_wizard_view.xml',
'wizard/trial_balance_wizard_view.xml',
'wizard/partner_balance_wizard_view.xml',
'wizard/open_invoices_wizard_view.xml',
'wizard/aged_partner_balance_wizard.xml',
'wizard/print_journal_view.xml',
'report_menus.xml',
],
# tests order matter
'test': ['tests/general_ledger.yml',
'tests/partner_ledger.yml',
'tests/trial_balance.yml',
'tests/partner_balance.yml',
'tests/open_invoices.yml',
'tests/aged_trial_balance.yml'],
# 'tests/account_move_line.yml'
'active': False,
'installable': True,
'application': True,
} | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_
#define TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tensor {
// DeepCopy returns a tensor whose contents are a deep copy of the
// contents of 'other'. This function is intended only for
// convenience, not speed.
//
// REQUIRES: 'other' must point to data stored in CPU memory.
// REQUIRES: 'other' must be a Tensor of a copy-able type if
// 'other' is not appropriately memory-aligned.
Tensor DeepCopy(const Tensor& other);
// Deep copies input to output. This function is similar to above, but assumes
// that the memory for the output has already been allocated.
void DeepCopy(const Tensor& input, Tensor* output);
// Concatenates 'tensors' into a single tensor, along their 0th dimension.
//
// REQUIRES: All members of 'tensors' must have the same data type parameter.
// REQUIRES: Each member of 'tensors' must have at least one dimension.
// REQUIRES: Each member of 'tensors' must point to data stored in CPU memory.
// REQUIRES: Each member of 'tensors' must be a Tensor of a copy-able type if it
// is not appropriately memory-aligned.
absl::Status Concat(absl::Span<const Tensor> tensors, Tensor* result);
// Splits 'tensor' into 'sizes.size()' individual tensors, along the 0th
// dimension. The ith output tensor has 0th-dimension size 'sizes[i]'.
//
// REQUIRES: 'tensor' must have at least one dimension.
// REQUIRES: 'tensor.dim_size(0)' must equal the sum of the elements of 'sizes'.
// REQUIRES: 'tensor' must point to data stored in CPU memory.
// REQUIRES: 'tensor' must be a Tensor of a copy-able type if it is not
// appropriately memory-aligned.
//
// Split() and Concat() are inverse operations.
absl::Status Split(const Tensor& tensor, absl::Span<const int64_t> sizes,
std::vector<Tensor>* result);
namespace internal {
void SetTensorProtoShape(absl::Span<const size_t> shape,
TensorShapeProto* shape_proto);
template <typename Type>
class TensorProtoFieldHelper : public std::false_type {};
#define DEFINE_PROTO_FIELD_HELPER(TYPE, FIELDNAME) \
template <> \
class TensorProtoFieldHelper<TYPE> : public std::true_type { \
public: \
typedef decltype( \
std::declval<TensorProto>().FIELDNAME##_val(0)) FieldType; \
typedef decltype( \
std::declval<TensorProto>().FIELDNAME##_val()) RepeatedFieldType; \
typedef decltype(std::declval<TensorProto>().mutable_##FIELDNAME##_val()) \
MutableRepeatedFieldType; \
static MutableRepeatedFieldType GetMutableField(TensorProto* proto) { \
return proto->mutable_##FIELDNAME##_val(); \
} \
static RepeatedFieldType& GetField(const TensorProto& proto) { \
return proto.FIELDNAME##_val(); \
} \
}
// The argument pairs in the following macro instantiations encode the
// mapping from C++ type ($1) to repeated field name "$2_val" used for storing
// values in TensorProto. See tensorflow/core/framework/tensor.proto.
DEFINE_PROTO_FIELD_HELPER(float, float);
DEFINE_PROTO_FIELD_HELPER(double, double);
DEFINE_PROTO_FIELD_HELPER(int8_t, int);
DEFINE_PROTO_FIELD_HELPER(uint8_t, int);
DEFINE_PROTO_FIELD_HELPER(int16_t, int);
DEFINE_PROTO_FIELD_HELPER(uint16_t, int);
DEFINE_PROTO_FIELD_HELPER(int32_t, int);
DEFINE_PROTO_FIELD_HELPER(uint32_t, uint32);
DEFINE_PROTO_FIELD_HELPER(int64_t, int64);
DEFINE_PROTO_FIELD_HELPER(uint64_t, uint64);
DEFINE_PROTO_FIELD_HELPER(bool, bool);
DEFINE_PROTO_FIELD_HELPER(qint8, int);
DEFINE_PROTO_FIELD_HELPER(quint8, int);
DEFINE_PROTO_FIELD_HELPER(qint16, int);
DEFINE_PROTO_FIELD_HELPER(quint16, int);
DEFINE_PROTO_FIELD_HELPER(qint32, int);
DEFINE_PROTO_FIELD_HELPER(Eigen::half, half);
DEFINE_PROTO_FIELD_HELPER(bfloat16, half);
DEFINE_PROTO_FIELD_HELPER(complex64, scomplex);
DEFINE_PROTO_FIELD_HELPER(complex128, dcomplex);
#undef DEFINE_PROTO_HELPER
template <typename T>
struct CopyHelper {
template <typename SrcIter, typename DstIter>
static void ToArray(SrcIter begin, SrcIter end, DstIter dst) {
using SrcType = typename std::iterator_traits<SrcIter>::value_type;
using DstType = typename std::iterator_traits<DstIter>::value_type;
std::transform(begin, end, dst, [](const SrcType& x) -> DstType {
return static_cast<DstType>(x);
});
}
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, SrcIter dst) {
std::copy(begin, end, dst);
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
ToArray(begin, end, dst);
}
};
// Overloads for Eigen::half and bfloat16 that are 16 bits in size but are
// stored in an int32 field.
template <>
struct CopyHelper<Eigen::half> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, Eigen::half* dst) {
std::transform(begin, end, dst, [](int x) -> Eigen::half {
return Eigen::numext::bit_cast<Eigen::half>(static_cast<uint16_t>(x));
});
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
std::transform(begin, end, dst, [](Eigen::half h) -> int {
return static_cast<int>(Eigen::numext::bit_cast<uint16_t>(h));
});
}
};
template <>
struct CopyHelper<bfloat16> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, bfloat16* dst) {
std::transform(begin, end, dst, [](int x) -> bfloat16 {
return Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(x));
});
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
std::transform(begin, end, dst, [](bfloat16 bf16) -> int {
return static_cast<int>(Eigen::numext::bit_cast<uint16_t>(bf16));
});
}
};
// Overloads for complex types that store real and imaginary parts
// at indices 2*i and 2*i+1 in float or double field.
template <typename RealType>
struct CopyHelper<std::complex<RealType>> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, std::complex<RealType>* dst) {
RealType* real_dst = reinterpret_cast<RealType*>(dst);
std::copy(begin, end, real_dst);
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
size_t n = std::distance(begin, end);
const RealType* real_begin = reinterpret_cast<const RealType*>(&(*begin));
std::copy_n(real_begin, 2 * n, dst);
}
};
// Helper class to extract and insert values into TensorProto represented as
// repeated fields.
template <typename T>
class TensorProtoHelper : public std::true_type {
public:
using FieldHelper = TensorProtoFieldHelper<T>;
using FieldType = typename TensorProtoFieldHelper<T>::FieldType;
static DataType GetDataType() { return DataTypeToEnum<T>::value; }
// Returns the number of values of type T encoded in the proto.
static size_t NumValues(const TensorProto& proto) {
size_t raw_size = FieldHelper::GetField(proto).size();
return is_complex<T>::value ? raw_size / 2 : raw_size;
}
static void AddValue(const T& value, TensorProto* proto) {
const T* val_ptr = &value;
AddValues(val_ptr, val_ptr + 1, proto);
}
static T GetValue(size_t index, const TensorProto& proto) {
const size_t stride = is_complex<T>::value ? 2 : 1;
T val;
CopyHelper<T>::ToArray(
FieldHelper::GetField(proto).begin() + stride * index,
FieldHelper::GetField(proto).begin() + stride * (index + 1), &val);
return val;
}
template <typename IterType>
static void AddValues(IterType begin, IterType end, TensorProto* proto) {
size_t n = std::distance(begin, end);
FieldType* dst = AppendUninitialized(n, proto);
CopyHelper<T>::FromArray(begin, end, dst);
}
template <typename IterType>
static void CopyValues(IterType dst, const TensorProto& proto) {
CopyHelper<T>::ToArray(FieldHelper::GetField(proto).begin(),
FieldHelper::GetField(proto).end(), dst);
}
static void Truncate(size_t new_size, TensorProto* proto) {
if (is_complex<T>::value) new_size *= 2;
FieldHelper::GetMutableField(proto)->Truncate(new_size);
}
static FieldType* AppendUninitialized(size_t n, TensorProto* proto) {
if (is_complex<T>::value) n *= 2;
auto* field = FieldHelper::GetMutableField(proto);
field->Reserve(field->size() + n);
return reinterpret_cast<FieldType*>(field->AddNAlreadyReserved(n));
}
};
// Specialization for string.
template <>
class TensorProtoHelper<std::string> : public std::true_type {
public:
static DataType GetDataType() { return DataType::DT_STRING; }
static void AddValue(const std::string& value, TensorProto* proto) {
*proto->mutable_string_val()->Add() = value;
}
template <typename IterType>
static void AddValues(IterType begin, IterType end, TensorProto* proto) {
for (IterType it = begin; it != end; ++it) {
AddValue(*it, proto);
}
}
template <typename IterType>
static void CopyToTensorContent(IterType begin, IterType end,
TensorProto* proto) {
AddValues(begin, end, proto);
}
};
template <typename Type, typename IterType>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProto(IterType values_begin, IterType values_end,
const size_t values_size,
const absl::Span<const size_t> shape) {
TensorProto tensor;
TensorShapeProto tensor_shape_proto;
internal::SetTensorProtoShape(shape, &tensor_shape_proto);
if (TensorShape(tensor_shape_proto).num_elements() != values_size) {
LOG(ERROR) << "Shape and number of values (" << values_size
<< ") are incompatible.";
return tensor;
}
using TypeHelper = internal::TensorProtoHelper<Type>;
tensor.set_dtype(TypeHelper::GetDataType());
*tensor.mutable_tensor_shape() = std::move(tensor_shape_proto);
TypeHelper::AddValues(values_begin, values_end, &tensor);
return tensor;
}
} // namespace internal
// Creates a 'TensorProto' with the specified shape and values. The dtype and a
// field to represent data values of the returned 'TensorProto' are determined
// based on Type. Note that unless the argument provided to `values` is already
// an absl::Span, `Type` will need to be provided as a template parameter--the
// compiler can't infer it:
// auto proto = CreateTensorProtoSpan<float>(my_array, shape);
template <typename Type>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProtoSpan(const absl::Span<const Type> values,
const absl::Span<const size_t> shape) {
return internal::CreateTensorProto<Type>(values.begin(), values.end(),
values.size(), shape);
}
// Version of the above that's more convenient if `values` is an std::vector, in
// which case Type can automatically be inferred:
// auto proto = CreateTensorProto(my_vector, shape);
template <typename Type>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProto(const std::vector<Type>& values,
const absl::Span<const size_t> shape) {
// This awkward iterator passing is essentially just to support vector<bool>,
// otherwise we could just represent the vector as a Span.
return internal::CreateTensorProto<Type>(values.begin(), values.end(),
values.size(), shape);
}
// Converts values in tensor to run-length encoded compressed form.
//
// The elements of a tensor can be stored in a TensorProto in one of the
// following two forms:
// 1. As a raw byte string in the field `tensor_content` containing the
// serialized in-memory representation of the tensor.
// 2. As values of a repeated field depending on the datatype, e.g. that
// values of a DT_FLOAT tensor would be stored in the repeated field
// `float_val`.
// Storage scheme 2 may use a simple form of run-length encoding to compress
// data: If the values contains a tail of identical values, the repeated field
// will be truncated such that the number of values in the repeated field is
// less than the number of elements implied by the field`tensor_shape`. The
// original tensor can be recovered by repeating the final value in the repeated
// field.
//
// The TensorProto will be compressed if a) the tensor contains at least
// min_num_elements elements and b) the compressed tensor proto is would be at
// most the size of the original tensor proto divided by min_compression_ratio.
//
// Returns true if the tensor was compressed.
bool CompressTensorProtoInPlace(int64_t min_num_elements,
float min_compression_ratio,
TensorProto* tensor);
inline bool CompressTensorProtoInPlace(TensorProto* tensor) {
static const int64_t kDefaultMinNumElements = 64;
static const float kDefaultMinCompressionRatio = 2.0f;
return CompressTensorProtoInPlace(kDefaultMinNumElements,
kDefaultMinCompressionRatio, tensor);
}
// Make a TensorShape from the contents of shape_t. Shape_t must be a
// 1-dimensional tensor of type int32 or int64.
absl::Status MakeShape(const Tensor& shape_t, TensorShape* out);
} // namespace tensor
} // namespace tensorflow
#endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/framework/tensor_util.h |
"""Profiler tools for CherryPy.
CherryPy users
==============
You can profile any of your pages as follows:
from cherrypy.lib import profiler
class Root:
p = profile.Profiler("/path/to/profile/dir")
def index(self):
self.p.run(self._index)
index.exposed = True
def _index(self):
return "Hello, world!"
cherrypy.tree.mount(Root())
You can also turn on profiling for all requests
using the make_app function as WSGI middleware.
CherryPy developers
===================
This module can be used whenever you make changes to CherryPy,
to get a quick sanity-check on overall CP performance. Use the
"--profile" flag when running the test suite. Then, use the serve()
function to browse the results in a web browser. If you run this
module from the command line, it will call serve() for you.
"""
# Make profiler output more readable by adding __init__ modules' parents.
def new_func_strip_path(func_name):
filename, line, name = func_name
if filename.endswith("__init__.py"):
return os.path.basename(filename[:-12]) + filename[-12:], line, name
return os.path.basename(filename), line, name
try:
import profile
import pstats
pstats.func_strip_path = new_func_strip_path
except ImportError:
profile = None
pstats = None
import warnings
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, you can apt-get python2.4-profiler from "
"non-free in a separate step. See http://www.cherrypy.org/wiki/"
"ProfilingOnDebian for details.")
warnings.warn(msg)
import os, os.path
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_count = 0
class Profiler(object):
    """Profiles callables with the stdlib ``profile`` module, dumps each run
    to a ``cp_NNNN.prof`` file under ``self.path``, and serves the collected
    stats as simple HTML pages (CherryPy page-handler style)."""

    def __init__(self, path=None):
        # Default dump directory: a "profile" dir next to this module.
        if not path:
            path = os.path.join(os.path.dirname(__file__), "profile")
        self.path = path
        if not os.path.exists(path):
            os.makedirs(path)

    def run(self, func, *args, **params):
        """Dump profile data into self.path and return func's result."""
        global _count
        # Module-level counter gives every run across all Profiler
        # instances a unique cp_NNNN.prof filename.
        c = _count = _count + 1
        path = os.path.join(self.path, "cp_%04d.prof" % c)
        prof = profile.Profile()
        result = prof.runcall(func, *args, **params)
        prof.dump_stats(path)
        return result

    def statfiles(self):
        """statfiles() -> list of available profile dump filenames."""
        return [f for f in os.listdir(self.path)
                if f.startswith("cp_") and f.endswith(".prof")]

    def stats(self, filename, sortby='cumulative'):
        """stats(filename) -> output of print_stats() for the given profile."""
        sio = StringIO.StringIO()
        if sys.version_info >= (2, 5):
            # Python 2.5+ pstats.Stats accepts a 'stream' argument.
            s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
            s.strip_dirs()
            s.sort_stats(sortby)
            s.print_stats()
        else:
            # pstats.Stats before Python 2.5 didn't take a 'stream' arg,
            # but just printed to stdout. So re-route stdout.
            s = pstats.Stats(os.path.join(self.path, filename))
            s.strip_dirs()
            s.sort_stats(sortby)
            oldout = sys.stdout
            try:
                sys.stdout = sio
                s.print_stats()
            finally:
                sys.stdout = oldout
        response = sio.getvalue()
        sio.close()
        return response

    def index(self):
        # Two-frame layout: run list ("menu") on the left, report on the right.
        return """<html>
        <head><title>CherryPy profile data</title></head>
        <frameset cols='200, 1*'>
            <frame src='menu' />
            <frame name='main' src='' />
        </frameset>
        </html>
        """
    index.exposed = True

    def menu(self):
        # Generator page handler: CherryPy streams the yielded fragments.
        yield "<h2>Profiling runs</h2>"
        yield "<p>Click on one of the runs below to see profiling data.</p>"
        runs = self.statfiles()
        runs.sort()
        for i in runs:
            yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (i, i)
    menu.exposed = True

    def report(self, filename):
        # Imported here so this module stays importable outside CherryPy.
        import cherrypy
        cherrypy.response.headers['Content-Type'] = 'text/plain'
        return self.stats(filename)
    report.exposed = True
class ProfileAggregator(Profiler):
    """Variant of Profiler that accumulates all runs into a single
    ``profile.Profile`` instance and a single dump file."""

    def __init__(self, path=None):
        Profiler.__init__(self, path)
        global _count
        # One file/profiler pair per aggregator instance, reused across runs.
        self.count = _count = _count + 1
        self.profiler = profile.Profile()

    def run(self, func, *args):
        """Profile func(*args) into the shared stats file; return its result."""
        # NOTE(review): unlike Profiler.run this accepts no **params, so
        # keyword arguments cannot be forwarded — confirm that is intentional.
        path = os.path.join(self.path, "cp_%04d.prof" % self.count)
        result = self.profiler.runcall(func, *args)
        self.profiler.dump_stats(path)
        return result
class make_app:
    """WSGI middleware that profiles each request handled by the wrapped app."""

    def __init__(self, nextapp, path=None, aggregate=False):
        """Make a WSGI middleware app which wraps 'nextapp' with profiling.
        nextapp: the WSGI application to wrap, usually an instance of
        cherrypy.Application.
        path: where to dump the profiling output.
        aggregate: if True, profile data for all HTTP requests will go in
        a single file. If False (the default), each HTTP request will
        dump its profile data into a separate file.
        """
        self.nextapp = nextapp
        self.aggregate = aggregate
        if aggregate:
            self.profiler = ProfileAggregator(path)
        else:
            self.profiler = Profiler(path)

    def __call__(self, environ, start_response):
        # Exhaust the wrapped app's response inside the profiled call so the
        # profile covers response generation, not just handler dispatch.
        def gather():
            result = []
            for line in self.nextapp(environ, start_response):
                result.append(line)
            return result
        return self.profiler.run(gather)
def serve(path=None, port=8080):
    """Serve the profile browser (Profiler pages) on the given port."""
    # Imported here so this module stays importable outside CherryPy.
    import cherrypy
    cherrypy.config.update({'server.socket_port': int(port),
                            'server.thread_pool': 10,
                            'environment': "production",
                            })
    cherrypy.quickstart(Profiler(path))
if __name__ == "__main__":
    # CLI entry point: ``python profiler.py [path [port]]``.
    serve(*tuple(sys.argv[1:]))
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re
from fabric.api import local, warn
from fabric.colors import green, red
if __name__ == '__main__':
    # Lint first: fail fast on style errors before the (slow) test run.
    local('flake8 --ignore=E126 --ignore=W391 --statistics'
          ' --exclude=submodules,migrations,south_migrations,build .')
    local('coverage run --source="document_library" manage.py test -v 2'
          ' --traceback --failfast --settings=document_library.tests.settings'
          ' --pattern="*_tests.py"')
    local('coverage html -d coverage --omit="*__init__*,*/settings/*,'
          '*/south_migrations/*,*/migrations/*,*/tests/*,*admin*"')
    # The total percentage sits on the "pc_cov" line of the generated HTML
    # report; take the last "NN%" match on that line.
    total_line = local('grep -n pc_cov coverage/index.html', capture=True)
    percentage = float(re.findall(r'(\d+)%', total_line)[-1])
    # Bug fix: previously the green success line was printed even after the
    # red warning, giving a contradictory signal when coverage < 100.
    if percentage < 100:
        warn(red('Coverage is {0}%'.format(percentage)))
    else:
        print(green('Coverage is {0}%'.format(percentage)))
from common_fixtures import * # NOQA
def test_compute_free(super_client, new_context):
    """computeFree drops by one per running container and is restored when
    the containers are stopped with deallocateFromHost=True."""
    admin_client = new_context.client
    count = 5
    host = super_client.reload(new_context.host)
    image_uuid = new_context.image_uuid
    start_free = host.computeFree
    # Precondition: the host must have room for all test containers.
    assert start_free > count
    containers = []
    for _ in range(count):
        c = admin_client.create_container(imageUuid=image_uuid,
                                          networkMode='bridge',
                                          requestedHostId=host.id)
        containers.append(c)
    containers = wait_all_success(super_client, containers)
    host = super_client.reload(host)
    assert host.computeFree == start_free - count
    for c in containers:
        c.stop(deallocateFromHost=True)
    wait_all_success(admin_client, containers)
    host = super_client.reload(host)
    assert host.computeFree == start_free
def test_inactive_agent(super_client, new_context):
    """Scheduling succeeds while the host agent is active and fails with
    'Failed to find a placement' once the agent is deactivated."""
    host = super_client.reload(new_context.host)
    agent = host.agent()
    c = new_context.create_container()
    assert c.state == 'running'
    agent = super_client.wait_success(agent.deactivate())
    assert agent.state == 'inactive'
    # With the only agent inactive, allocation must fail and the container
    # is cleaned up to 'removed'.
    c = new_context.create_container_no_success()
    assert c.transitioning == 'error'
    assert c.transitioningMessage == 'Failed to find a placement'
    assert c.state == 'removed'
def test_spread(super_client, new_context):
    """The allocator spreads containers evenly: 3 hosts x `count` containers
    should decrement each host's computeFree by exactly `count`."""
    count = 3
    client = new_context.client
    host2 = register_simulated_host(new_context.client)
    host3 = register_simulated_host(new_context.client)
    hosts = [new_context.host, host2, host3]
    hosts = wait_all_success(super_client, hosts)
    for h in hosts:
        assert h.state == 'active'
        assert h.agent().state == 'active'
        assert len(h.agent().storagePools()) == 1
        assert h.agent().storagePools()[0].state == 'active'
    counts = []
    # Give every host ample capacity so placement is decided by spread,
    # not by free-slot exhaustion.
    for i, h in enumerate(hosts):
        h = super_client.update(h, {
            'computeFree': 10000000
        })
        counts.append(h.computeFree)
    containers = []
    for _ in range(len(hosts) * count):
        c = client.create_container(imageUuid=new_context.image_uuid,
                                    networkMode='bridge')
        containers.append(c)
    wait_all_success(super_client, containers, timeout=60)
    for i, h in enumerate(hosts):
        h = super_client.reload(h)
        assert counts[i] - count == h.computeFree
def _set_one(super_client, new_context):
    """Shrink the context's host to a single free compute slot."""
    host = new_context.host
    return super_client.update(host, computeFree=1)
def test_allocation_failed_on_create(super_client, new_context):
    """With a single compute slot, the second create fails placement and the
    container (and its volume) are removed; purge resets allocationState."""
    _set_one(super_client, new_context)
    # First container consumes the only slot.
    new_context.create_container(networkMode='bridge')
    c = new_context.create_container_no_success(networkMode='bridge')
    c = super_client.reload(c)
    assert c.state == 'removed'
    assert c.transitioning == 'error'
    assert c.transitioningMessage == 'Failed to find a placement'
    assert c.allocationState == 'activating'
    assert c.volumes()[0].state == 'removed'
    c = super_client.wait_success(super_client.reload(c.purge()))
    assert c.state == 'purged'
    assert c.allocationState == 'inactive'
def test_allocation_failed_on_start(super_client, new_context):
    """A start that fails placement leaves the container 'stopped' with an
    error; freeing the slot lets a subsequent start succeed."""
    _set_one(super_client, new_context)
    client = new_context.client
    # c2 consumes the only slot; c1 is created stopped.
    c2 = new_context.create_container(networkMode='bridge')
    c1 = new_context.create_container(startOnCreate=False,
                                      networkMode='bridge')
    c1 = client.wait_transitioning(c1.start())
    assert c1.state == 'stopped'
    assert c1.transitioning == 'error'
    assert c1.transitioningMessage == 'Failed to find a placement'
    # Remove c2 to free the slot, then c1 can start cleanly.
    c2 = client.wait_success(client.delete(c2))
    assert c2.state == 'removed'
    c2 = client.wait_success(c2.purge())
    assert c2.state == 'purged'
    c1 = client.wait_success(c1.start())
    assert c1.state == 'running'
    assert c1.transitioning == 'no'
    assert c1.transitioningMessage is None
def test_host_vnet_association(super_client, new_context):
    """VMs attached to subnets of a vnet land only on hosts that have a
    host-vnet map (host1 and host2), never on the unmapped host3."""
    account = new_context.project
    image_uuid = new_context.image_uuid
    host1 = new_context.host
    host2 = register_simulated_host(new_context.client)
    host3 = register_simulated_host(new_context.client)
    host1 = super_client.update(host1, computeFree=100000)
    host2 = super_client.update(host2, computeFree=100000)
    host3 = super_client.update(host3, computeFree=100000)
    for i in [host1, host2, host3]:
        assert i.computeFree == 100000
    network = super_client.create_network(accountId=account.id)
    vnet = super_client.create_vnet(accountId=account.id,
                                    networkId=network.id,
                                    uri='sim://')
    vnet = super_client.wait_success(vnet)
    assert vnet.state == 'active'
    # Two subnets, both mapped to the same vnet.
    subnet1 = super_client.create_subnet(accountId=account.id,
                                         networkAddress='192.168.0.0',
                                         cidrSize='16',
                                         networkId=network.id,
                                         startAddress='192.168.0.3',
                                         endAddress='192.168.0.5')
    subnet1 = super_client.wait_success(subnet1)
    subnet2 = super_client.create_subnet(accountId=account.id,
                                         networkAddress='192.168.2.0',
                                         cidrSize='16',
                                         networkId=network.id,
                                         startAddress='192.168.2.3',
                                         endAddress='192.168.3.5')
    subnet2 = super_client.wait_success(subnet2)
    subnet_map1 = super_client.create_subnet_vnet_map(accountId=account.id,
                                                      subnetId=subnet1.id,
                                                      vnetId=vnet.id)
    subnet_map1 = super_client.wait_success(subnet_map1)
    assert subnet_map1.state == 'active'
    subnet_map2 = super_client.create_subnet_vnet_map(accountId=account.id,
                                                      subnetId=subnet2.id,
                                                      vnetId=vnet.id)
    subnet_map2 = super_client.wait_success(subnet_map2)
    assert subnet_map2.state == 'active'
    # Only host1 and host2 are associated with the vnet; host3 is not.
    vnet_map1 = super_client.create_host_vnet_map(accountId=account.id,
                                                  hostId=host1.id,
                                                  vnetId=vnet.id)
    vnet_map1 = super_client.wait_success(vnet_map1)
    assert vnet_map1.state == 'active'
    vnet_map2 = super_client.create_host_vnet_map(accountId=account.id,
                                                  hostId=host2.id,
                                                  vnetId=vnet.id)
    vnet_map2 = super_client.wait_success(vnet_map2)
    assert vnet_map2.state == 'active'
    hosts = set()
    for _ in range(3):
        vm = super_client.create_virtual_machine(accountId=account.id,
                                                 subnetIds=[subnet1.id],
                                                 imageUuid=image_uuid)
        vm = super_client.wait_success(vm)
        assert vm.state == 'running'
        hosts.add(vm.hosts()[0].id)
    for _ in range(3):
        vm = super_client.create_virtual_machine(accountId=account.id,
                                                 subnetIds=[subnet2.id],
                                                 imageUuid=image_uuid)
        vm = super_client.wait_success(vm)
        assert vm.state == 'running'
        hosts.add(vm.hosts()[0].id)
    assert len(hosts) == 2
    assert host1.id in hosts
    assert host2.id in hosts
def test_allocation_stay_associated_to_host(super_client, context):
    """A stopped container keeps its host association."""
    container = context.create_container()
    container = context.client.wait_success(container.stop())
    assert container.state == 'stopped'
    assert len(container.hosts()) == 1
def test_vnet_stickiness(super_client, new_context):
    """Containers created with an explicit vnetId stick to the host/subnet/
    network of the container that first used that vnet."""
    account_id = new_context.project.id
    network = super_client.list_network(accountId=account_id,
                                        kind='hostOnlyNetwork')[0]
    subnet = super_client.list_subnet(accountId=account_id)[0]
    image_uuid = new_context.image_uuid
    host1 = new_context.host
    host2 = register_simulated_host(new_context.client)
    host3 = register_simulated_host(new_context.client)
    valid_hosts = [host1.id, host2.id, host3.id]
    host1 = super_client.update(host1, computeFree=100000)
    host2 = super_client.update(host2, computeFree=100000)
    host3 = super_client.update(host3, computeFree=100000)
    for i in [host1, host2, host3]:
        assert i.computeFree == 100000
    containers = []
    for _ in range(3):
        c = super_client.reload(new_context.create_container())
        containers.append(c)
    actual_hosts = set()
    for i in containers:
        assert i.state == 'running'
        actual_hosts.add(i.hosts()[0].id)
    # One container per host -> one vnet per host.
    assert actual_hosts == set(valid_hosts)
    assert len(network.vnets()) == 3
    assert len(subnet.vnets()) == 3
    # NOTE: ``c`` is intentionally the last container from the loop above.
    c1_host_id = c.hosts()[0].id
    c1_nic = c.nics()[0]
    for _ in range(3):
        c = super_client.create_container(accountId=account_id,
                                          imageUuid=image_uuid,
                                          vnetIds=[c1_nic.vnetId])
        c = super_client.wait_success(c)
        assert c.hosts()[0].id == c1_host_id
        nic = c.nics()[0]
        assert nic.subnetId == c1_nic.subnetId
        assert nic.vnetId == c1_nic.vnetId
        assert nic.networkId == c1_nic.networkId
    # Same expectation when the network is also passed explicitly.
    for _ in range(3):
        c = super_client.create_container(accountId=account_id,
                                          imageUuid=image_uuid,
                                          networkIds=[network.id],
                                          vnetIds=[c1_nic.vnetId])
        c = super_client.wait_success(c)
        assert c.hosts()[0].id == c1_host_id
        nic = c.nics()[0]
        assert nic.subnetId == c1_nic.subnetId
        assert nic.vnetId == c1_nic.vnetId
        assert nic.networkId == c1_nic.networkId
def test_port_constraint(new_context):
    """Two containers cannot publish the same host port+protocol on one host;
    a different host, public port, or protocol each resolves the conflict."""
    host1 = new_context.host
    host2 = register_simulated_host(new_context.client)
    containers = []
    try:
        c = new_context.create_container(requestedHostId=host1.id,
                                         ports=['8081:81/tcp'])
        containers.append(c)
        # try to deploy another container with same public port + protocol
        c2 = new_context\
            .super_create_container_no_success(validHostIds=[host1.id],
                                               ports=['8081:81/tcp'])
        assert c2.transitioning == 'error'
        assert c2.transitioningMessage == 'Failed to find a placement'
        assert c2.state == 'removed'
        # increase host pool and check whether allocator picks other host
        c2 = new_context.super_create_container(validHostIds=[host1.id,
                                                              host2.id],
                                                ports=['8081:81/tcp'])
        containers.append(c2)
        # try different public port
        c3 = new_context.super_create_container(validHostIds=[host1.id],
                                                ports=['8082:81/tcp'])
        containers.append(c3)
        # try different protocol
        c4 = new_context.super_create_container(validHostIds=[host1.id],
                                                ports=['8081:81/udp'])
        containers.append(c4)
        # udp port is now taken as well, so this one must fail.
        c5 = new_context\
            .super_create_container_no_success(validHostIds=[host1.id],
                                               ports=['8081:81/udp'])
        assert c5.transitioning == 'error'
        assert c5.transitioningMessage == 'Failed to find a placement'
        assert c5.state == 'removed'
    finally:
        for c in containers:
            if c is not None:
                new_context.delete(c)
def test_request_host_override(new_context):
    """An explicit requestedHostId bypasses the port-conflict constraint."""
    host = new_context.host
    c = None
    c2 = None
    try:
        c = new_context.super_create_container(validHostIds=[host.id],
                                               ports=['8081:81/tcp'])
        # try to deploy another container with same public port + protocol
        # however, explicitly specify requestedHostId
        c2 = new_context.super_create_container(requestedHostId=host.id,
                                                ports=['8081:81/tcp'])
    finally:
        if c is not None:
            new_context.delete(c)
        if c2 is not None:
            new_context.delete(c2)
def test_host_affinity(super_client, new_context):
    """Host-label affinity/anti-affinity (hard and soft) via both the docker
    'constraint:' environment syntax and rancher scheduler labels."""
    host = new_context.host
    # NOTE(review): other tests call register_simulated_host(new_context.client);
    # here the context itself is passed — confirm both signatures are accepted.
    host2 = register_simulated_host(new_context)
    host = super_client.update(host, labels={'size': 'huge',
                                             'latency': 'long'})
    host2 = super_client.update(host2, labels={'size': 'tiny',
                                               'latency': 'short'})
    containers = []
    try:
        # test affinity
        c = new_context.create_container(
            environment={'constraint:size==huge': ''})
        assert c.hosts()[0].id == host.id
        containers.append(c)
        c = new_context.create_container(
            labels={'io.rancher.scheduler.affinity:host_label': 'size=huge'})
        assert c.hosts()[0].id == host.id
        containers.append(c)
        # test anti-affinity
        c = new_context.create_container(
            environment={'constraint:size!=huge': ''})
        assert c.hosts()[0].id == host2.id
        containers.append(c)
        c = new_context.create_container(
            labels={'io.rancher.scheduler.affinity:host_label_ne':
                    'size=huge'})
        assert c.hosts()[0].id == host2.id
        containers.append(c)
        # test soft affinity.
        # prefer size==huge, but latency==~short if possible
        c = new_context.create_container(
            environment={
                'constraint:size==huge': '',
                'constraint:latency==~short': ''
            })
        assert c.hosts()[0].id == host.id
        containers.append(c)
        c = new_context.create_container(
            labels={
                'io.rancher.scheduler.affinity:host_label': 'size=huge',
                'io.rancher.scheduler.affinity:host_label_soft_ne':
                    'latency=short'
            })
        assert c.hosts()[0].id == host.id
        containers.append(c)
        # test soft anti-affinity
        c = new_context.create_container(
            environment={'constraint:latency!=~long': ''})
        assert c.hosts()[0].id == host2.id
        containers.append(c)
        c = new_context.create_container(
            labels={'io.rancher.scheduler.affinity:host_label_soft_ne':
                    'latency=long'})
        assert c.hosts()[0].id == host2.id
        containers.append(c)
    finally:
        for c in containers:
            new_context.delete(c)
def test_container_affinity(new_context):
    """Container affinity/anti-affinity by name and by uuid, via both the
    'affinity:container' environment syntax and scheduler labels."""
    # Two hosts
    register_simulated_host(new_context)
    containers = []
    try:
        name1 = 'affinity' + random_str()
        c1 = new_context.create_container(
            name=name1)
        containers.append(c1)
        c2 = new_context.create_container(
            environment={'affinity:container==' + name1: ''})
        containers.append(c2)
        # check c2 is on same host as c1
        assert c2.hosts()[0].id == c1.hosts()[0].id
        c3 = new_context.create_container(
            labels={'io.rancher.scheduler.affinity:container': name1})
        containers.append(c3)
        # check c3 is on same host as c1
        assert c3.hosts()[0].id == c1.hosts()[0].id
        c4 = new_context.create_container(
            environment={'affinity:container==' + c1.uuid: ''})
        containers.append(c4)
        # check c4 is on same host as c1
        assert c4.hosts()[0].id == c1.hosts()[0].id
        c5 = new_context.create_container(
            labels={
                'io.rancher.scheduler.affinity:container': c1.uuid})
        containers.append(c5)
        # check c5 is on same host as c1
        assert c5.hosts()[0].id == c1.hosts()[0].id
        c6 = new_context.create_container(
            environment={'affinity:container!=' + name1: ''})
        containers.append(c6)
        # check c6 is not on same host as c1
        assert c6.hosts()[0].id != c1.hosts()[0].id
        c7 = new_context.create_container(
            labels={'io.rancher.scheduler.affinity:container_ne': name1})
        containers.append(c7)
        # check c7 is not on same host as c1
        assert c7.hosts()[0].id != c1.hosts()[0].id
    finally:
        for c in containers:
            new_context.delete(c)
def test_container_label_affinity(new_context):
    """Container-label affinity/anti-affinity, including soft ('~') variants
    that are preferences rather than hard constraints."""
    # Two hosts
    register_simulated_host(new_context)
    containers = []
    try:
        c1_label = random_str()
        c1 = new_context.create_container(
            labels={'foo': c1_label}
        )
        containers.append(c1)
        c2 = new_context.create_container(
            environment={'affinity:foo==' + c1_label: ''})
        containers.append(c2)
        # check c2 is on same host as c1
        assert c2.hosts()[0].id == c1.hosts()[0].id
        c3 = new_context.create_container(
            labels={
                'io.rancher.scheduler.affinity:container_label':
                    'foo=' + c1_label}
        )
        containers.append(c3)
        # check c3 is on same host as c1
        assert c3.hosts()[0].id == c1.hosts()[0].id
        c4_label = random_str()
        c4 = new_context.create_container(
            environment={'affinity:foo!=' + c1_label: ''},
            labels={'foo': c4_label}
        )
        containers.append(c4)
        # check c4 is not on same host as c1
        assert c4.hosts()[0].id != c1.hosts()[0].id
        c5 = new_context.create_container(
            environment={
                'affinity:foo!=' + c1_label: '',
                'affinity:foo!=~' + c4_label: ''
            })
        containers.append(c5)
        # since we just specified a soft anti-affinity to c4,
        # check c5 is on same host as c4
        assert c5.hosts()[0].id == c4.hosts()[0].id
        c6 = new_context.create_container(
            environment={
                'affinity:foo!=' + c1_label: '',
            },
            labels={
                'io.rancher.scheduler.affinity:container_label_soft_ne':
                    'foo=' + c4_label
            }
        )
        containers.append(c6)
        assert c6.hosts()[0].id == c4.hosts()[0].id
    finally:
        for c in containers:
            new_context.delete(c)
def test_volumes_from_constraint(new_context):
    """Containers linked via dataVolumesFrom must land on the same host,
    regardless of which one of the pair starts first."""
    # Three hosts
    register_simulated_host(new_context)
    register_simulated_host(new_context)
    containers = []
    try:
        # nominal condition. start c1 before c2
        c1 = new_context.create_container_no_success(startOnCreate=False)
        c2 = new_context.create_container_no_success(startOnCreate=False,
                                                     dataVolumesFrom=[c1.id])
        c1 = c1.start()
        c2 = c2.start()
        c1 = new_context.wait_for_state(c1, 'running')
        c2 = new_context.wait_for_state(c2, 'running')
        containers.append(c1)
        containers.append(c2)
        assert c1.hosts()[0].id == c2.hosts()[0].id
        # less than ideal situation. start c4 before c3
        c3 = new_context.create_container_no_success(startOnCreate=False)
        c4 = new_context.create_container_no_success(startOnCreate=False,
                                                     dataVolumesFrom=[c3.id])
        c4 = c4.start()
        c3 = c3.start()
        c4 = new_context.wait_for_state(c4, 'running')
        c3 = new_context.wait_for_state(c3, 'running')
        containers.append(c3)
        containers.append(c4)
        assert c3.hosts()[0].id == c4.hosts()[0].id
    finally:
        for c in containers:
            new_context.delete(c)
def test_network_mode_constraint(new_context):
    """A container sharing another container's network namespace
    (networkMode='container') must be scheduled onto the same host."""
    # Three hosts
    register_simulated_host(new_context)
    register_simulated_host(new_context)
    containers = []
    try:
        c1 = new_context.create_container_no_success(startOnCreate=False)
        c2 = new_context.create_container(startOnCreate=False,
                                          networkMode='container',
                                          networkContainerId=c1.id)
        c1 = c1.start()
        c2 = c2.start()
        c1 = new_context.wait_for_state(c1, 'running')
        containers.append(c1)
        c2 = new_context.wait_for_state(c2, 'running')
        containers.append(c2)
        assert c1.hosts()[0].id == c2.hosts()[0].id
    finally:
        for c in containers:
            new_context.delete(c)
# Takes lists of objects returned by the zkclient module, and
# consolidates the information for display.
import logging
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op: suppress output from the kafka.codec logger.
        return None
# Silence the noisy kafka.codec logger. Bug fix: the original assigned the
# return value of addHandler() (always None) to ``logger``; keep the actual
# Logger object instead.
logger = logging.getLogger('kafka.codec')
logger.addHandler(NullHandler())
import struct
import socket
from collections import namedtuple
from kafka.client import KafkaClient
from kafka.common import OffsetRequest
class ProcessorError(Exception):
    """Raised when partition state cannot be collected from a Kafka broker."""
    def __init__(self, msg):
        # Keep the human-readable message; surfaced via __str__.
        self.msg = msg
    def __str__(self):
        return self.msg
# Per-(broker, topic, partition, spout) snapshot of consumer progress.
PartitionState = namedtuple('PartitionState',
                            [
                                'broker',       # Broker host
                                'topic',        # Topic on broker
                                'partition',    # The partition
                                'earliest',     # Earliest offset within partition on broker
                                'latest',       # Current offset within partition on broker
                                'depth',        # Depth of partition on broker.
                                'spout',        # The Spout consuming this partition
                                'current',      # Current offset for Spout
                                'delta'         # Difference between latest and current
                            ])
# Aggregated view over all partitions; ``partitions`` is a tuple of
# PartitionState records.
PartitionsSummary = namedtuple('PartitionsSummary',
                               [
                                   'total_depth',    # Total queue depth.
                                   'total_delta',    # Total delta across all spout tasks.
                                   'num_partitions', # Number of partitions.
                                   'num_brokers',    # Number of Kafka Brokers.
                                   'partitions'      # Tuple of PartitionStates
                               ])
def process(spouts):
    '''
    Consolidate partition offsets for every spout into a single
    PartitionsSummary. Raises ProcessorError if a broker is unreachable.
    (Python 2 module: note the old-style ``except X, e`` syntax below.)
    '''
    results = []
    total_depth = 0
    total_delta = 0
    brokers = []
    for s in spouts:
        for p in s.partitions:
            try:
                k = KafkaClient(p['broker']['host'], str(p['broker']['port']))
            except socket.gaierror, e:
                raise ProcessorError('Failed to contact Kafka broker %s (%s)' %
                                     (p['broker']['host'], str(e)))
            # Kafka offset API sentinels: -2 = earliest available offset,
            # -1 = latest offset; max_offsets=1 returns a single value.
            earliest_off = OffsetRequest(str(p['topic']), p['partition'], -2, 1)
            latest_off = OffsetRequest(str(p['topic']), p['partition'], -1, 1)
            earliest = k.send_offset_request([earliest_off])[0]
            latest = k.send_offset_request([latest_off])[0]
            current = p['offset']
            brokers.append(p['broker']['host'])
            # depth = retained messages; delta = spout lag behind the head.
            total_depth = total_depth + (latest.offsets[0] - earliest.offsets[0])
            total_delta = total_delta + (latest.offsets[0] - current)
            results.append(PartitionState._make([
                p['broker']['host'],
                p['topic'],
                p['partition'],
                earliest.offsets[0],
                latest.offsets[0],
                latest.offsets[0] - earliest.offsets[0],
                s.id,
                current,
                latest.offsets[0] - current]))
    return PartitionsSummary(total_depth=total_depth,
                             total_delta=total_delta,
                             num_partitions=len(results),
                             num_brokers=len(set(brokers)),
                             partitions=tuple(results))
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ConversationItemContent"]
class ConversationItemContent(BaseModel):
    """One content part of a realtime conversation item.

    Generated from the OpenAPI spec; all fields are optional and their
    relevance depends on ``type``.
    """

    id: Optional[str] = None
    """
    ID of a previous conversation item to reference (for `item_reference` content
    types in `response.create` events). These can reference both client and server
    created items.
    """

    audio: Optional[str] = None
    """Base64-encoded audio bytes, used for `input_audio` content type."""

    text: Optional[str] = None
    """The text content, used for `input_text` and `text` content types."""

    transcript: Optional[str] = None
    """The transcript of the audio, used for `input_audio` and `audio` content types."""

    type: Optional[Literal["input_text", "input_audio", "item_reference", "text", "audio"]] = None
    """
    The content type (`input_text`, `input_audio`, `item_reference`, `text`,
    `audio`).
    """
import os
from rpython.rlib import jit
from bf import bytecode
from bf.parser import parse
# -----------------------------------------------------------------------------
# -- Interpreter --------------------------------------------------------------
def get_location(pc, bc, bcode):
    """Render a printable JIT trace location for the instruction at ``pc``.

    ``bc`` is unused here but required: the signature must match the
    JitDriver's green variables (pc, bc, bcode).
    """
    # Single instruction dump in brackets; kept as a list + join so extra
    # context lines can be appended later without reshaping the code.
    # (Removed the unused ``i`` and ``x`` locals from the original.)
    lines = ["[" + bytecode.dumpl(pc, bcode.code, bcode.codemap) + "]"]
    return "; ".join(lines)
# JIT driver for the interpreter loop: (pc, bc, bcode) identify a position
# in the program (greens); everything else is tracked automatically (reds).
jitdriver = jit.JitDriver(greens=['pc', 'bc', 'bcode'],
                          reds='auto',
                          get_printable_location = get_location
                          )
@jit.elidable
def get_matching_codemap(bc, pc):
    """Elidable (pure) lookup of the codemap entry at ``pc``."""
    return bc.codemap[pc]
@jit.elidable
def get_matching_code(bc, pc):
    """Elidable (pure) lookup of the bytecode character at ``pc``."""
    return bc.code[pc]
def interpret(bcode):
    """Execute parsed brainfuck bytecode against a growable tape.

    The tape starts at 30000 zero cells and grows on demand when the data
    pointer moves past the end. Output goes to fd 1 via os.write.
    """
    pc = 0
    bc = ord(get_matching_code(bcode, pc))
    tape = [0] * 30000
    pos = 0
    while pc < len(bcode.code):
        jitdriver.jit_merge_point(pc=pc, bc=bc, bcode=bcode)
        bc = ord(get_matching_code(bcode, pc))
        # Simple cases
        if bc == bytecode.INCT:
            # Cell increment; the codemap entry carries the delta.
            tape[pos] += get_matching_codemap(bcode, pc)
        elif bc == bytecode.MOVT:
            pos += get_matching_codemap(bcode, pc)
            # BUG FIX: the tape must reach index ``pos`` inclusive, so grow
            # by diff + 1 cells. The original extended by only ``diff``
            # (range(0, diff)), leaving the tape one cell short and raising
            # IndexError as soon as pos landed at or past len(tape).
            diff = pos - len(tape)
            if diff >= 0:
                tape.extend([0] * (diff + 1))
        elif bc == bytecode.PUTV:
            os.write(1, chr(tape[pos]))
        elif bc == bytecode.GETV:
            # NOTE(review): this reads from fd 1 (stdout), not fd 0 (stdin).
            # Looks like a typo — confirm before changing, as callers may
            # rely on the current behavior under a terminal.
            tape[pos] = ord(os.read(1, 1)[0])
        # Loop cases: LOOP jumps forward past the matching ']' when the cell
        # is zero; GOTO jumps back to the matching '[' while it is non-zero.
        elif bc == bytecode.LOOP and tape[pos] == 0:
            pc = get_matching_codemap(bcode, pc)
        elif bc == bytecode.GOTO and tape[pos] != 0:
            pc = get_matching_codemap(bcode, pc)
        pc += 1
# -----------------------------------------------------------------------------
# -- Driver -------------------------------------------------------------------
def run(data, print_bc):
    """Parse ``data`` and either dump its bytecode or interpret it.

    Python 2 module (note the ``print`` statement below).
    """
    if print_bc:
        print parse(data).dump()
    else:
        interpret(parse(data))
#!/usr/bin/env python
"""
Cleans up *.mae files.
Accepts multiple filenames and glob as the argument(s).
This version adds in the custom conformational search properties to Maestro
structures, atoms and bonds if they don't already exist. Default for all values
is zero.
"""
import os
import sys
from schrodinger import structure as sch_struct
from clean_mae import PROPERTIES_TO_REMOVE, ATOM_PROPERTIES_TO_REMOVE
# Conformational-search atom properties added (defaulting to 0) when missing.
ATOM_CS_PROPERTIES = ['b_cs_chig',
                      'b_cs_comp']
# Conformational-search bond properties added (defaulting to 0) when missing.
BOND_CS_PROPERTIES = ['b_cs_tors',
                      'i_cs_rca4_1',
                      'i_cs_rca4_2',
                      'i_cs_torc_a1',
                      'i_cs_torc_a4',
                      'r_cs_torc_a5',
                      'r_cs_torc_a6',
                      'i_cs_torc_b1',
                      'i_cs_torc_b4',
                      'r_cs_torc_b5',
                      'r_cs_torc_b6']
# Updates for new format: old property name -> new ("a"-suffixed) name.
CONV_DIC = {'i_cs_torc_1': 'i_cs_torc_a1',
            'i_cs_torc_2': 'i_cs_torc_a4',
            'r_cs_torc_5': 'r_cs_torc_a5',
            'r_cs_torc_6': 'r_cs_torc_a6'}
if __name__ == "__main__":
    # For each input file: strip unwanted properties, rename old-format bond
    # properties, add missing CS properties (default 0), then replace the
    # file in place via a TEMP.mae intermediate in the working directory.
    for filename in sys.argv[1:]:
        structure_reader = sch_struct.StructureReader(filename)
        structure_writer = sch_struct.StructureWriter('TEMP.mae')
        for structure in structure_reader:
            for prop in PROPERTIES_TO_REMOVE:
                try:
                    del structure.property[prop]
                except KeyError:
                    pass
            # Change the name too. Why not.
            structure.property['s_m_title'] = \
                structure.property['s_m_entry_name'] = \
                os.path.splitext(
                    os.path.basename(filename))[0]
            for atom in structure.atom:
                for prop in ATOM_PROPERTIES_TO_REMOVE:
                    try:
                        del atom.property[prop]
                    except KeyError:
                        pass
                    except ValueError:
                        pass
                for prop in ATOM_CS_PROPERTIES:
                    if not prop in atom.property:
                        atom.property[prop] = 0
            for bond in structure.bond:
                # Update 1st. (Python 2 module: iteritems below.)
                for k, v in CONV_DIC.iteritems():
                    if k in bond.property:
                        bond.property[v] = bond.property[k]
                        del bond.property[k]
                for prop in BOND_CS_PROPERTIES:
                    if not prop in bond.property:
                        bond.property[prop] = 0
            structure_writer.append(structure)
        structure_reader.close()
        structure_writer.close()
        # Atomically replace the original with the cleaned copy.
        os.rename('TEMP.mae', filename)
/*
* Copyright 2019 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.reflect;
import static java.lang.annotation.ElementType.CONSTRUCTOR;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.TYPE;
import java.lang.annotation.Target;
/**
* Disables Animal Sniffer's checking of compatibility with older versions of Java/Android.
*
* <p>Each package's copy of this annotation needs to be listed in our {@code pom.xml}.
*/
@Target({METHOD, CONSTRUCTOR, TYPE, FIELD})
// Intentionally package-private: each package keeps its own copy (see class javadoc).
@interface IgnoreJRERequirement {}
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
import erpnext
from frappe.utils import flt, nowdate, add_days, cint
from frappe import _
def reorder_item():
    """ Reorder item if stock reaches reorder level"""
    # Bail out until initial setup (a Company and a Fiscal Year) exists.
    setup_complete = (frappe.db.a_row_exists("Company")
        and frappe.db.a_row_exists("Fiscal Year"))
    if not setup_complete:
        return

    # Only act when automatic reordering is enabled in Stock Settings.
    if cint(frappe.db.get_value('Stock Settings', None, 'auto_indent')):
        return _reorder_item()
def _reorder_item():
    """Collect items below their reorder level and raise Material Requests,
    grouped by request type and company."""
    # material_request_type -> {company -> [ {item_code, warehouse, reorder_qty} ]}
    material_requests = {"Purchase": {}, "Transfer": {}, "Material Issue": {}, "Manufacture": {}}
    warehouse_company = frappe._dict(frappe.db.sql("""select name, company from `tabWarehouse`
        where disabled=0"""))
    default_company = (erpnext.get_default_company() or
        frappe.db.sql("""select name from tabCompany limit 1""")[0][0])
    # Enabled, non-template stock items that have a reorder rule either on
    # themselves or on their template (for variants).
    items_to_consider = frappe.db.sql_list("""select name from `tabItem` item
        where is_stock_item=1 and has_variants=0
            and disabled=0
            and (end_of_life is null or end_of_life='0000-00-00' or end_of_life > %(today)s)
            and (exists (select name from `tabItem Reorder` ir where ir.parent=item.name)
                or (variant_of is not null and variant_of != ''
                and exists (select name from `tabItem Reorder` ir where ir.parent=item.variant_of))
            )""",
        {"today": nowdate()})
    if not items_to_consider:
        return

    item_warehouse_projected_qty = get_item_warehouse_projected_qty(items_to_consider)

    def add_to_material_request(item_code, warehouse, reorder_level, reorder_qty, material_request_type, warehouse_group=None):
        # Queue a request line if projected qty fell below the reorder level.
        if warehouse not in warehouse_company:
            # a disabled warehouse
            return

        reorder_level = flt(reorder_level)
        reorder_qty = flt(reorder_qty)
        # projected_qty will be 0 if Bin does not exist
        if warehouse_group:
            projected_qty = flt(item_warehouse_projected_qty.get(item_code, {}).get(warehouse_group))
        else:
            projected_qty = flt(item_warehouse_projected_qty.get(item_code, {}).get(warehouse))

        if (reorder_level or reorder_qty) and projected_qty < reorder_level:
            deficiency = reorder_level - projected_qty
            if deficiency > reorder_qty:
                # Order at least enough to get back to the reorder level.
                reorder_qty = deficiency

            company = warehouse_company.get(warehouse) or default_company

            material_requests[material_request_type].setdefault(company, []).append({
                "item_code": item_code,
                "warehouse": warehouse,
                "reorder_qty": reorder_qty
            })

    for item_code in items_to_consider:
        item = frappe.get_doc("Item", item_code)
        # Variants inherit reorder rules from their template on demand.
        if item.variant_of and not item.get("reorder_levels"):
            item.update_template_tables()
        if item.get("reorder_levels"):
            for d in item.get("reorder_levels"):
                add_to_material_request(item_code, d.warehouse, d.warehouse_reorder_level,
                    d.warehouse_reorder_qty, d.material_request_type, warehouse_group=d.warehouse_group)

    if material_requests:
        return create_material_request(material_requests)
def get_item_warehouse_projected_qty(items_to_consider):
    """Return {item_code: {warehouse: projected_qty}} from Bin rows, rolling
    each warehouse's quantity up into its ancestor (group) warehouses."""
    item_warehouse_projected_qty = {}

    for item_code, warehouse, projected_qty in frappe.db.sql("""select item_code, warehouse, projected_qty
        from tabBin where item_code in ({0})
            and (warehouse != "" and warehouse is not null)"""\
        .format(", ".join(["%s"] * len(items_to_consider))), items_to_consider):

        if item_code not in item_warehouse_projected_qty:
            item_warehouse_projected_qty.setdefault(item_code, {})

        if warehouse not in item_warehouse_projected_qty.get(item_code):
            item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)

        # Walk up the warehouse tree, accumulating into each parent.
        # NOTE(review): the ``if not ...`` test treats an existing value of 0
        # the same as "missing" (falsy), so a 0 entry gets overwritten rather
        # than added to — confirm this is intended.
        warehouse_doc = frappe.get_doc("Warehouse", warehouse)

        while warehouse_doc.parent_warehouse:
            if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):
                item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(projected_qty)
            else:
                item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)
            warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse)

    return item_warehouse_projected_qty
def create_material_request(material_requests):
	"""Create indent on reaching reorder level.

	``material_requests`` is ``{request_type: {company: [row_dict, ...]}}``
	where each row carries ``item_code``, ``warehouse`` and ``reorder_qty``.
	One Material Request is inserted and submitted per (type, company); a
	failure for one company is logged and does not stop the others.

	Returns the list of submitted Material Request docs.
	"""
	mr_list = []
	exceptions_list = []

	def _log_exception():
		# Prefer frappe's message log (validation messages raised during
		# insert/submit); fall back to the raw traceback when it is empty.
		if frappe.local.message_log:
			exceptions_list.extend(frappe.local.message_log)
			frappe.local.message_log = []
		else:
			exceptions_list.append(frappe.get_traceback())

	for request_type in material_requests:
		for company in material_requests[request_type]:
			try:
				items = material_requests[request_type][company]
				if not items:
					continue

				mr = frappe.new_doc("Material Request")
				mr.update({
					"company": company,
					"transaction_date": nowdate(),
					"material_request_type": "Material Transfer" if request_type == "Transfer" else request_type
				})

				for d in items:
					d = frappe._dict(d)
					item = frappe.get_doc("Item", d.item_code)

					# Default to the stock UOM; purchases use the purchase UOM
					# and the reorder qty is converted accordingly.
					uom = item.stock_uom
					conversion_factor = 1.0
					if request_type == 'Purchase':
						uom = item.purchase_uom or item.stock_uom
						if uom != item.stock_uom:
							conversion_factor = frappe.db.get_value("UOM Conversion Detail",
								{'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0

					mr.append("items", {
						"doctype": "Material Request Item",
						"item_code": d.item_code,
						"schedule_date": add_days(nowdate(), cint(item.lead_time_days)),
						"qty": d.reorder_qty / conversion_factor,
						"uom": uom,
						"stock_uom": item.stock_uom,
						"warehouse": d.warehouse,
						"item_name": item.item_name,
						"description": item.description,
						"item_group": item.item_group,
						"brand": item.brand,
					})

				# Parent schedule date = latest of the item schedule dates.
				schedule_dates = [d.schedule_date for d in mr.items]
				mr.schedule_date = max(schedule_dates or [nowdate()])
				mr.insert()
				mr.submit()
				mr_list.append(mr)
			except Exception:
				# FIX: was a bare `except:`, which also swallowed SystemExit
				# and KeyboardInterrupt. Record the error, keep going.
				_log_exception()

	if mr_list:
		# Cache the notification setting on frappe.local for this request.
		if getattr(frappe.local, "reorder_email_notify", None) is None:
			frappe.local.reorder_email_notify = cint(frappe.db.get_value('Stock Settings', None,
				'reorder_email_notify'))

		if frappe.local.reorder_email_notify:
			send_email_notification(mr_list)

	if exceptions_list:
		notify_errors(exceptions_list)

	return mr_list
def send_email_notification(mr_list):
	"""Email Purchase/Stock Managers about auto-created Material Requests."""
	# All enabled, non-system users holding either relevant role.
	email_list = frappe.db.sql_list("""select distinct r.parent
		from `tabHas Role` r, tabUser p
		where p.name = r.parent and p.enabled = 1 and p.docstatus < 2
		and r.role in ('Purchase Manager','Stock Manager')
		and p.name not in ('Administrator', 'All', 'Guest')""")

	msg = frappe.render_template("templates/emails/reorder_item.html", {"mr_list": mr_list})

	frappe.sendmail(
		recipients=email_list,
		subject=_('Auto Material Requests Generated'),
		message=msg,
	)
def notify_errors(exceptions_list):
	"""Email System Managers the errors collected during auto-reorder.

	``exceptions_list`` holds message-log entries and/or tracebacks gathered
	by ``create_material_request``; they are joined into one plain report.
	"""
	from frappe.email import sendmail_to_system_managers

	subject = "[Important] [ERPNext] Auto Reorder Errors"
	# FIX: corrected user-facing typo "occured" -> "occurred".
	content = """Dear System Manager,
An error occurred for certain Items while creating Material Requests based on Re-order level.
Please rectify these issues:
---
<pre>
%s
</pre>
---
Regards,
Administrator""" % ("\n\n".join(exceptions_list),)

	sendmail_to_system_managers(subject, content)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.