import json
from cloudify.workflows import parameters, ctx
output_path = parameters.output_path
with open(output_path, 'w') as f:
f.write(json.dumps({
'retries': ctx._task_retries,
'retry_interval': ctx._task_retry_interval,
'thread_pool_size': ctx._local_task_thread_pool_size
}))
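# For reference, the file written above ends up holding the workflow's
# effective task-execution settings, e.g. (exact values depend on the
# blueprint's task config): {"retries": 0, "retry_interval": 30,
# "thread_pool_size": 8}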
|
{
"content_hash": "839d30fc8c7eb0e1207a7e19208ed451",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 26.416666666666668,
"alnum_prop": 0.6656151419558359,
"repo_name": "dankilman/clash",
"id": "49f6331cb39352932ef4aa72fdd07ed97ed243e3",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clash/tests/resources/blueprints/task_config/blueprint_workflows/workflow2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86047"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
}
|
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import IonTestFramework, SkipTest
from test_framework.util import *
class ZMQTest(IonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.port = 28332
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that ion has been built with ZMQ enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("iond has not been built with zmq enabled.")
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
self.log.info("listen...")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
blockcount += 1
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
assert_equal(hashRPC, hashZMQ) #txid from sendtoaddress must equal the hash received over zmq
if __name__ == '__main__':
ZMQTest().main()
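# A minimal standalone subscriber sketch (not part of the test) showing how
# these notifications are consumed; the endpoint matches the -zmqpub* options
# passed to node 0 above.
def listen_once(endpoint="tcp://127.0.0.1:28332"):
    import zmq
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.setsockopt(zmq.SUBSCRIBE, b"hashblock")
    sock.setsockopt(zmq.SUBSCRIBE, b"hashtx")
    sock.connect(endpoint)
    # Each notification has three parts: topic, 32-byte hash, and a
    # little-endian uint32 sequence number that increments per topic.
    topic, body, seq = sock.recv_multipart()
    return topic, body.hex(), struct.unpack('<I', seq)[0]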
|
{
"content_hash": "80fcb0e57bc0914c64f996b3a523bb26",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 121,
"avg_line_length": 34.68571428571428,
"alnum_prop": 0.5881383855024712,
"repo_name": "aspaas/ion",
"id": "d57027ca605e19e56aa4db4023b98998beb0a983",
"size": "3856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/zmq_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616463"
},
{
"name": "C++",
"bytes": "4560754"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "18274"
},
{
"name": "Makefile",
"bytes": "16792"
},
{
"name": "NSIS",
"bytes": "5917"
},
{
"name": "Objective-C++",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "96149"
},
{
"name": "QMake",
"bytes": "20721"
},
{
"name": "Shell",
"bytes": "391146"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('developers', '0005_auto_20150827_1230'),
]
operations = [
migrations.RemoveField(
model_name='preloadtestplan',
name='addon',
),
migrations.DeleteModel(
name='PreloadTestPlan',
),
]
|
{
"content_hash": "84a97f2f33a206ef4ed7e678e23ff264",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 50,
"avg_line_length": 20.9,
"alnum_prop": 0.583732057416268,
"repo_name": "washort/zamboni",
"id": "8c1496f7aa5cacf6821f016e07a322114d9543c7",
"size": "442",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mkt/developers/migrations/0006_auto_20151110_1117.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354243"
},
{
"name": "HTML",
"bytes": "2383319"
},
{
"name": "JavaScript",
"bytes": "532109"
},
{
"name": "Makefile",
"bytes": "4313"
},
{
"name": "Python",
"bytes": "4735484"
},
{
"name": "Shell",
"bytes": "11135"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pyblogit
import pyblogit.blogger
import pyblogit.database_handler
|
{
"content_hash": "24c17c99505d8d482a936b1377e33925",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 17.25,
"alnum_prop": 0.782608695652174,
"repo_name": "jamalmoir/pyblogit",
"id": "37fa9689ecf1d75641d93ca7c4633904c04431a7",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17083"
}
],
"symlink_target": ""
}
|
"""
Unit tests for L{benchmark}.
"""
from twisted.trial.unittest import TestCase
from twisted.python.usage import UsageError
from benchmark import BenchmarkOptions
class BenchmarkOptionsTests(TestCase):
"""
Tests for L{benchmark.BenchmarkOptions}.
"""
def setUp(self):
"""
Create a L{BenchmarkOptions} instance to test.
"""
self.options = BenchmarkOptions()
def test_parameters(self):
"""
The I{--parameters} option can be specified multiple times, and each
occurrence specifies the parameters for a particular benchmark as a
comma-separated list of integers.
"""
self.options.parseOptions(["--parameters", "foo:1,10,100", "foo"])
self.assertEquals(
self.options['parameters'], {"foo": [1, 10, 100]})
def test_filterBenchmarksWithoutDistribution(self):
"""
If neither I{--hosts-count} nor I{--host-index} are supplied,
L{BenchmarkOptions} takes all positional arguments as the
benchmarks to run.
"""
self.options.parseOptions(["foo", "bar", "baz"])
self.assertEquals(self.options['benchmarks'], ["foo", "bar", "baz"])
def test_hostsCountWithoutIndex(self):
"""
If I{--hosts-count} is provided but I{--host-index} is not, a
L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions, ["--hosts-count=3", "foo"])
self.assertEquals(
str(exc),
"Specify neither or both of hosts-count and host-index")
def test_hostIndexWithoutCount(self):
"""
If I{--host-index} is provided but I{--hosts-count} is not, a
L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions, ["--host-index=3", "foo"])
self.assertEquals(
str(exc),
"Specify neither or both of hosts-count and host-index")
def test_negativeHostsCount(self):
"""
If a negative value is provided for I{--hosts-count}, a
L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions,
["--host-index=1", "--hosts-count=-1", "foo"])
self.assertEquals(
str(exc),
"Specify a positive integer for hosts-count")
def test_nonIntegerHostsCount(self):
"""
If a string which cannot be converted to an integer is
provided for I{--hosts-count}, a L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions,
["--host-index=1", "--hosts-count=hello", "foo"])
self.assertEquals(
str(exc),
"Parameter type enforcement failed: invalid literal for int() with base 10: 'hello'")
def test_negativeHostIndex(self):
"""
If a negative value is provided for I{--host-index}, a
L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions,
["--host-index=-1", "--hosts-count=2", "foo"])
self.assertEquals(
str(exc),
"Specify a positive integer for host-index")
def test_nonIntegerHostIndex(self):
"""
If a string which cannot be converted to an integer is
provided for I{--host-index}, a L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions,
["--host-index=hello", "--hosts-count=2", "foo"])
self.assertEquals(
str(exc),
"Parameter type enforcement failed: invalid literal for int() with base 10: 'hello'")
def test_largeHostIndex(self):
"""
If the integer supplied to I{--host-index} is greater than or
equal to the integer supplied to I{--hosts-count}, a
L{UsageError} is raised.
"""
exc = self.assertRaises(
UsageError,
self.options.parseOptions,
["--hosts-count=2", "--host-index=2", "foo"])
self.assertEquals(
str(exc),
"host-index must be less than hosts-count")
def test_hostIndexAndCount(self):
"""
If I{--hosts-count} and I{--host-index} are supplied, of the
benchmarks supplied as positional arguments, only a subset is
taken as the benchmarks to run. The subset is constructed so
that for a particular value of I{hosts-count}, each benchmark
will only appear in the subset returned for a single value of
I{--host-index}, and all benchmarks will appear in one such
subset.
"""
self.options.parseOptions([
"--hosts-count=3", "--host-index=0",
"foo", "bar", "baz", "quux"])
self.assertEquals(self.options['benchmarks'], ["foo", "quux"])
self.options.parseOptions([
"--hosts-count=3", "--host-index=1",
"foo", "bar", "baz", "quux"])
self.assertEquals(self.options['benchmarks'], ["bar"])
self.options.parseOptions([
"--hosts-count=3", "--host-index=2",
"foo", "bar", "baz", "quux"])
self.assertEquals(self.options['benchmarks'], ["baz"])
|
{
"content_hash": "39f1950eac916dd77387b70d10d73c44",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 97,
"avg_line_length": 35.1948051948052,
"alnum_prop": 0.5701107011070111,
"repo_name": "macosforge/ccs-calendarserver",
"id": "a48c684abd702137bc0deeffc312ff63ece1b2e5",
"size": "6031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/performance/test_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
pyboard interface
This module provides the Pyboard class, used to communicate with and
control a MicroPython device over a communication channel. Both real
boards and emulated devices (e.g. running in QEMU) are supported.
Various communication channels are supported, including a serial
connection, a telnet-style network connection, and an external-process
connection.
Example usage:
import pyboard
pyb = pyboard.Pyboard('/dev/ttyACM0')
Or:
pyb = pyboard.Pyboard('192.168.1.1')
Then:
pyb.enter_raw_repl()
pyb.exec('import pyb')
pyb.exec('pyb.LED(1).on()')
pyb.exit_raw_repl()
Note: if using Python2 then pyb.exec must be written as pyb.exec_.
To run a script from the local machine on the board and print out the results:
import pyboard
pyboard.execfile('test.py', device='/dev/ttyACM0')
This script can also be run directly. To execute a local script, use:
./pyboard.py test.py
Or:
python pyboard.py test.py
"""
import sys
import time
import os
import ast
try:
stdout = sys.stdout.buffer
except AttributeError:
# Python2 doesn't have buffer attr
stdout = sys.stdout
def stdout_write_bytes(b):
b = b.replace(b"\x04", b"")
stdout.write(b)
stdout.flush()
class PyboardError(Exception):
pass
class TelnetToSerial:
def __init__(self, ip, user, password, read_timeout=None):
self.tn = None
import telnetlib
self.tn = telnetlib.Telnet(ip, timeout=15)
self.read_timeout = read_timeout
if b"Login as:" in self.tn.read_until(b"Login as:", timeout=read_timeout):
self.tn.write(bytes(user, "ascii") + b"\r\n")
if b"Password:" in self.tn.read_until(b"Password:", timeout=read_timeout):
# needed because of internal implementation details of the telnet server
time.sleep(0.2)
self.tn.write(bytes(password, "ascii") + b"\r\n")
if b"for more information." in self.tn.read_until(
b'Type "help()" for more information.', timeout=read_timeout
):
# login successful
from collections import deque
self.fifo = deque()
return
raise PyboardError("Failed to establish a telnet connection with the board")
def __del__(self):
self.close()
def close(self):
if self.tn:
self.tn.close()
def read(self, size=1):
timeout_count = 0  # counts consecutive empty reads toward read_timeout
while len(self.fifo) < size:
data = self.tn.read_eager()
if len(data):
self.fifo.extend(data)
timeout_count = 0
else:
time.sleep(0.25)
if self.read_timeout is not None and timeout_count > 4 * self.read_timeout:
break
timeout_count += 1
data = b""
while len(data) < size and len(self.fifo) > 0:
data += bytes([self.fifo.popleft()])
return data
def write(self, data):
self.tn.write(data)
return len(data)
def inWaiting(self):
n_waiting = len(self.fifo)
if not n_waiting:
data = self.tn.read_eager()
self.fifo.extend(data)
return len(data)
else:
return n_waiting
class ProcessToSerial:
"Execute a process and emulate serial connection using its stdin/stdout."
def __init__(self, cmd):
import subprocess
self.subp = subprocess.Popen(
cmd,
bufsize=0,
shell=True,
preexec_fn=os.setsid,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
# Initially this was implemented with selectors, but that adds a Python3
# dependency. However, there can be race conditions communicating
# with a particular child process (like QEMU), and selectors may
# still work better in that case, so it is left in place for now.
#
# import selectors
# self.sel = selectors.DefaultSelector()
# self.sel.register(self.subp.stdout, selectors.EVENT_READ)
import select
self.poll = select.poll()
self.poll.register(self.subp.stdout.fileno())
def close(self):
import signal
os.killpg(os.getpgid(self.subp.pid), signal.SIGTERM)
def read(self, size=1):
data = b""
while len(data) < size:
data += self.subp.stdout.read(size - len(data))
return data
def write(self, data):
self.subp.stdin.write(data)
return len(data)
def inWaiting(self):
# res = self.sel.select(0)
res = self.poll.poll(0)
if res:
return 1
return 0
class ProcessPtyToTerminal:
"""Execute a process which creates a PTY and prints secondary PTY as
first line of its output, and emulate serial connection using
this PTY."""
def __init__(self, cmd):
import subprocess
import re
import serial
self.subp = subprocess.Popen(
cmd.split(),
bufsize=0,
shell=False,
preexec_fn=os.setsid,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
pty_line = self.subp.stderr.readline().decode("utf-8")
m = re.search(r"/dev/pts/[0-9]+", pty_line)
if not m:
print("Error: unable to find PTY device in startup line:", pty_line)
self.close()
sys.exit(1)
pty = m.group()
# rtscts, dsrdtr params are to workaround pyserial bug:
# http://stackoverflow.com/questions/34831131/pyserial-does-not-play-well-with-virtual-port
self.ser = serial.Serial(pty, interCharTimeout=1, rtscts=True, dsrdtr=True)
def close(self):
import signal
os.killpg(os.getpgid(self.subp.pid), signal.SIGTERM)
def read(self, size=1):
return self.ser.read(size)
def write(self, data):
return self.ser.write(data)
def inWaiting(self):
return self.ser.inWaiting()
class Pyboard:
def __init__(
self, device, baudrate=115200, user="micro", password="python", wait=0, exclusive=True
):
self.in_raw_repl = False
self.use_raw_paste = True
if device.startswith("exec:"):
self.serial = ProcessToSerial(device[len("exec:") :])
elif device.startswith("execpty:"):
self.serial = ProcessPtyToTerminal(device[len("execpty:") :])
elif device and device[0].isdigit() and device[-1].isdigit() and device.count(".") == 3:
# device looks like an IP address
self.serial = TelnetToSerial(device, user, password, read_timeout=10)
else:
import serial
# Set options, and exclusive if pyserial supports it
serial_kwargs = {"baudrate": baudrate, "interCharTimeout": 1}
if serial.__version__ >= "3.3":
serial_kwargs["exclusive"] = exclusive
delayed = False
for attempt in range(wait + 1):
try:
self.serial = serial.Serial(device, **serial_kwargs)
break
except (OSError, IOError): # Py2 and Py3 have different errors
if wait == 0:
continue
if attempt == 0:
sys.stdout.write("Waiting {} seconds for pyboard ".format(wait))
delayed = True
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
else:
if delayed:
print("")
raise PyboardError("failed to access " + device)
if delayed:
print("")
def close(self):
self.serial.close()
def read_until(self, min_num_bytes, ending, timeout=10, data_consumer=None):
# if data_consumer is used then data is not accumulated and the ending must be 1 byte long
assert data_consumer is None or len(ending) == 1
data = self.serial.read(min_num_bytes)
if data_consumer:
data_consumer(data)
timeout_count = 0
while True:
if data.endswith(ending):
break
elif self.serial.inWaiting() > 0:
new_data = self.serial.read(1)
if data_consumer:
data_consumer(new_data)
data = new_data
else:
data = data + new_data
timeout_count = 0
else:
timeout_count += 1
if timeout is not None and timeout_count >= 100 * timeout:
break
time.sleep(0.01)
return data
def enter_raw_repl(self, soft_reset=True):
self.serial.write(b"\r\x03\x03") # ctrl-C twice: interrupt any running program
# flush input (without relying on serial.flushInput())
n = self.serial.inWaiting()
while n > 0:
self.serial.read(n)
n = self.serial.inWaiting()
self.serial.write(b"\r\x01") # ctrl-A: enter raw REPL
if soft_reset:
data = self.read_until(1, b"raw REPL; CTRL-B to exit\r\n>")
if not data.endswith(b"raw REPL; CTRL-B to exit\r\n>"):
print(data)
raise PyboardError("could not enter raw repl")
self.serial.write(b"\x04") # ctrl-D: soft reset
# Waiting for "soft reboot" independently to "raw REPL" (done below)
# allows boot.py to print, which will show up after "soft reboot"
# and before "raw REPL".
data = self.read_until(1, b"soft reboot\r\n")
if not data.endswith(b"soft reboot\r\n"):
print(data)
raise PyboardError("could not enter raw repl")
data = self.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
if not data.endswith(b"raw REPL; CTRL-B to exit\r\n"):
print(data)
raise PyboardError("could not enter raw repl")
self.in_raw_repl = True
def exit_raw_repl(self):
self.serial.write(b"\r\x02") # ctrl-B: enter friendly REPL
self.in_raw_repl = False
def follow(self, timeout, data_consumer=None):
# wait for normal output
data = self.read_until(1, b"\x04", timeout=timeout, data_consumer=data_consumer)
if not data.endswith(b"\x04"):
raise PyboardError("timeout waiting for first EOF reception")
data = data[:-1]
# wait for error output
data_err = self.read_until(1, b"\x04", timeout=timeout)
if not data_err.endswith(b"\x04"):
raise PyboardError("timeout waiting for second EOF reception")
data_err = data_err[:-1]
# return normal and error output
return data, data_err
def raw_paste_write(self, command_bytes):
# Read initial header, with window size.
data = self.serial.read(2)
window_size = data[0] | data[1] << 8
window_remain = window_size
# Write out the command_bytes data.
i = 0
while i < len(command_bytes):
while window_remain == 0 or self.serial.inWaiting():
data = self.serial.read(1)
if data == b"\x01":
# Device indicated that a new window of data can be sent.
window_remain += window_size
elif data == b"\x04":
# Device indicated abrupt end. Acknowledge it and finish.
self.serial.write(b"\x04")
return
else:
# Unexpected data from device.
raise PyboardError("unexpected read during raw paste: {}".format(data))
# Send out as much data as possible that fits within the allowed window.
b = command_bytes[i : min(i + window_remain, len(command_bytes))]
self.serial.write(b)
window_remain -= len(b)
i += len(b)
# Indicate end of data.
self.serial.write(b"\x04")
# Wait for device to acknowledge end of data.
data = self.read_until(1, b"\x04")
if not data.endswith(b"\x04"):
raise PyboardError("could not complete raw paste: {}".format(data))
def exec_raw_no_follow(self, command):
if isinstance(command, bytes):
command_bytes = command
else:
command_bytes = bytes(command, encoding="utf8")
# check we have a prompt
data = self.read_until(1, b">")
if not data.endswith(b">"):
raise PyboardError("could not enter raw repl")
if self.use_raw_paste:
# Try to enter raw-paste mode.
self.serial.write(b"\x05A\x01")
data = self.serial.read(2)
if data == b"R\x00":
# Device understood raw-paste command but doesn't support it.
pass
elif data == b"R\x01":
# Device supports raw-paste mode, write out the command using this mode.
return self.raw_paste_write(command_bytes)
else:
# Device doesn't support raw-paste, fall back to normal raw REPL.
data = self.read_until(1, b"w REPL; CTRL-B to exit\r\n>")
if not data.endswith(b"w REPL; CTRL-B to exit\r\n>"):
print(data)
raise PyboardError("could not enter raw repl")
# Don't try to use raw-paste mode again for this connection.
self.use_raw_paste = False
# Write command using standard raw REPL, 256 bytes every 10ms.
for i in range(0, len(command_bytes), 256):
self.serial.write(command_bytes[i : min(i + 256, len(command_bytes))])
time.sleep(0.01)
self.serial.write(b"\x04")
# check if we could exec command
data = self.serial.read(2)
if data != b"OK":
raise PyboardError("could not exec command (response: %r)" % data)
def exec_raw(self, command, timeout=10, data_consumer=None):
self.exec_raw_no_follow(command)
return self.follow(timeout, data_consumer)
def eval(self, expression):
ret = self.exec_("print({})".format(expression))
ret = ret.strip()
return ret
def exec_(self, command, data_consumer=None):
ret, ret_err = self.exec_raw(command, data_consumer=data_consumer)
if ret_err:
raise PyboardError("exception", ret, ret_err)
return ret
def execfile(self, filename):
with open(filename, "rb") as f:
pyfile = f.read()
return self.exec_(pyfile)
def get_time(self):
t = str(self.eval("pyb.RTC().datetime()"), encoding="utf8")[1:-1].split(", ")
return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])
def fs_ls(self, src):
cmd = (
"import uos\nfor f in uos.ilistdir(%s):\n"
" print('{:12} {}{}'.format(f[3]if len(f)>3 else 0,f[0],'/'if f[1]&0x4000 else ''))"
% (("'%s'" % src) if src else "")
)
self.exec_(cmd, data_consumer=stdout_write_bytes)
def fs_cat(self, src, chunk_size=256):
cmd = (
"with open('%s') as f:\n while 1:\n"
" b=f.read(%u)\n if not b:break\n print(b,end='')" % (src, chunk_size)
)
self.exec_(cmd, data_consumer=stdout_write_bytes)
def fs_get(self, src, dest, chunk_size=256):
self.exec_("f=open('%s','rb')\nr=f.read" % src)
with open(dest, "wb") as f:
while True:
data = bytearray()
self.exec_("print(r(%u))" % chunk_size, data_consumer=lambda d: data.extend(d))
assert data.endswith(b"\r\n\x04")
try:
data = ast.literal_eval(str(data[:-3], "ascii"))
if not isinstance(data, bytes):
raise ValueError("Not bytes")
except (UnicodeError, ValueError) as e:
raise PyboardError("fs_get: Could not interpret received data: %s" % str(e))
if not data:
break
f.write(data)
self.exec_("f.close()")
def fs_put(self, src, dest, chunk_size=256):
self.exec_("f=open('%s','wb')\nw=f.write" % dest)
with open(src, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
if sys.version_info < (3,):
self.exec_("w(b" + repr(data) + ")")
else:
self.exec_("w(" + repr(data) + ")")
self.exec_("f.close()")
def fs_mkdir(self, dir):
self.exec_("import uos\nuos.mkdir('%s')" % dir)
def fs_rmdir(self, dir):
self.exec_("import uos\nuos.rmdir('%s')" % dir)
def fs_rm(self, src):
self.exec_("import uos\nuos.remove('%s')" % src)
# in Python2 exec is a keyword so one must use "exec_"
# but for Python3 we want to provide the nicer version "exec"
setattr(Pyboard, "exec", Pyboard.exec_)
def execfile(filename, device="/dev/ttyACM0", baudrate=115200, user="micro", password="python"):
pyb = Pyboard(device, baudrate, user, password)
pyb.enter_raw_repl()
output = pyb.execfile(filename)
stdout_write_bytes(output)
pyb.exit_raw_repl()
pyb.close()
def filesystem_command(pyb, args):
def fname_remote(src):
if src.startswith(":"):
src = src[1:]
return src
def fname_cp_dest(src, dest):
src = src.rsplit("/", 1)[-1]
if dest is None or dest == "":
dest = src
elif dest == ".":
dest = "./" + src
elif dest.endswith("/"):
dest += src
return dest
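# For example:
#   fname_cp_dest("lib/foo.py", "")     -> "foo.py"
#   fname_cp_dest("lib/foo.py", ".")    -> "./foo.py"
#   fname_cp_dest("lib/foo.py", "bar/") -> "bar/foo.py"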
cmd = args[0]
args = args[1:]
try:
if cmd == "cp":
srcs = args[:-1]
dest = args[-1]
if srcs[0].startswith("./") or dest.startswith(":"):
op = pyb.fs_put
fmt = "cp %s :%s"
dest = fname_remote(dest)
else:
op = pyb.fs_get
fmt = "cp :%s %s"
for src in srcs:
src = fname_remote(src)
dest2 = fname_cp_dest(src, dest)
print(fmt % (src, dest2))
op(src, dest2)
else:
op = {
"ls": pyb.fs_ls,
"cat": pyb.fs_cat,
"mkdir": pyb.fs_mkdir,
"rmdir": pyb.fs_rmdir,
"rm": pyb.fs_rm,
}[cmd]
if cmd == "ls" and not args:
args = [""]
for src in args:
src = fname_remote(src)
print("%s :%s" % (cmd, src))
op(src)
except PyboardError as er:
print(str(er.args[2], "ascii"))
pyb.exit_raw_repl()
pyb.close()
sys.exit(1)
_injected_import_hook_code = """\
import uos, uio
class _FS:
class File(uio.IOBase):
def __init__(self):
self.off = 0
def ioctl(self, request, arg):
return 0
def readinto(self, buf):
buf[:] = memoryview(_injected_buf)[self.off:self.off + len(buf)]
self.off += len(buf)
return len(buf)
mount = umount = chdir = lambda *args: None
def stat(self, path):
if path == '_injected.mpy':
return tuple(0 for _ in range(10))
else:
raise OSError(-2) # ENOENT
def open(self, path, mode):
return self.File()
uos.mount(_FS(), '/_')
uos.chdir('/_')
from _injected import *
uos.umount('/_')
del _injected_buf, _FS
"""
def main():
import argparse
cmd_parser = argparse.ArgumentParser(description="Run scripts on the pyboard.")
cmd_parser.add_argument(
"-d",
"--device",
default=os.environ.get("PYBOARD_DEVICE", "/dev/ttyACM0"),
help="the serial device or the IP address of the pyboard",
)
cmd_parser.add_argument(
"-b",
"--baudrate",
default=os.environ.get("PYBOARD_BAUDRATE", "115200"),
help="the baud rate of the serial device",
)
cmd_parser.add_argument("-u", "--user", default="micro", help="the telnet login username")
cmd_parser.add_argument("-p", "--password", default="python", help="the telnet login password")
cmd_parser.add_argument("-c", "--command", help="program passed in as string")
cmd_parser.add_argument(
"-w",
"--wait",
default=0,
type=int,
help="seconds to wait for USB connected board to become available",
)
group = cmd_parser.add_mutually_exclusive_group()
group.add_argument(
"--soft-reset",
default=True,
action="store_true",
help="Whether to perform a soft reset when connecting to the board [default]",
)
group.add_argument(
"--no-soft-reset",
action="store_false",
dest="soft_reset",
)
group = cmd_parser.add_mutually_exclusive_group()
group.add_argument(
"--follow",
action="store_true",
default=None,
help="follow the output after running the scripts [default if no scripts given]",
)
group.add_argument(
"--no-follow",
action="store_false",
dest="follow",
)
group = cmd_parser.add_mutually_exclusive_group()
group.add_argument(
"--exclusive",
action="store_true",
default=True,
help="Open the serial device for exclusive access [default]",
)
group.add_argument(
"--no-exclusive",
action="store_false",
dest="exclusive",
)
cmd_parser.add_argument(
"-f",
"--filesystem",
action="store_true",
help="perform a filesystem action: "
"cp local :device | cp :device local | cat path | ls [path] | rm path | mkdir path | rmdir path",
)
cmd_parser.add_argument("files", nargs="*", help="input files")
args = cmd_parser.parse_args()
# open the connection to the pyboard
try:
pyb = Pyboard(
args.device, args.baudrate, args.user, args.password, args.wait, args.exclusive
)
except PyboardError as er:
print(er)
sys.exit(1)
# run any command or file(s)
if args.command is not None or args.filesystem or len(args.files):
# we must enter raw-REPL mode to execute commands
# this will do a soft-reset of the board
try:
pyb.enter_raw_repl(args.soft_reset)
except PyboardError as er:
print(er)
pyb.close()
sys.exit(1)
def execbuffer(buf):
try:
if args.follow is None or args.follow:
ret, ret_err = pyb.exec_raw(
buf, timeout=None, data_consumer=stdout_write_bytes
)
else:
pyb.exec_raw_no_follow(buf)
ret_err = None
except PyboardError as er:
print(er)
pyb.close()
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if ret_err:
pyb.exit_raw_repl()
pyb.close()
stdout_write_bytes(ret_err)
sys.exit(1)
# do filesystem commands, if given
if args.filesystem:
filesystem_command(pyb, args.files)
del args.files[:]
# run the command, if given
if args.command is not None:
execbuffer(args.command.encode("utf-8"))
# run any files
for filename in args.files:
with open(filename, "rb") as f:
pyfile = f.read()
if filename.endswith(".mpy") and pyfile[0] == ord("M"):
pyb.exec_("_injected_buf=" + repr(pyfile))
pyfile = _injected_import_hook_code
execbuffer(pyfile)
# exiting raw-REPL just drops to friendly-REPL mode
pyb.exit_raw_repl()
# if asked explicitly, or no files given, then follow the output
if args.follow or (args.command is None and not args.filesystem and len(args.files) == 0):
try:
ret, ret_err = pyb.follow(timeout=None, data_consumer=stdout_write_bytes)
except PyboardError as er:
print(er)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if ret_err:
pyb.close()
stdout_write_bytes(ret_err)
sys.exit(1)
# close the connection to the pyboard
pyb.close()
if __name__ == "__main__":
main()
|
{
"content_hash": "d8b4234a863f0a9455c2e7f9b8eeada6",
"timestamp": "",
"source": "github",
"line_count": 758,
"max_line_length": 105,
"avg_line_length": 32.84696569920845,
"alnum_prop": 0.5410876375612499,
"repo_name": "adafruit/circuitpython",
"id": "a43a4d0574857cf0f2b2cecdaeb7e1fdb7186b29",
"size": "25220",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/pyboard.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10241"
},
{
"name": "C",
"bytes": "18450191"
},
{
"name": "C++",
"bytes": "476"
},
{
"name": "CMake",
"bytes": "18203"
},
{
"name": "CSS",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "10126"
},
{
"name": "JavaScript",
"bytes": "13854"
},
{
"name": "Jinja",
"bytes": "11034"
},
{
"name": "Makefile",
"bytes": "330832"
},
{
"name": "Python",
"bytes": "1423935"
},
{
"name": "Shell",
"bytes": "18681"
}
],
"symlink_target": ""
}
|
import lmfit
import numpy as np
from collections import OrderedDict
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from copy import deepcopy
class Scope_Trace_analysis(ba.BaseDataAnalysis):
"""
Analysis of a scope trace of a square pulse, quantifying the deviation
of the measured trace from the expected (ideal) waveform.
relevant options_dict parameters
x_ch_idx (int): specifies x channel, default = 0
y_ch_idx (int): specifies y channel, default = 1
edge_time (float): time corresponding to the rising edge
square_length (float): duration of the square pulse
shortest_timescale (float): timescale after the rising and falling edges
up to which to ignore when calculating the deviation
longest_timescale (float): timescale after the rising and falling edges
from which to ignore when calculating the deviation
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "x_ch_idx" and "y_ch_idx"
specified in the options dict. If x_ch_idx and y_ch_idx are the same
it will unzip the data.
"""
# Extracting the basic x and y values
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
x_ch_idx = self.options_dict.get('x_ch_idx', 0)
y_ch_idx = self.options_dict.get('y_ch_idx', 1)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][y_ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][y_ch_idx]
self.proc_data_dict['xlabel'] = self.raw_data_dict['value_names'][0][x_ch_idx]
self.proc_data_dict['xunit'] = self.raw_data_dict['value_units'][0][x_ch_idx]
self.proc_data_dict['xvals'] = list(self.raw_data_dict
['measured_values_ord_dict'].values())[x_ch_idx][0]
self.proc_data_dict['yvals'] = list(self.raw_data_dict
['measured_values_ord_dict'].values())[y_ch_idx][0]
# Detect the rising edge and shift the time axis
self.proc_data_dict['square_length'] = self.options_dict.get('square_length', 1e-6)
r_edge_idx = detect_threshold_crossing(
self.proc_data_dict['yvals'], 0.05)
edge_time = self.proc_data_dict['xvals'][r_edge_idx]
print(edge_time)
stop_time = edge_time + self.proc_data_dict['square_length']
r_edge_idx = np.argmin(abs(self.proc_data_dict['xvals'] - edge_time))
f_edge_idx = np.argmin(abs(self.proc_data_dict['xvals'] - stop_time))
self.proc_data_dict['tvals'] = self.proc_data_dict['xvals'] - edge_time
# Setting which part of the experiment to ignore when calculating difference
shortest_timescale = self.options_dict.get('shortest_timescale', 0)
sh_ign_idx = np.argmin(abs(self.proc_data_dict['xvals'] -
shortest_timescale))
longest_timescale = self.options_dict.get('longest_timescale', 40e-6)
lo_ign_idx = np.argmin(abs(self.proc_data_dict['xvals'] -
longest_timescale))
# Determine the mean amplitude of the square pulse
end_of_sq_idx = min(r_edge_idx+lo_ign_idx, f_edge_idx)
self.proc_data_dict['sq_amp'] = np.mean(
self.proc_data_dict['yvals'][r_edge_idx+sh_ign_idx:end_of_sq_idx])
self.proc_data_dict['background_amp'] = np.mean(
self.proc_data_dict['yvals'][:r_edge_idx])
# Determine the expected (ideal square-pulse) waveform
self.proc_data_dict['expected_wf'] = np.ones(
len(self.proc_data_dict['tvals']))*self.proc_data_dict['background_amp']
self.proc_data_dict['expected_wf'][r_edge_idx:f_edge_idx] = self.proc_data_dict['sq_amp']
diff_to_exp = self.proc_data_dict['yvals'] - self.proc_data_dict['expected_wf']
# parts in cost function to ignore
diff_to_exp[:r_edge_idx+sh_ign_idx] = 0 # part at short timescale
diff_to_exp[r_edge_idx+lo_ign_idx:f_edge_idx] = 0 # part in pulse for long timescale
diff_to_exp[f_edge_idx:] = 0 # part before the pulse
deviation = np.sqrt(np.sum((diff_to_exp)**2))/len(diff_to_exp)
self.proc_data_dict['diff_to_exp'] = diff_to_exp
self.proc_data_dict['deviation'] = deviation
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['tvals'],
'xlabel': self.proc_data_dict['xlabel'],
'xunit': self.proc_data_dict['xunit'],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'Scope Trace',
'marker':'',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['expected_wf'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['tvals'],
'xlabel': self.proc_data_dict['xlabel'],
'xunit': self.proc_data_dict['xunit'],
'yvals': self.proc_data_dict['expected_wf'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'Desired waveform',
'marker':'',
'do_legend': True,
'legend_pos': 'upper right'}
dev_msg = "Deviation from expected {:.4g}".format(
self.proc_data_dict['deviation'])
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.8,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': dev_msg}
self.plot_dicts['fine'] = deepcopy(self.plot_dicts['main'])
self.plot_dicts['fine']['ax_id'] = 'fine'
self.plot_dicts['fine']['yrange'] = (self.proc_data_dict['sq_amp']*.95,
self.proc_data_dict['sq_amp']*1.05)
self.plot_dicts['fine']['xrange'] = (
-0.05*self.proc_data_dict['square_length'],
1.05*self.proc_data_dict['square_length'])
self.plot_dicts['fine_exp'] = deepcopy(self.plot_dicts['expected_wf'])
self.plot_dicts['fine_exp']['ax_id'] = 'fine'
self.plot_dicts['plus_percent'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'fine',
'func': 'axhline',
'plot_kws': {'y': self.proc_data_dict['sq_amp']*1.01,
'ls':':', 'c':'grey', 'label':r'$\pm$ 1\%'}}
self.plot_dicts['minus_percent'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'fine',
'func': 'axhline',
'plot_kws': {'y': self.proc_data_dict['sq_amp']*0.99,
'ls':':', 'c':'grey'}}
self.plot_dicts['plus_tenth_percent'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'fine',
'func': 'axhline',
'plot_kws': {'y': self.proc_data_dict['sq_amp']*1.001,
'ls':'--', 'c':'grey', 'label':r'$\pm$ 0.1\%'}}
self.plot_dicts['minus_tenth_percent'] = {
'plotfn': self.plot_matplot_ax_method,
'ax_id': 'fine',
'func': 'axhline',
'plot_kws': {'y': self.proc_data_dict['sq_amp']*0.999,
'ls':'--', 'c':'grey'}}
self.plot_dicts['diff_to_exp'] = {
'plotfn': self.plot_line,
'ax_id': 'diff_to_exp',
'xvals': self.proc_data_dict['tvals'],
'xlabel': self.proc_data_dict['xlabel'],
'xunit': self.proc_data_dict['xunit'],
'yvals': self.proc_data_dict['diff_to_exp'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'Difference to desired waveform',
'marker':'',
'do_legend': True,
'legend_pos': 'upper right'}
def detect_threshold_crossing(signal, frac_of_max=0.10):
"""
Detects the first crossing of some threshold and returns the index
"""
th = signal > frac_of_max*np.max(signal)
# marks all but the first occurrence of True as False
th[1:][th[:-1] & th[1:]] = False
return np.where(th)[0][0]
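# Example: detect_threshold_crossing(np.array([0, 0, 1, 5, 9, 9]), 0.10)
# returns 2, the index of the first sample above 10% of the signal maximum.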
|
{
"content_hash": "8ac6e158b77e81b39a8a4c37e21b8f61",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 97,
"avg_line_length": 42.32017543859649,
"alnum_prop": 0.5513524717587315,
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"id": "2f801dd8ffc7bd2d571ce22e806fd6657a8d5e6e",
"size": "9649",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycqed/analysis_v2/distortions_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8748"
},
{
"name": "C++",
"bytes": "8802"
},
{
"name": "Cython",
"bytes": "8291"
},
{
"name": "OpenQASM",
"bytes": "15894"
},
{
"name": "Python",
"bytes": "7978715"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class OperationPropertiesFormatServiceSpecification(Model):
"""Specification of the service.
:param metric_specifications: Operation service specification.
:type metric_specifications:
list[~azure.mgmt.network.v2018_01_01.models.MetricSpecification]
:param log_specifications: Operation log specification.
:type log_specifications:
list[~azure.mgmt.network.v2018_01_01.models.LogSpecification]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
}
def __init__(self, *, metric_specifications=None, log_specifications=None, **kwargs) -> None:
super(OperationPropertiesFormatServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = metric_specifications
self.log_specifications = log_specifications
|
{
"content_hash": "9b9751d526cc0871cc2f2f7594b988ce",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 98,
"avg_line_length": 43.04347826086956,
"alnum_prop": 0.7191919191919192,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "aa8bf97f45644c0ab2febdda2234a65f006fd7a5",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/operation_properties_format_service_specification_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Common functionality for the application object model
The object model must be initialized at service start via
solum.objects.load()
and all objects should be retrieved via
solum.objects.registry.<class>
in application code.
"""
from oslo.config import cfg
from solum.objects import registry
from solum.openstack.common.db import api # noqa
from solum.openstack.common import importutils
db_opts = [
cfg.StrOpt('schema_mode',
default='new',
help="The version of the schema that should be "
"running: 'old', 'transition', 'new'")
]
CONF = cfg.CONF
CONF.register_opts(db_opts, "database")
_BACKEND_MAPPING = {'sqlalchemy': 'solum.objects.sqlalchemy'}
def transition_schema():
"""Is the new schema in write-only mode."""
return cfg.CONF.database.schema_mode == 'transition'
def new_schema():
"""Should objects be writing to the new schema."""
return cfg.CONF.database.schema_mode != 'old'
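# A sketch of how these flags are typically consumed by application code
# (the helper names here are hypothetical): in 'transition' mode the new
# schema receives writes while reads still come from the old schema.
#     if new_schema():
#         write_new_tables()
#     if transition_schema():
#         read_old_tables()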
def load():
"""Ensure that the object model is initialized."""
registry.clear()
backend_name = CONF.database.backend
backend_path = _BACKEND_MAPPING.get(backend_name, backend_name)
backend_mod = importutils.import_module(backend_path)
backend_mod.load()
registry = registry.Registry()
|
{
"content_hash": "75681da5bd5058ed0fea9da7af1b5eab",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 67,
"avg_line_length": 24.37735849056604,
"alnum_prop": 0.6896284829721362,
"repo_name": "gilbertpilz/solum",
"id": "f9ff06099ccfa39023ee1edb62658c1390160d7f",
"size": "1872",
"binary": false,
"copies": "7",
"ref": "refs/heads/camp/item-1",
"path": "solum/objects/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "75"
},
{
"name": "Python",
"bytes": "888136"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "37758"
}
],
"symlink_target": ""
}
|
"""An example of persistence for a directed graph structure. The
graph is stored as a collection of edges, each referencing both a
"lower" and an "upper" node in a table of nodes. Basic persistence
and querying for lower- and upper- neighbors are illustrated::
n2 = Node(2)
n5 = Node(5)
n2.add_neighbor(n5)
print n2.higher_neighbors()
"""
|
{
"content_hash": "b5b66c9664f4e0faac539f2999718791",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 32.63636363636363,
"alnum_prop": 0.7103064066852368,
"repo_name": "rclmenezes/sqlalchemy",
"id": "629808abe91ffede9e5354bc74da686701357f99",
"size": "359",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/graphs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38103"
},
{
"name": "CSS",
"bytes": "7760"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Makefile",
"bytes": "7072"
},
{
"name": "Python",
"bytes": "7243712"
},
{
"name": "TeX",
"bytes": "13927"
}
],
"symlink_target": ""
}
|
"""Implementation of the convolutional neural net."""
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, TensorBoard
import os
import numpy as np
from config import MODELS_DIR, TENSORBOARD_DIR
from io_util import save_makedirs, save_model
def train_model(model,
features,
labels,
tile_size,
model_id,
nb_epoch=10,
checkpoints=False,
tensorboard=False):
"""Train a model with the given features and labels."""
# The features and labels are a list of triples when passed
# to the function. Each triple contains the tile and information
# about its source image and its position in the source. To train
# the model we extract just the tiles.
X, y = get_matrix_form(features, labels, tile_size)
X = normalise_input(X)
# Directory which is used to store the model and its weights.
model_dir = os.path.join(MODELS_DIR, model_id)
checkpointer = None
if checkpoints:
checkpoints_file = os.path.join(model_dir, "weights.hdf5")
checkpointer = ModelCheckpoint(checkpoints_file)
tensorboarder = None
if tensorboard:
log_dir = os.path.join(TENSORBOARD_DIR, model_id)
tensorboarder = TensorBoard(log_dir=log_dir)
callbacks = [c for c in [checkpointer, tensorboarder] if c]
print("Start training.")
model.fit(X, y, nb_epoch=nb_epoch, callbacks=callbacks, validation_split=0.1)
save_model(model, model_dir)
return model
def init_model(tile_size,
model_id,
architecture='one_layer',
nb_filters_1=64,
filter_size_1=12,
stride_1=(4, 4),
pool_size_1=(3, 3),
nb_filters_2=128,
filter_size_2=4,
stride_2=(1, 1),
learning_rate=0.005,
momentum=0.9,
decay=0.002):
"""Initialise a new model with the given hyperparameters and save it for later use."""
num_channels = 3
model = Sequential()
if architecture == 'one_layer':
model.add(
Convolution2D(
nb_filters_1,
filter_size_1,
filter_size_1,
subsample=stride_1,
input_shape=(tile_size, tile_size, num_channels)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_1))
model.add(Flatten())
model.add(Dense(tile_size * tile_size))
model.add(Activation('sigmoid'))
elif architecture == 'two_layer':
model.add(
Convolution2D(
nb_filters_1,
filter_size_1,
filter_size_1,
subsample=stride_1,
input_shape=(tile_size, tile_size, num_channels)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_1))
model.add(
Convolution2D(
nb_filters_2,
filter_size_2,
filter_size_2,
subsample=stride_2))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(tile_size * tile_size))
model.add(Activation('sigmoid'))
model = compile_model(model, learning_rate, momentum, decay)
# Print a summary of the model to the console.
model.summary()
model_dir = os.path.join(MODELS_DIR, model_id)
save_makedirs(model_dir)
save_model(model, model_dir)
return model
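# Example usage (a sketch with hypothetical values; `features` and `labels`
# come from the project's preprocessing pipeline):
#     model = init_model(64, "one_layer_baseline")
#     model = train_model(model, features, labels, 64, "one_layer_baseline")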
def compile_model(model, learning_rate, momentum, decay):
"""Compile the keras model with the given hyperparameters."""
optimizer = SGD(lr=learning_rate, momentum=momentum, decay=decay)
model.compile(
loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
def normalise_input(features):
"""Normalise the features such that all values are in the range [0,1]."""
features = features.astype(np.float32)
return np.multiply(features, 1.0 / 255.0)
def get_matrix_form(features, labels, tile_size):
"""Transform a list of triples of features and labels. To a matrix which contains
only the tiles used for training the model."""
features = [tile for tile, position, path in features]
labels = [tile for tile, position, path in labels]
# The model will have one output corresponding to each pixel in the feature tile.
# So we need to transform the labels which are given as a 2D bitmap into a vector.
labels = np.reshape(labels, (len(labels), tile_size * tile_size))
return np.array(features), np.array(labels)
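# Shape sketch (illustrative; assumes 64x64 RGB tiles and N samples):
#   features -> np.array of shape (N, 64, 64, 3), later scaled to [0, 1]
#   labels   -> (N, 64, 64) bitmaps reshaped to (N, 4096) target vectors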
|
{
"content_hash": "d4e2a7abc25ff67c916145b4cb00c8c6",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 90,
"avg_line_length": 32.53691275167785,
"alnum_prop": 0.614480198019802,
"repo_name": "treigerm/WaterNet",
"id": "2f8e59fdb6e497ac7fc7924bcbd05608180fbb5a",
"size": "4848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waterNet/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37654"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
ROLE_DUPLICATES_QUERY = """
SELECT id FROM (
SELECT
id, ROW_NUMBER() OVER (
PARTITION BY github_user, github_repo
ORDER BY modified DESC) AS rnum
FROM main_role) temp
WHERE temp.rnum > 1;
"""
def drop_role_duplicates(apps, schema_editor):
db_alias = schema_editor.connection.alias
Role = apps.get_model('main', 'Role')
# NOTE(cutwater): All on_delete constraints in Django are software-defined,
# so we first have to query the ids for deletion and then delete the
# Role objects through the ORM.
roles = Role.objects.using(db_alias).raw(ROLE_DUPLICATES_QUERY)
for role in (
Role.objects.using(db_alias)
.filter(pk__in=(r.id for r in roles))):
# NOTE(cutwater): When calling .delete() on a QuerySet, it seems that
# Django ORM does not execute on_delete handlers, so we have to call
# .delete() on each object individually.
role.delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0055_contentblock'),
]
operations = [
# NOTE(cutwater): Since Django creates all constraints as DEFERRED,
# we need to set them to IMMEDIATE to perform DDL and DML queries
# in one single transaction.
migrations.RunSQL('SET CONSTRAINTS ALL IMMEDIATE',
reverse_sql=migrations.RunSQL.noop),
migrations.RunPython(drop_role_duplicates,
reverse_code=migrations.RunPython.noop),
migrations.AlterUniqueTogether(
name='role',
unique_together={
('namespace', 'name'),
('github_user', 'github_repo')
},
),
migrations.RunSQL(sql=migrations.RunSQL.noop,
reverse_sql='SET CONSTRAINTS ALL IMMEDIATE'),
]
|
{
"content_hash": "945e4ae574506a31206394616c985be2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 33.785714285714285,
"alnum_prop": 0.6152219873150105,
"repo_name": "chouseknecht/galaxy",
"id": "5501a36fc1e0ac2e21dbd40adc6a90d317dfdd23",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "galaxy/main/migrations/0056_role_unique_repos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "106646"
},
{
"name": "HTML",
"bytes": "180778"
},
{
"name": "JavaScript",
"bytes": "555658"
},
{
"name": "Makefile",
"bytes": "5862"
},
{
"name": "Python",
"bytes": "526433"
},
{
"name": "Shell",
"bytes": "3147"
},
{
"name": "VCL",
"bytes": "1235"
}
],
"symlink_target": ""
}
|
INGRESS_DIR = 'ingress'
EGRESS_DIR = 'egress'
STATUS_BUILDING = 'building'
STATUS_ACTIVE = 'active'
STATUS_ERROR = 'error'
STATUS_DELETING = 'deleting'
SERVICE_CHAIN = 'servicechain'
SERVICECHAIN_TOPIC = 'q-servicechain-plugin'
SERVICECHAIN_AGENT_TOPIC = 'q-servicechain-agent'
AGENT_TYPE_SERVICECHAIN = 'Service chain agent'
PORTFLOW_OPT_ADD = 'add-flows'
PORTFLOW_OPT_DELETE = 'delete-flows'
PORTFLOW_OPT_UPDATE = 'update-flows'
MAX_HASH = 16
|
{
"content_hash": "7a112972224f6a293b31f9d01e3908b3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 23.68421052631579,
"alnum_prop": 0.7444444444444445,
"repo_name": "nash-x/hws",
"id": "cc7fc006107fa9f41fddc5a5bd46eba7e8c5c99b",
"size": "1125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/services/servicechain/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "PLpgSQL",
"bytes": "12782"
},
{
"name": "Python",
"bytes": "20443623"
},
{
"name": "Shell",
"bytes": "4643"
}
],
"symlink_target": ""
}
|
import re
import threading
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed, ImproperlyConfigured
from django.core.urlresolvers import set_urlconf
from .utils import from_dotted_path
_thread_local = threading.local()
class SubdomainMiddleware(object):
"""
Adjust incoming request's urlconf based on `settings.SUBDOMAINS`.
Overview
========
This middleware routes requests to specific subdomains to different URL
schemes ("urlconf").
For example, if you own ``example.com`` but want to serve specific content
at ``api.example.com`` and ``beta.example.com``, add the following to your
``settings.py``:
from dynamic_subdomains.defaults import patterns, subdomain
SUBDOMAINS = patterns(
subdomain('api', 'path.to.api.urls', name='api'),
subdomain('beta', 'path.to.beta.urls', name='beta'),
)
This causes requests to ``{api,beta}.example.com`` to be routed to their
corresponding urlconf. You can use your ``urls.py`` as a template for these
urlconfs.
Patterns are evaluated in order. If no pattern matches, the request is
processed in the usual way, ie. using ``settings.ROOT_URLCONF``.
Pattern format
==============
The patterns on the left-hand side are regular expressions. For example,
the following ``settings.SUBDOMAINS`` will route ``foo.example.com`` and
``bar.example.com`` to the same urlconf.
SUBDOMAINS = patterns(
subdomain(r'(foo|bar)', 'path.to.urls', name='foo-or-bar'),
)
.. note:
* Patterns are matched against the extreme left of the requested host
* It is implied that all patterns end either with a literal full stop
(ie. ".") or an end of line metacharacter.
* As with all regular expressions, various metacharacters need quoting.
Dynamic subdomains using regular expressions
============================================
Patterns being regular expressions allows setups to feature dynamic (or
"wildcard") subdomain schemes:
SUBDOMAINS = patterns(
subdomain('www', ROOT_URLCONF, name='static'),
subdomain('\w+', 'path.to.custom_urls', name='wildcard'),
)
Here, requests to ``www.example.com`` will be routed as normal but a
request to ``lamby.example.com`` is routed to ``path.to.custom_urls``.
As patterns are matched in order, we placed ``www`` first as it otherwise
would have matched against ``\w+`` and thus routed to the wrong
destination.
Alternatively, we could have used negative lookahead:
SUBDOMAINS = patterns(
subdomain('(?!www)\w+', 'path.to.custom_urls', name='wildcard'),
)
Callback methods to simplify dynamic subdomains
===============================================
The previous section outlined using regular expressions to implement
dynamic subdomains.
However, inside every view referenced by the target urlconf we would have
to parse the subdomain from ``request.get_host()`` and lookup its
corresponding object instance, violating DRY. If these dynamic subdomains
had a lot of views this would become particularly unwieldy.
To remedy this, you can optionally specify a callback method to be called
if your subdomain matches:
SUBDOMAINS = patterns(
subdomain('www', ROOT_URLCONF, name='static'),
subdomain('(?P<username>\w+)', 'path.to.custom_urls',
callback='path.to.custom_fn', name='with-callback'),
)
[..]
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
def custom_fn(request, username):
request.viewing_user = get_object_or_404(User, username=username)
This example avoids the duplicated work in every view by attaching a
``viewing_user`` instance to the request object. Views referenced by the
"dynamic" urlconf can now assume that this object exists.
The custom method is called with the ``request`` object and any named
captured arguments, similar to regular Django url processing.
Callbacks may return either ``None`` or an ``HttpResponse`` object. If it
returns ``None``, the request continues to be processed and the appropriate
view is eventually called. If a callback returns an ``HttpResponse``
object, that ``HttpResponse`` is returned to the client without any further
processing.
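Building on the example above, a callback can also short-circuit the
request, for instance returning a 404 for suspended accounts (a sketch;
the ``Account`` model and its ``suspended`` flag are hypothetical):
    from django.http import HttpResponseNotFound
    def custom_fn(request, username):
        account = get_object_or_404(Account, username=username)
        if account.suspended:
            return HttpResponseNotFound()
        request.viewing_account = account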
.. note:
Callbacks are executed with the urlconf set to the second argument in
the ``SUBDOMAINS`` list. For example, in the example above, the
callback will be executed with the urlconf as ``path.to.custom_urls``
and not the default urlconf.
This can cause problems when reversing URLs within your callback, as
they may not be "visible" to ``django.core.urlresolvers.reverse`` if
they are specified in (eg.) the default urlconf.
To remedy this, specify the ``urlconf`` parameter when calling
``reverse``.
Notes
=====
* When using dynamic subdomains based on user input, ensure users cannot
specify names that conflict with static subdomains such as "www" or
their subdomain will not be accessible.
* Don't forget to add ``handler404`` and ``handler500`` entries for your
custom urlconfs.
"""
def __init__(self):
try:
settings.SUBDOMAINS
except AttributeError:
raise ImproperlyConfigured("Missing settings.SUBDOMAINS setting")
try:
self.default = settings.SUBDOMAINS[settings.SUBDOMAIN_DEFAULT]
except AttributeError:
raise ImproperlyConfigured(
"Missing settings.SUBDOMAIN_DEFAULT setting"
)
except KeyError:
raise ImproperlyConfigured(
"settings.SUBDOMAIN_DEFAULT does not point to a valid domain"
)
if not settings.SUBDOMAINS:
raise MiddlewareNotUsed()
        # Compile subdomains. We append a literal full stop (or an
        # end-of-string anchor) to every pattern to avoid rather unwieldy
        # escaping in every definition.
for subdomain in settings.SUBDOMAINS.values():
callback = subdomain.get('callback', lambda *args, **kwargs: None)
if isinstance(callback, (basestring,)):
callback = from_dotted_path(callback)
subdomain['_regex'] = re.compile(r'%s(\.|$)' % subdomain['regex'])
subdomain['_callback'] = callback
def process_request(self, request):
if not getattr(_thread_local, 'enabled', True):
return
host = request.get_host()
# Find best match, falling back to settings.SUBDOMAIN_DEFAULT
for subdomain in settings.SUBDOMAINS.values():
match = subdomain['_regex'].match(host)
if match:
kwargs = match.groupdict()
break
else:
kwargs = {}
subdomain = self.default
urlconf = subdomain['urlconf']
callback = subdomain['_callback']
request.urlconf = urlconf
try:
set_urlconf(urlconf)
return callback(request, **kwargs)
finally:
set_urlconf(None)
|
{
"content_hash": "091dab1e8e5e764086ba033044592c77",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 36.05392156862745,
"alnum_prop": 0.6420122365737594,
"repo_name": "playfire/django-dynamic-subdomains",
"id": "974a754ac97f8b88c6ae050c6f99bed50e3670db",
"size": "7355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_subdomains/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17477"
}
],
"symlink_target": ""
}
|
"""
Django settings for jobvisualization project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2ivu!k(bf(+$e_9#75-a+@(#z#*r8u(bqd%k^%d8@z==r_ae@*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.data_storage',
'apps.mapvisuals',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'jobvisualization.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jobvisualization.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Parse database configuration from $DATABASE_URL
# DATABASES['default'] = dj_database_url.config()
# # Enable Connection Pooling (if desired)
# DATABASES['default']['ENGINE'] = 'django_postgrespool'
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# NB: this redefines BASE_DIR (set near the top of this file) to the
# directory containing this settings module, so the static paths below
# are resolved relative to it.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
{
"content_hash": "8e9899b49458bbfccf53219ef6e8f9c2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 74,
"avg_line_length": 27.40495867768595,
"alnum_prop": 0.6975271411338962,
"repo_name": "nmccrory/job-visualization",
"id": "61eeb09a909f06509380f195d42bf2a0e8d7cff6",
"size": "3316",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "jobvisualization/jobvisualization/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6499"
},
{
"name": "Python",
"bytes": "13148"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class MacAddress(A10BaseClass):
""" :param static_list: {"minItems": 1, "items": {"type": "static"}, "uniqueItems": true, "array": [{"required": ["mac", "vlan"], "properties": {"dest": {"default": 0, "optional": true, "type": "number", "description": "Trap MAC with this DA to CPU", "format": "flag"}, "mac": {"optional": false, "type": "string", "description": "Configure a Static MAC address", "format": "mac-address"}, "vlan": {"description": "VLAN Id", "format": "number", "optional": false, "maximum": 4094, "minimum": 2, "type": "number", "$ref": "/axapi/v3/network/vlan"}, "port": {"optional": true, "type": "number", "description": "Ethernet Port on which the Address is applicable (Port Value (Defualt VLAN is 1))", "format": "interface"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/network/mac-address/static/{mac}+{vlan}"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Configure a MAC address.
Class mac-address supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/network/mac-address`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "mac-address"
self.a10_url="/axapi/v3/network/mac-address"
self.DeviceProxy = ""
self.static_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
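# Usage sketch (illustrative only, not part of the generated module): the
# constructor simply maps keyword arguments onto attributes, and static_list
# entries follow the schema above, which requires 'mac' and 'vlan'.
#
#   entry = {'mac': '00:11:22:33:44:55', 'vlan': 10}
#   mac_table = MacAddress(static_list=[entry], DeviceProxy=device_proxy)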
|
{
"content_hash": "4046b67ce8906dcdbc3c3aa5d408388b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 965,
"avg_line_length": 51.68571428571428,
"alnum_prop": 0.6301824212271974,
"repo_name": "amwelch/a10sdk-python",
"id": "a35f80524bb17fcfa3b62d8797a4c76e6d70095f",
"size": "1809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/network/network_mac_address.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
import smbus
from time import sleep
class i2c_device:
def __init__(self, addr, port=1):
self.addr = addr
self.bus = smbus.SMBus(port)
# Write a single command
def write_cmd(self, cmd):
self.bus.write_byte(self.addr, cmd)
sleep(0.0001)
# Write a command and argument
def write_cmd_arg(self, cmd, data):
self.bus.write_byte_data(self.addr, cmd, data)
sleep(0.0001)
# Write a block of data
def write_block_data(self, cmd, data):
self.bus.write_block_data(self.addr, cmd, data)
sleep(0.0001)
# Read a single byte
def read(self):
return self.bus.read_byte(self.addr)
   # Read a single byte from a designated register
def read_data(self, cmd):
return self.bus.read_byte_data(self.addr, cmd)
# Read a block of data
def read_block_data(self, cmd):
return self.bus.read_block_data(self.addr, cmd)
|
{
"content_hash": "bf6eb3d12e90d7f25cef89dd229d462c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 53,
"avg_line_length": 24.676470588235293,
"alnum_prop": 0.6436233611442194,
"repo_name": "jaschawilcox/raspi-lockout",
"id": "95fff2fa70b550b7edfbb5a8f72de276f011dd56",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i2c_lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19508"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
orm.Project.objects.filter(project_type='pebblejs').update(sdk_version='3')
def backwards(self, orm):
orm.Project.objects.filter(project_type='pebblejs').update(sdk_version='2')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ide.buildresult': {
'Meta': {'object_name': 'BuildResult'},
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['ide.Project']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'6c855f5d-99f5-436c-93cf-9282a2c455be'", 'max_length': '36'})
},
'ide.buildsize': {
'Meta': {'object_name': 'BuildSize'},
'binary_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sizes'", 'to': "orm['ide.BuildResult']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'resource_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'worker_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ide.project': {
'Meta': {'object_name': 'Project'},
'app_capabilities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'app_company_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_is_watchface': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'app_jshint': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'app_keys': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'app_long_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_platforms': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'app_short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'app_uuid': ('django.db.models.fields.CharField', [], {'default': "'78a89854-17e3-41e4-ab00-72b61dd516b0'", 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'app_version_label': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_branch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'github_hook_build': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'github_hook_uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'github_last_commit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'github_last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'optimisation': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'project_type': ('django.db.models.fields.CharField', [], {'default': "'native'", 'max_length': '10'}),
'sdk_version': ('django.db.models.fields.CharField', [], {'default': "'2'", 'max_length': '6'})
},
'ide.resourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu_icon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['ide.Project']"})
},
'ide.resourceidentifier': {
'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'compatibility': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': "orm['ide.ResourceFile']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tracking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'ide.resourcevariant': {
'Meta': {'unique_together': "(('resource_file', 'variant'),)", 'object_name': 'ResourceVariant'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_legacy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variants'", 'to': "orm['ide.ResourceFile']"}),
'variant': ('django.db.models.fields.IntegerField', [], {})
},
'ide.sourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': "orm['ide.Project']"}),
'target': ('django.db.models.fields.CharField', [], {'default': "'app'", 'max_length': '10'})
},
'ide.templateproject': {
'Meta': {'object_name': 'TemplateProject', '_ormbases': ['ide.Project']},
u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'ide.usergithub': {
'Meta': {'object_name': 'UserGithub'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'github'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'ide.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'autocomplete': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'keybinds': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'tab_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'cloudpebble'", 'max_length': '50'}),
'use_spaces': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'whats_new': ('django.db.models.fields.PositiveIntegerField', [], {'default': '15'})
}
}
complete_apps = ['ide']
symmetrical = True
|
{
"content_hash": "8f645566249a5297feac495aabcb5955",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 195,
"avg_line_length": 82.63924050632912,
"alnum_prop": 0.552806923489316,
"repo_name": "pebble/cloudpebble",
"id": "7ffe3e1d9e0a3ea502d02600af8bd3905c3f177b",
"size": "13081",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ide/migrations/0033_migrate_pebblejs_to_sdk3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4664"
},
{
"name": "CSS",
"bytes": "70652"
},
{
"name": "HTML",
"bytes": "122226"
},
{
"name": "JavaScript",
"bytes": "508689"
},
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "Python",
"bytes": "950740"
},
{
"name": "Shell",
"bytes": "7895"
}
],
"symlink_target": ""
}
|
class FDSObjectSummary(object):
"""
The FDS Object Summary class.
"""
def __init__(self):
self.bucket_name = None
self.object_name = None
self.owner = None
self.size = None
|
{
"content_hash": "c3ff4a421a8ae8cf32c12e88973dcaf1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 31,
"avg_line_length": 19.8,
"alnum_prop": 0.6212121212121212,
"repo_name": "XiaoMi/galaxy-fds-sdk-python",
"id": "158f0ec7b02ace82e04ec11f02dacf040acaa064",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fds/model/fds_object_summary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "215501"
}
],
"symlink_target": ""
}
|
import unittest
import array
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from Crypto import Random
class SimpleAESException(Exception):
pass
class PaddingError(SimpleAESException):
pass
class SimpleAES(object):
'''
AES CBC PKCS#7
Note that arguments `salt` and `c` are used exclusively for key
expansion with the PBKDF2 function. Setting `salt` but leaving c=0
raises an exception as salt will have no effect unless c > 0.
'''
KEY_SIZE = 32
def __init__(self, key, iv=None, salt=None, c=0):
self._key = key
self._iv = iv
self._salt = salt
self._c = c
def _new_cipher(self, **kw):
iv = kw.get('iv', self._iv or SimpleAES.new_iv())
salt = kw.get('salt', self._salt)
c = kw.get('c', self._c)
key = self._derive_key(self._key, salt, c)
cipher = AES.new(key, AES.MODE_CBC, iv)
return cipher
@classmethod
def new_salt(cls):
return Random.new().read(cls.KEY_SIZE)
@classmethod
def new_iv(cls):
return Random.new().read(AES.block_size)
def _derive_key(self, inkey, salt, c):
if not salt and not c:
return inkey
elif (salt and not c) or (c and not salt):
errmsg = 'salt requires c > 0 and vice versa'
raise SimpleAESException(errmsg)
key = PBKDF2(inkey, salt, self.KEY_SIZE, self._c)
return key
def _pkcs7_encode(self, data):
_pcount = 16 - (len(data) % 16)
pkcs7 = data + chr(_pcount) * _pcount
return pkcs7
def _pkcs7_decode(self, pkcs7):
try:
assert len(pkcs7)
assert len(pkcs7) % 16 == 0
p = ord(pkcs7[-1])
assert 1 <= p <= 16
prange = pkcs7[-p:]
assert prange == chr(p) * p
except AssertionError:
raise PaddingError
data = pkcs7[:-p]
return data
def _encrypt(self, plaintext, **kw):
pkcs7 = self._pkcs7_encode(plaintext)
cipher = self._new_cipher(**kw)
ciphertext = cipher.encrypt(pkcs7)
return ciphertext
def _decrypt(self, ciphertext, **kw):
cipher = self._new_cipher(**kw)
pkcs7 = cipher.decrypt(ciphertext)
plaintext = self._pkcs7_decode(pkcs7)
return plaintext
def _tostring(self, val):
if isinstance(val, array.array):
val = val.tostring()
elif type(val) is str:
pass
else:
errmsg = 'Only arrays and strings are accepted (not {0}).'
errmsg = errmsg.format(type(val))
raise SimpleAESException(errmsg)
return val
def encrypt(self, plaintext, **kw):
plaintext = self._tostring(plaintext)
return self._encrypt(plaintext, **kw)
def decrypt(self, ciphertext, **kw):
ciphertext = self._tostring(ciphertext)
return self._decrypt(ciphertext, **kw)
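def _usage_sketch():
    # Illustrative only (not part of the original API): a fixed IV must be
    # supplied up front, because with iv=None a fresh random IV is drawn for
    # every operation and the round-trip would fail with a PaddingError.
    iv = SimpleAES.new_iv()
    aes = SimpleAES(key='k' * SimpleAES.KEY_SIZE, iv=iv)
    assert aes.decrypt(aes.encrypt('attack at dawn')) == 'attack at dawn'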
def raises(ex, fn, *args, **kw):
try:
fn(*args, **kw)
except ex:
return True
return False
class Test_SimpleAES(unittest.TestCase):
def test_test(self):
assert 1 == 1
def test_vectors_simple(self):
from operator import itemgetter
from aes_cbc_pkcs7_testdata import vectors
for v in vectors:
key, iv, pt = itemgetter('key', 'IV', 'plaintext')(v)
aes = SimpleAES(key=key, iv=iv)
ct = aes.encrypt(pt)
assert ct == v['ciphertext']
pt = aes.decrypt(ct)
assert pt == v['plaintext']
def test_vectors_with_pbkdf2(self):
from operator import itemgetter
from aes_cbc_pkcs7_testdata import vectors
for c in (3, 11, 29, 107, 383):
for v in vectors:
key, iv, pt = itemgetter('key', 'IV', 'plaintext')(v)
salt = SimpleAES.new_salt()
aes = SimpleAES(key=key, iv=iv, salt=salt, c=c)
ct = aes.encrypt(pt)
assert ct != v['ciphertext']
pt = aes.decrypt(ct)
assert pt == v['plaintext']
def test_cipher_padding_errors(self):
from operator import itemgetter
from aes_cbc_pkcs7_testdata import vectors
for c in xrange(1, 32):
for v in vectors:
key, iv, ct = itemgetter('key', 'IV', 'ciphertext')(v)
salt = SimpleAES.new_salt()
aes = SimpleAES(key=key, iv=iv, salt=salt, c=c)
ct = chr((ord(ct[0]) + 1) % 256) + ct[1:]
try:
pt = aes.decrypt(ct)
except PaddingError:
pass
else:
assert pt != v['plaintext']
if 0:
print len(pt), pt.encode('hex')
print len(v['plaintext']), v['plaintext'].encode('hex')
print '------'
def _test():
    import doctest
    import unittest
    # Run the doctests first: unittest.main() calls sys.exit() and would
    # otherwise prevent them from ever running.
    print(doctest.testmod())
    unittest.main()
if __name__ == "__main__":
_test()
|
{
"content_hash": "d4968ef307e4d9db0410585259c905cf",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 29.436781609195403,
"alnum_prop": 0.5355329949238579,
"repo_name": "kristerhedfors/paddingoracle",
"id": "3a732f6426f8c49e94fc90cdea5dae98558ff56d",
"size": "5208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpleaes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "14189"
}
],
"symlink_target": ""
}
|
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/opt/opscode/embedded'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
if os.access(file, os.R_OK) == 0:
return 1
return 0
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print "failed to find headers for libxml2: update includes_dir"
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print "failed to find headers for libiconv: update includes_dir"
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print "failed to find and generate stubs for libxml2, aborting ..."
print sys.exc_type, sys.exc_value
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print "libxslt stub generator not found, libxslt not built"
else:
try:
import xsltgenerator
except:
print "failed to generate stubs for libxslt, aborting ..."
print sys.exc_type, sys.exc_value
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print "failed to find headers for libxslt: update includes_dir"
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.7.7",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
       license="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
|
{
"content_hash": "3de4e81409285e9ff93bf01e182b9f81",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 78,
"avg_line_length": 27.827731092436974,
"alnum_prop": 0.619507775932357,
"repo_name": "jtimberman/omnibus",
"id": "085e863be4b1f6fd910bb76c420daf2409aa6442",
"size": "6696",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/libxml2-2.7.7/python/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: group
author: Stephen Fromm
version_added: "0.0.2"
short_description: Add or remove groups
requirements: [ groupadd, groupdel, groupmod ]
description:
- Manage presence of groups on a host.
options:
name:
required: true
description:
- Name of the group to manage.
gid:
required: false
description:
- Optional I(GID) to set for the group.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group should be present or not on the remote host.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If I(yes), indicates that the group created is a system group.
'''
EXAMPLES = '''
# Example group command from Ansible Playbooks
- group: name=somegroup state=present
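# Create a system group with a fixed GID (illustrative values)
- group: name=appgroup gid=1042 state=present system=yes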
'''
import grp
import syslog
import platform
class Group(object):
"""
This is a generic Group manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- group_del()
- group_add()
- group_mod()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
GROUPFILE = '/etc/group'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Group, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.gid = module.params['gid']
self.system = module.params['system']
self.syslogging = False
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd)
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(kwargs[key])
elif key == 'system' and kwargs[key] == True:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('-g')
cmd.append(kwargs[key])
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self):
try:
if grp.getgrnam(self.name):
return True
except KeyError:
return False
def group_info(self):
if not self.group_exists():
return False
try:
info = list(grp.getgrnam(self.name))
except KeyError:
return False
return info
# ===========================================
class SunOS(Group):
"""
This is a SunOS Group manipulation class. Solaris doesn't have
the 'system' group concept.
This overrides the following methods from the generic class:-
- group_add()
"""
platform = 'SunOS'
distribution = None
GROUPFILE = '/etc/group'
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(kwargs[key])
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class AIX(Group):
"""
This is a AIX Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'AIX'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('rmgroup', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('mkgroup', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('id='+kwargs[key])
elif key == 'system' and kwargs[key] == True:
cmd.append('-a')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('chgroup', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('id='+kwargs[key])
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
"""
This is a FreeBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'FreeBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:
return (0, '', '')
return self.execute_command(cmd)
return (None, '', '')
# ===========================================
class DarwinGroup(Group):
"""
This is a Mac OS X Darwin Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
    Group manipulation is done using dseditgroup(1).
"""
platform = 'Darwin'
distribution = None
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'create' ]
if self.gid is not None:
cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_del(self):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'delete' ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_mod(self, gid=None):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'edit' ]
if gid is not None:
cmd += [ '-i', gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
class OpenBsdGroup(Group):
"""
This is a OpenBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'OpenBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
"""
This is a NetBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'NetBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
gid=dict(default=None, type='str'),
system=dict(default=False, type='bool'),
),
supports_check_mode=True
)
group = Group(module)
if group.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform)
        if group.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = group.name
result['state'] = group.state
if group.state == 'absent':
if group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_del()
if rc != 0:
module.fail_json(name=group.name, msg=err)
elif group.state == 'present':
if not group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_add(gid=group.gid, system=group.system)
else:
(rc, out, err) = group.group_mod(gid=group.gid)
if rc is not None and rc != 0:
module.fail_json(name=group.name, msg=err)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if group.group_exists():
info = group.group_info()
result['system'] = group.system
result['gid'] = info[2]
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
{
"content_hash": "33f1dd74b9419934d22ce4cb68b12bf2",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 105,
"avg_line_length": 29.376744186046512,
"alnum_prop": 0.536811272957568,
"repo_name": "mith1979/ansible_automation",
"id": "51254f4ef76aa3ca3ea1f1c0e0522f0cf3a55ae0",
"size": "13375",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/system/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import logging
try:
import psycopg2 as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
from librdbms.server.rdbms_base_lib import BaseRDBMSDataTable, BaseRDBMSResult, BaseRDMSClient
LOG = logging.getLogger(__name__)
class DataTable(BaseRDBMSDataTable): pass
class Result(BaseRDBMSResult): pass
class PostgreSQLClient(BaseRDMSClient):
"""Same API as Beeswax"""
data_table_cls = DataTable
result_cls = Result
def __init__(self, *args, **kwargs):
super(PostgreSQLClient, self).__init__(*args, **kwargs)
self.connection = Database.connect(**self._conn_params)
@property
def _conn_params(self):
params = {
'user': self.query_server['username'],
'password': self.query_server['password'],
'host': self.query_server['server_host'],
'port': self.query_server['server_port'] == 0 and 5432 or self.query_server['server_port'],
'database': self.query_server['name']
}
if self.query_server['options']:
params.update(self.query_server['options'])
# handle transaction commits manually.
if 'autocommit' in params:
del params['autocommit']
return params
def use(self, database):
# No op since postgresql requires a new connection per database
pass
def execute_statement(self, statement):
cursor = self.connection.cursor()
cursor.execute(statement)
self.connection.commit()
if cursor.description:
columns = [column[0] for column in cursor.description]
else:
columns = []
return self.data_table_cls(cursor, columns)
def get_databases(self):
# List all the schemas in the database
try:
cursor = self.connection.cursor()
cursor.execute('SELECT nspname from pg_catalog.pg_namespace')
self.connection.commit()
return [row[0] for row in cursor.fetchall()]
except Exception:
LOG.exception('Failed to select nspname from pg_catalog.pg_namespace')
return [self._conn_params['database']]
def get_tables(self, database, table_names=[]):
cursor = self.connection.cursor()
query = "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '%s'" % database
if table_names:
clause = ' OR '.join(["tablename LIKE '%%%(table)s%%'" % {'table': table} for table in table_names])
query += ' AND (%s)' % clause
cursor.execute(query)
self.connection.commit()
return [row[0] for row in cursor.fetchall()]
def get_columns(self, database, table, names_only=True):
cursor = self.connection.cursor()
query = """
SELECT
a.attname as "name",
pg_catalog.format_type(a.atttypid, a.atttypmod) as "datatype"
FROM
pg_catalog.pg_attribute a
WHERE
a.attnum > 0
AND NOT a.attisdropped
AND a.attrelid = (
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname ~ '^(%(table)s)$'
AND n.nspname = '%(database)s'
AND pg_catalog.pg_table_is_visible(c.oid)
)
""" % {'table': table, 'database': database}
cursor.execute(query)
self.connection.commit()
if names_only:
columns = [row[0] for row in cursor.fetchall()]
else:
columns = [dict(name=row[0], type=row[1], comment='') for row in cursor.fetchall()]
return columns
def get_sample_data(self, database, table, column=None, limit=100):
column = '"%s"' % column if column else '*'
statement = 'SELECT %s FROM "%s"."%s" LIMIT %d' % (column, database, table, limit)
return self.execute_statement(statement)
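# Usage sketch (illustrative only; the query_server keys are inferred from
# _conn_params above, and the constructor signature is assumed to match
# BaseRDMSClient):
#
#   query_server = {'username': 'hue', 'password': 'secret',
#                   'server_host': 'localhost', 'server_port': 5432,
#                   'name': 'hue', 'options': {}}
#   client = PostgreSQLClient(query_server)
#   tables = client.get_tables('public')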
|
{
"content_hash": "c93a806d133ef3d27834c5826020b404",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 106,
"avg_line_length": 31.008196721311474,
"alnum_prop": 0.6402326196140629,
"repo_name": "jayceyxc/hue",
"id": "ec9a29b919bb1acfb1e66b91c47af4970c602b9c",
"size": "4575",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "desktop/libs/librdbms/src/librdbms/server/postgresql_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2716690"
},
{
"name": "C++",
"bytes": "200268"
},
{
"name": "CSS",
"bytes": "630891"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23982883"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5068327"
},
{
"name": "Lex",
"bytes": "36239"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146292"
},
{
"name": "Mako",
"bytes": "3334641"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45608023"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46700"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "517693"
},
{
"name": "Yacc",
"bytes": "381310"
}
],
"symlink_target": ""
}
|
from silopub import util
from silopub.ext import db
from silopub.models import Account, Tumblr
from flask import Blueprint, current_app, redirect, url_for, request, flash
from flask import session
from requests_oauthlib import OAuth1Session, OAuth1
import html
import requests
import sys
REQUEST_TOKEN_URL = 'https://www.tumblr.com/oauth/request_token'
AUTHORIZE_URL = 'https://www.tumblr.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://www.tumblr.com/oauth/access_token'
USER_INFO_URL = 'https://api.tumblr.com/v2/user/info'
CREATE_POST_URL = 'https://api.tumblr.com/v2/blog/{}/post'
FETCH_POST_URL = 'https://api.tumblr.com/v2/blog/{}/posts'
SERVICE_NAME = 'tumblr'
tumblr = Blueprint('tumblr', __name__)
@tumblr.route('/tumblr/authorize', methods=['POST'])
def authorize():
try:
callback_uri = url_for('.callback', _external=True)
return redirect(get_authorize_url(callback_uri))
except:
current_app.logger.exception('Starting Tumblr authorization')
flash(html.escape(str(sys.exc_info()[0])), 'danger')
return redirect(url_for('views.index'))
def get_authorize_url(callback_uri, **kwargs):
oauth = OAuth1Session(
client_key=current_app.config['TUMBLR_CLIENT_KEY'],
client_secret=current_app.config['TUMBLR_CLIENT_SECRET'],
callback_uri=callback_uri)
r = oauth.fetch_request_token(REQUEST_TOKEN_URL)
session['oauth_token_secret'] = r.get('oauth_token_secret')
return oauth.authorization_url(AUTHORIZE_URL)
@tumblr.route('/tumblr/callback')
def callback():
try:
callback_uri = url_for('.callback', _external=True)
result = process_callback(callback_uri)
if 'error' in result:
flash(result['error'], category='danger')
return redirect(url_for('views.index'))
account = result['account']
flash('Authorized {}: {}'.format(account.username, ', '.join(
s.domain for s in account.sites)))
return redirect(url_for('views.setup_account', service=SERVICE_NAME,
user_id=account.user_id))
except:
current_app.logger.exception('During Tumblr authorization callback')
flash(html.escape(str(sys.exc_info()[0])), 'danger')
return redirect(url_for('views.index'))
def process_callback(callback_uri):
verifier = request.args.get('oauth_verifier')
request_token = request.args.get('oauth_token')
if not verifier or not request_token:
# user declined
return {'error': 'Tumblr authorization declined'}
request_token_secret = session.get('oauth_token_secret')
oauth = OAuth1Session(
client_key=current_app.config['TUMBLR_CLIENT_KEY'],
client_secret=current_app.config['TUMBLR_CLIENT_SECRET'],
resource_owner_key=request_token,
resource_owner_secret=request_token_secret)
oauth.parse_authorization_response(request.url)
# get the access token and secret
r = oauth.fetch_access_token(ACCESS_TOKEN_URL)
token = r.get('oauth_token')
secret = r.get('oauth_token_secret')
info_resp = oauth.get(USER_INFO_URL).json()
user_info = info_resp.get('response', {}).get('user')
user_id = username = user_info.get('name')
account = Account.query.filter_by(
service='tumblr', user_id=user_id).first()
if not account:
account = Account(service='tumblr', user_id=user_id)
db.session.add(account)
account.username = username
account.user_info = user_info
account.token = token
account.token_secret = secret
sites = []
for blog in user_info.get('blogs', []):
sites.append(Tumblr(
url=blog.get('url'),
domain=util.domain_for_url(blog.get('url')),
site_id=blog.get('name'),
site_info=blog))
account.update_sites(sites)
db.session.commit()
util.set_authed(account.sites)
return {'account': account}
def publish(site):
auth = OAuth1(
client_key=current_app.config['TUMBLR_CLIENT_KEY'],
client_secret=current_app.config['TUMBLR_CLIENT_SECRET'],
resource_owner_key=site.account.token,
resource_owner_secret=site.account.token_secret)
create_post_url = CREATE_POST_URL.format(site.domain)
photo_url = util.get_first(util.get_possible_array_value(request.form, 'photo'))
photo_file = util.get_first(util.get_possible_array_value(request.files, 'photo'))
if photo_url:
data = util.trim_nulls({
'type': 'photo',
'slug': request.form.get('slug'),
'caption': request.form.get('content[html]') or
request.form.get('content') or request.form.get('name') or
request.form.get('summary'),
'source': photo_url
})
r = requests.post(create_post_url, data=data, auth=auth)
elif photo_file:
# tumblr signs multipart in a weird way. first sign the request as if
# it's application/x-www-form-urlencoded, then recreate the request as
# multipart but use the signed headers from before. Mostly cribbed from
# https://github.com/tumblr/pytumblr/blob/\
# 20e7e38ba6f0734335deee64d4cae45fa8a2ce90/pytumblr/request.py#L101
# The API documentation and some of the code samples gave me the
# impression that you could also send files just as part of the
        # form-encoded data but I couldn't make it work
# https://www.tumblr.com/docs/en/api/v2#pphoto-posts
# https://gist.github.com/codingjester/1649885#file-upload-php-L56
data = util.trim_nulls({
'type': 'photo',
'slug': request.form.get('slug'),
'caption': request.form.get('content[html]') or
request.form.get('content') or request.form.get('name') or
request.form.get('summary'),
})
fake_req = requests.Request('POST', create_post_url, data=data)
fake_req = fake_req.prepare()
auth(fake_req)
real_headers = dict(fake_req.headers)
# manually strip these, requests will recalculate them for us
del real_headers['Content-Type']
del real_headers['Content-Length']
current_app.logger.info(
'uploading photo to tumblr %s, headers=%r',
create_post_url, real_headers)
r = requests.post(create_post_url, data=data, files={
'data': photo_file,
}, headers=real_headers)
else:
data = util.trim_nulls({
# one of: text, photo, quote, link, chat, audio, video
'type': 'text',
'slug': request.form.get('slug'),
'title': request.form.get('name'),
'body': util.get_complex_content(request.form),
})
current_app.logger.info(
'posting to tumblr %s, data=%r', create_post_url, data)
r = requests.post(create_post_url, data=data, auth=auth)
current_app.logger.info(
'response from tumblr %r, data=%r, headers=%r',
r, r.content, r.headers)
if r.status_code // 100 != 2:
current_app.logger.warn(
'Tumblr publish failed with response %s', r.text)
return util.wrap_silo_error_response(r)
location = None
if 'Location' in r.headers:
location = r.headers['Location']
else:
# only get back the id, look up the url
post_id = r.json().get('response').get('id')
r = requests.get(FETCH_POST_URL.format(site.domain), params={
'api_key': current_app.config['TUMBLR_CLIENT_KEY'],
'id': post_id,
})
if r.status_code // 100 == 2:
posts = r.json().get('response', {}).get('posts', [])
if posts:
location = posts[0].get('post_url')
return util.make_publish_success_response(location)
|
{
"content_hash": "f513fe928c5aef38e123d47dfbb0763c",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 86,
"avg_line_length": 37.85990338164251,
"alnum_prop": 0.6265152481817022,
"repo_name": "kylewm/feverdream",
"id": "deb6e62e7b034abfcd13e8841b055a5aacbbb0e8",
"size": "7837",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "silopub/tumblr.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "119663"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
import os
import re
import csv
import generate_task_summaries
emails_by_task = {}
email_to_name = {}
DO_NOT_INCLUDE = ["bigbench@googlegroups.com"]
def get_name_for_email(line, email):
# Match: My Name (myemail@gmail.com)
match = re.search(r"([\w\s\.,<>/-]+)\s+\(?\[?'?`?" + re.escape(email), line)
if not match:
# Match: [Kevin Gimpel](https://home.ttic.edu/~kgimpel/) (kgimpel@ttic.edu)
match = re.search(r"\[([\w\s\.,<>/-]+)\]\([^)]+\)\s+\(?'?`?" + re.escape(email), line)
if match:
name = match.group(1).strip()
        assert ":" not in name
for i in range(3):
# Do this a few times to deal with Oxford comma
name = re.sub(r"^(and |, )", r"", name)
name = re.sub(r"^\s*-\s*", r"", name)
name = re.sub(r"\s*[,-]\s*$", r"", name)
name = re.sub(r"<sup>.*</sup>", r"", name)
return name
else:
return None
try:
os.chdir(generate_task_summaries.bench_dir)
except FileNotFoundError:
raise ValueError(f"Cannot chdir to {generate_task_summaries.bench_dir}")
for root, dirs, files in os.walk("."):
dirs.sort()
for fname in files:
if fname == "README.md":
path = os.path.join(root, fname)
components = path.split("/")
assert components[0] == "."
task = components[1]
with open(path, "r", encoding="utf-8") as f:
in_authors_section = False
for line in f:
authors_match = re.search(r"Authors?|Contributors?", line, re.IGNORECASE)
emails_match = re.findall(r'[\w.+-]+@[\w-]+\.[\w.-]+', line)
whitespace_match = re.match(r"^\s*$", line)
if authors_match:
in_authors_section = True
elif in_authors_section:
if emails_match or whitespace_match:
# Sometimes we see "Authors:" and then emails span
# several lines. Stay in authors section as long as we
# keep seeing emails.
pass
elif task in emails_by_task and len(emails_by_task[task]) > 0:
# Stop after we're done with one authors section.
# Make sure we found some emails, rather than just
# saw the word "authors" somewhere.
break
else:
# We saw "authors" but found no emails, so keep
# looking.
in_authors_section = False
if in_authors_section and emails_match:
if not task in emails_by_task:
emails_by_task[task] = list()
for email in emails_match:
if email in DO_NOT_INCLUDE:
continue
if not email in emails_by_task[task]:
emails_by_task[task].append(email)
name = get_name_for_email(line, email)
if name:
email_to_name[email] = name
else:
print(f"WARNING: Name not found for email {email}\n"
f"LINE: {line.strip()}\n"
f"FILE: {path}\n")
elif emails_match:
print(f"WARNING: Email found in non-authors section, skipping.\n"
f"LINE: {line.strip()}\n"
f"FILE: {path}\n")
# Save the results
tsv_filename = "task_authors.tsv"
with open(tsv_filename, "w", encoding="utf-8") as tsv_file:
writer = csv.DictWriter(
tsv_file,
delimiter="\t",
fieldnames=["task", "email", "name"])
writer.writeheader()
for task in sorted(emails_by_task):
for email in emails_by_task[task]:
try:
name = email_to_name[email]
except KeyError:
name = ""
writer.writerow({"task": task, "email": email, "name": name})
print(f"Results saved to {os.path.join(os.getcwd(), tsv_filename)}\n")
|
{
"content_hash": "586a0d4e7979c5288e76f4e5a32891b4",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 94,
"avg_line_length": 38.504347826086956,
"alnum_prop": 0.46160794941282746,
"repo_name": "google/BIG-bench",
"id": "23930ff156b5f107dc8865ece53aa20c117a9d6d",
"size": "5028",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bigbench/task_postprocessing_scripts/parse_author_emails.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1453"
},
{
"name": "Jupyter Notebook",
"bytes": "638615"
},
{
"name": "Python",
"bytes": "1564542"
},
{
"name": "Shell",
"bytes": "1436"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import pyeparse as pp
fname = '../pyeparse/tests/data/test_raw.edf'
raw = pp.read_raw(fname)
# visualize initial calibration
raw.plot_calibration(title='5-Point Calibration')
# create heatmap
raw.plot_heatmap(start=3., stop=60.)
# find events and epoch data
events = raw.find_events('SYNCTIME', event_id=1)
tmin, tmax, event_id = -0.5, 1.5, 1
epochs = pp.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax)
# access pandas data frame and plot single epoch
fig, ax = plt.subplots()
ax.plot(epochs[3].get_data('xpos')[0], epochs[3].get_data('ypos')[0])
# iterate over and access numpy arrays.
# find epochs without loss of tracking / blinks
print(len([e for e in epochs if not np.isnan(e).any()]))
fig, ax = plt.subplots()
ax.set_title('Superimposed saccade responses')
n_trials = 12 # first 12 trials
for epoch in epochs[:n_trials]:
ax.plot(epochs.times * 1e3, epoch[0].T)
time_mask = epochs.times > 0
times = epochs.times * 1e3
fig, ax = plt.subplots()
ax.plot(times[time_mask], epochs.data[0, 0, time_mask])
ax.set_title('Post baseline saccade (X, pos)')
# plot single trials
epochs.plot(picks=['xpos'], draw_discrete='saccades')
plt.show()
|
{
"content_hash": "b9f7a0a9cb4f377d0f75ad53e5784c9d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 69,
"avg_line_length": 27.68888888888889,
"alnum_prop": 0.6990369181380417,
"repo_name": "drammock/pyeparse",
"id": "a8d4d348ca415ef24037df093d1f3905ba41e84d",
"size": "1328",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/plot_from_raw_to_epochs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "701"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "119299"
}
],
"symlink_target": ""
}
|
"""Define tests for the Ambient PWS config flow."""
import json
from unittest.mock import patch
import aioambient
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_API_KEY
from tests.common import MockConfigEntry, load_fixture, mock_coro
@pytest.fixture
def get_devices_response():
"""Define a fixture for a successful /devices response."""
return mock_coro()
@pytest.fixture
def mock_aioambient(get_devices_response):
"""Mock the aioambient library."""
with patch("homeassistant.components.ambient_station.config_flow.Client") as Client:
Client().api.get_devices.return_value = get_devices_response
yield Client
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
MockConfigEntry(
domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
"get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)]
)
async def test_invalid_api_key(hass, mock_aioambient):
"""Test that an invalid API/App Key throws an error."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "invalid_key"}
@pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])])
async def test_no_devices(hass, mock_aioambient):
"""Test that an account with no associated devices throws an error."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "no_devices"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
@pytest.mark.parametrize(
"get_devices_response",
[mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))],
)
async def test_step_user(hass, mock_aioambient):
"""Test that the user step works."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "67890fghij67"
assert result["data"] == {
CONF_API_KEY: "12345abcde12345abcde",
CONF_APP_KEY: "67890fghij67890fghij",
}
|
{
"content_hash": "e8ea5a1830988bd1a7e5d5dc5e7e650b",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 88,
"avg_line_length": 34.04807692307692,
"alnum_prop": 0.7048856255295114,
"repo_name": "FreekingDean/home-assistant",
"id": "806d31b5386b6d2c9aebfee336e6a6c8c822625e",
"size": "3541",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/ambient_station/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import importlib
import io
import operator
import pkgutil
import traceback
import types
from docutils import nodes
from docutils.parsers import rst
from docutils import utils
TAG = ':yaql:'
def _get_modules_names(package):
"""Get names of modules in package"""
return sorted(
map(operator.itemgetter(1),
pkgutil.walk_packages(package.__path__,
'{0}.'.format(package.__name__))))
def _get_functions_names(module):
"""Get names of the functions in the current module"""
return [name for name in dir(module) if
isinstance(getattr(module, name, None), types.FunctionType)]
def write_method_doc(method, output):
"""Construct method documentation from a docstring.
1) Strip TAG
2) Embolden function name
3) Add :callAs: after :signature:
"""
msg = "Error: function {0} has no valid YAQL documentation."
if method.__doc__:
doc = method.__doc__
try:
# strip TAG
doc = doc[doc.index(TAG) + len(TAG):]
# embolden function name
line_break = doc.index('\n')
yaql_name = doc[:line_break]
(emit_header, is_overload) = yield yaql_name
if emit_header:
output.write(yaql_name)
output.write('\n')
output.write('~' * len(yaql_name))
output.write('\n')
doc = doc[line_break:]
# add :callAs: parameter
try:
signature_index = doc.index(':signature:')
position = doc.index(' :', signature_index +
len(':signature:'))
if hasattr(method, '__yaql_function__'):
if (method.__yaql_function__.name and
'operator' in method.__yaql_function__.name):
call_as = 'operator'
elif (method.__yaql_function__.is_function and
method.__yaql_function__.is_method):
call_as = 'function or method'
elif method.__yaql_function__.is_method:
call_as = 'method'
else:
call_as = 'function'
else:
call_as = 'function'
call_as_str = ' :callAs: {0}\n'.format(call_as)
text = doc[:position] + call_as_str + doc[position:]
except ValueError:
text = doc
if is_overload:
text = '* ' + '\n '.join(text.split('\n'))
output.write(text)
else:
output.write(text)
except ValueError:
            yield method.__name__
            output.write(msg.format(method.__name__))
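# Driver protocol sketch (mirrors write_module_doc below): the generator first
# yields the function's YAQL name, then is resumed with a pair of flags.
#   it = write_method_doc(method, output)
#   name = next(it)            # yields the YAQL name parsed from the docstring
#   it.send((True, False))     # (emit_header, is_overload) -> writes the doc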
def write_module_doc(module, output):
"""Generate and write rst document for module.
Generate and write rst document for the single module.
:parameter module: takes a Python module which should be documented.
:type module: Python module
:parameter output: takes file to which generated document will be written.
:type output: file
"""
functions_names = _get_functions_names(module)
if module.__doc__:
output.write(module.__doc__)
output.write('\n')
seq = []
for name in functions_names:
method = getattr(module, name)
it = write_method_doc(method, output)
try:
name = next(it)
seq.append((name, it))
except StopIteration:
pass
seq.sort(key=operator.itemgetter(0))
prev_name = None
for i, item in enumerate(seq):
name = item[0]
emit_header = name != prev_name
prev_name = name
if emit_header:
overload = i < len(seq) - 1 and seq[i + 1][0] == name
else:
overload = True
try:
item[1].send((emit_header, overload))
except StopIteration:
pass
output.write('\n\n')
output.write('\n')
def write_package_doc(package, output):
"""Writes rst document for the package.
Generate and write rst document for the modules in the given package.
:parameter package: takes a Python package which should be documented
:type package: Python module
:parameter output: takes file to which generated document will be written.
:type output: file
"""
modules = _get_modules_names(package)
for module_name in modules:
module = importlib.import_module(module_name)
write_module_doc(module, output)
def generate_doc(source):
try:
package = importlib.import_module(source)
except ImportError:
return 'Error: No such module {0}'.format(source)
out = io.StringIO()
try:
if hasattr(package, '__path__'):
write_package_doc(package, out)
else:
write_module_doc(package, out)
res = out.getvalue()
return res
except Exception as e:
return '.. code-block:: python\n\n Error: {0}\n {1}\n\n'.format(
str(e), '\n '.join([''] + traceback.format_exc().split('\n')))
class YaqlDocNode(nodes.General, nodes.Element):
source = None
def __init__(self, source):
self.source = source
super(YaqlDocNode, self).__init__()
class YaqlDocDirective(rst.Directive):
has_content = False
required_arguments = 1
def run(self):
return [YaqlDocNode(self.arguments[0])]
def render(app, doctree, fromdocname):
for node in doctree.traverse(YaqlDocNode):
new_doc = utils.new_document('YAQL', doctree.settings)
content = generate_doc(node.source)
rst.Parser().parse(content, new_doc)
node.replace_self(new_doc.children)
def setup(app):
app.add_node(YaqlDocNode)
app.add_directive('yaqldoc', YaqlDocDirective)
app.connect('doctree-resolved', render)
return {'version': '0.1'}
|
{
"content_hash": "af1b9e9128747bc9b88d621c27a70451",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 78,
"avg_line_length": 30,
"alnum_prop": 0.5585,
"repo_name": "openstack/yaql",
"id": "caba4023ea8c6d21f56afb0a65efc74ea01211df",
"size": "6611",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/_exts/yaqlautodoc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "481598"
}
],
"symlink_target": ""
}
|
import unittest
import os
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_list_target_platforms(self):
comm.setUp()
os.chdir(comm.TEMP_DATA_PATH)
cmd = "crosswalk-app platforms"
status = os.popen(cmd).readlines()
self.assertEquals("deb", status[0].strip(" *\n"))
self.assertEquals("android", status[1].strip(" *\n"))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0abaa592f544889100d8b127ac10979d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.6394849785407726,
"repo_name": "crosswalk-project/crosswalk-test-suite",
"id": "6bef381d188c5770e4c847d1246e9e987cd707b6",
"size": "2013",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "apptools/apptools-linux-tests/apptools/target_platforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2738"
},
{
"name": "C#",
"bytes": "1437"
},
{
"name": "CSS",
"bytes": "63576"
},
{
"name": "Cucumber",
"bytes": "133383"
},
{
"name": "GLSL",
"bytes": "2187925"
},
{
"name": "HTML",
"bytes": "23702581"
},
{
"name": "Java",
"bytes": "1755638"
},
{
"name": "JavaScript",
"bytes": "3166019"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "37474"
},
{
"name": "Python",
"bytes": "1882174"
},
{
"name": "Shell",
"bytes": "614247"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class VoipList(ListResource):
def __init__(self, version, account_sid, country_code):
"""
Initialize the VoipList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param country_code: The ISO-3166-1 country code of the country.
:returns: twilio.rest.api.v2010.account.available_phone_number.voip.VoipList
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipList
"""
super(VoipList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
self._uri = '/Accounts/{account_sid}/AvailablePhoneNumbers/{country_code}/Voip.json'.format(**self._solution)
def stream(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset,
exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Streams VoipInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Lists VoipInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance]
"""
return list(self.stream(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
limit=limit,
page_size=page_size,
))
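    # Example usage (illustrative; assumes a configured twilio.rest.Client):
    #   numbers = client.available_phone_numbers('US').voip.list(
    #       sms_enabled=True, limit=20)
    #   for number in numbers:
    #       print(number.phone_number)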
def page(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of VoipInstance records from the API.
Request is executed immediately
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of VoipInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipPage
"""
data = values.of({
'AreaCode': area_code,
'Contains': contains,
'SmsEnabled': sms_enabled,
'MmsEnabled': mms_enabled,
'VoiceEnabled': voice_enabled,
'ExcludeAllAddressRequired': exclude_all_address_required,
'ExcludeLocalAddressRequired': exclude_local_address_required,
'ExcludeForeignAddressRequired': exclude_foreign_address_required,
'Beta': beta,
'NearNumber': near_number,
'NearLatLong': near_lat_long,
'Distance': distance,
'InPostalCode': in_postal_code,
'InRegion': in_region,
'InRateCenter': in_rate_center,
'InLata': in_lata,
'InLocality': in_locality,
'FaxEnabled': fax_enabled,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return VoipPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of VoipInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of VoipInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return VoipPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.VoipList>'
class VoipPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the VoipPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param country_code: The ISO-3166-1 country code of the country.
:returns: twilio.rest.api.v2010.account.available_phone_number.voip.VoipPage
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipPage
"""
super(VoipPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of VoipInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance
"""
return VoipInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
country_code=self._solution['country_code'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.VoipPage>'
class VoipInstance(InstanceResource):
def __init__(self, version, payload, account_sid, country_code):
"""
Initialize the VoipInstance
:returns: twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance
"""
super(VoipInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload.get('friendly_name'),
'phone_number': payload.get('phone_number'),
'lata': payload.get('lata'),
'locality': payload.get('locality'),
'rate_center': payload.get('rate_center'),
'latitude': deserialize.decimal(payload.get('latitude')),
'longitude': deserialize.decimal(payload.get('longitude')),
'region': payload.get('region'),
'postal_code': payload.get('postal_code'),
'iso_country': payload.get('iso_country'),
'address_requirements': payload.get('address_requirements'),
'beta': payload.get('beta'),
'capabilities': payload.get('capabilities'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
@property
def friendly_name(self):
"""
:returns: A formatted version of the phone number
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def phone_number(self):
"""
:returns: The phone number in E.164 format
:rtype: unicode
"""
return self._properties['phone_number']
@property
def lata(self):
"""
:returns: The LATA of this phone number
:rtype: unicode
"""
return self._properties['lata']
@property
def locality(self):
"""
:returns: The locality or city of this phone number's location
:rtype: unicode
"""
return self._properties['locality']
@property
def rate_center(self):
"""
:returns: The rate center of this phone number
:rtype: unicode
"""
return self._properties['rate_center']
@property
def latitude(self):
"""
:returns: The latitude of this phone number's location
:rtype: unicode
"""
return self._properties['latitude']
@property
def longitude(self):
"""
:returns: The longitude of this phone number's location
:rtype: unicode
"""
return self._properties['longitude']
@property
def region(self):
"""
:returns: The two-letter state or province abbreviation of this phone number's location
:rtype: unicode
"""
return self._properties['region']
@property
def postal_code(self):
"""
:returns: The postal or ZIP code of this phone number's location
:rtype: unicode
"""
return self._properties['postal_code']
@property
def iso_country(self):
"""
:returns: The ISO country code of this phone number
:rtype: unicode
"""
return self._properties['iso_country']
@property
def address_requirements(self):
"""
:returns: The type of Address resource the phone number requires
:rtype: unicode
"""
return self._properties['address_requirements']
@property
def beta(self):
"""
:returns: Whether the phone number is new to the Twilio platform
:rtype: bool
"""
return self._properties['beta']
@property
def capabilities(self):
"""
:returns: Whether a phone number can receive calls or messages
:rtype: unicode
"""
return self._properties['capabilities']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.VoipInstance>'
|
{
"content_hash": "2d4d4f3596fde60206ef26e4fec6f692",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 171,
"avg_line_length": 44.71145374449339,
"alnum_prop": 0.6399330016256959,
"repo_name": "twilio/twilio-python",
"id": "bd28b4bf57860908edecb2576259ccd45b5e93f8",
"size": "20314",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/api/v2010/account/available_phone_number/voip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
class HelpDialog(BoxLayout):
'''HelpDialog, in which help will be displayed from help.rst.
It emits 'on_cancel' event when 'Cancel' button is released.
'''
rst = ObjectProperty(None)
'''rst is reference to `kivy.uix.rst.RstDocument` to display help from
help.rst
'''
__events__ = ('on_cancel',)
def on_cancel(self, *args):
'''Default handler for 'on_cancel' event
'''
pass
|
{
"content_hash": "51ccca2d8264b9ec29086833cde7a251",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 26.4,
"alnum_prop": 0.6420454545454546,
"repo_name": "aron-bordin/kivy-designer",
"id": "9266a4258c078608467cf75e82843deade83977a",
"size": "528",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "designer/components/dialogs/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2239"
},
{
"name": "Makefile",
"bytes": "914"
},
{
"name": "Python",
"bytes": "487442"
},
{
"name": "Ruby",
"bytes": "12440"
},
{
"name": "Shell",
"bytes": "13"
}
],
"symlink_target": ""
}
|
import pytest
from pysagereader.sage_ii_reader import SAGEIILoaderV700
import os
def test_loader():
sage = SAGEIILoaderV700(os.path.join(os.path.dirname(__file__), 'data'))
data = sage.load_data(min_date='1984', max_date='1985')
assert len(data.time) == 238
if __name__ == '__main__':
test_loader()
|
{
"content_hash": "64a921f373bf53b2c5f3259ab81eaccd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 76,
"avg_line_length": 22.785714285714285,
"alnum_prop": 0.664576802507837,
"repo_name": "LandonRieger/pySAGE",
"id": "9700f7b2d0a1533effb082ae792bd6a676fca498",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144887"
}
],
"symlink_target": ""
}
|
import json
from indy import IndyError
from indy import did
import pytest
from indy.error import ErrorCode
@pytest.mark.asyncio
async def test_store_their_did_works(wallet_handle, did_my):
await did.store_their_did(wallet_handle, json.dumps({"did": did_my}))
@pytest.mark.asyncio
async def test_store_their_did_works_for_invalid_json(wallet_handle):
with pytest.raises(IndyError) as e:
await did.store_their_did(wallet_handle, '{"field":"value"}')
assert ErrorCode.CommonInvalidStructure == e.value.error_code
@pytest.mark.asyncio
async def test_store_their_did_works_for_invalid_handle(wallet_handle, did_my):
with pytest.raises(IndyError) as e:
await did.store_their_did(wallet_handle + 1, json.dumps({"did": did_my}))
assert ErrorCode.WalletInvalidHandle == e.value.error_code
@pytest.mark.asyncio
async def test_store_their_did_works_with_verkey(wallet_handle, did_my1, verkey_my1):
await did.store_their_did(wallet_handle, json.dumps({"did": did_my1, "verkey": verkey_my1}))
@pytest.mark.asyncio
async def test_store_their_did_works_without_did(wallet_handle, verkey_my1):
with pytest.raises(IndyError) as e:
await did.store_their_did(wallet_handle, json.dumps({"verkey": verkey_my1}))
assert ErrorCode.CommonInvalidStructure == e.value.error_code
@pytest.mark.asyncio
async def test_store_their_did_works_for_invalid_did(wallet_handle):
with pytest.raises(IndyError) as e:
await did.store_their_did(wallet_handle, '{"did": "invalid_base58_string"}')
assert ErrorCode.CommonInvalidStructure == e.value.error_code
|
{
"content_hash": "e5a44380b749fd45d3aa3d36e58ca212",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 96,
"avg_line_length": 34.891304347826086,
"alnum_prop": 0.735202492211838,
"repo_name": "anastasia-tarasova/indy-sdk",
"id": "0292f6164cf444a272395a05f22064ee294e3227",
"size": "1605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wrappers/python/tests/did/test_store_their_did.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "207870"
},
{
"name": "C#",
"bytes": "842011"
},
{
"name": "C++",
"bytes": "229233"
},
{
"name": "CSS",
"bytes": "137079"
},
{
"name": "Dockerfile",
"bytes": "23945"
},
{
"name": "Groovy",
"bytes": "102863"
},
{
"name": "HTML",
"bytes": "897750"
},
{
"name": "Java",
"bytes": "882162"
},
{
"name": "JavaScript",
"bytes": "185247"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Objective-C",
"bytes": "584121"
},
{
"name": "Objective-C++",
"bytes": "706749"
},
{
"name": "Perl",
"bytes": "8271"
},
{
"name": "Python",
"bytes": "750776"
},
{
"name": "Ruby",
"bytes": "80525"
},
{
"name": "Rust",
"bytes": "5872898"
},
{
"name": "Shell",
"bytes": "251160"
},
{
"name": "Swift",
"bytes": "1114"
},
{
"name": "TypeScript",
"bytes": "197439"
}
],
"symlink_target": ""
}
|
"""The functions to handle the main function"""
from argparse import ArgumentParser, SUPPRESS
from ORCSchlange.command.fetch import FetchReporeter
from ORCSchlange.command.db import DbCommand
__version__ = "0.7.1"
"""The version of the package"""
def main():
"""The main function that loads the commands."""
    parser = ArgumentParser(prog='orcs', description="A simple tool to interact with the ORCID-Public-API.")
add_global(parser)
subparsers = parser.add_subparsers(metavar="The ORC-Schlange commands are:")
fetch = subparsers.add_parser('fetch',
help="""Fetch the information from the ORICID-Public-API.
Call "fetch -h" for more details.""")
add_fetch(fetch)
db = subparsers.add_parser('db',
help='Manage the SQLite DB that contains the orcids. Call \"db -h\" for more details.',
add_help=False)
add_db(db)
args = parser.parse_args()
args.func(args)
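# Example invocations (illustrative):
#   orcs db create                              # create a fresh people.db
#   orcs db add 0000-0002-1825-0097 2017-01-01  # track an ORCID from a start date
#   orcs fetch --db                             # fetch using credentials stored in the DB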
def add_global(parser):
"""Add the global arguments to the parser.
:param parser: The global ArgumentParser
"""
parser.set_defaults(func=lambda x: parser.print_help())
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='store_true', dest="verbose", help="Create verbose output")
def add_fetch(fetch):
"""Add the fetch arguments to the fetch command.
:param fetch: The fetch ArgumentParser.
"""
fetch.set_defaults(func=lambda args: FetchReporeter(args).fetch(), config=1)
fetch.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
    fetch.add_argument('--html', action='store_false', dest="html",
                       help="Disable the HTML output. (default: %(default)s)")
    fetch.add_argument('--bib', action='store_true', dest="bib", help="Create a bib output. (default: %(default)s)")
fetch.add_argument('--path', action='store', dest="path",
help="The path where the output is created. (default: %(default)s)", default="output/")
fetch.add_argument('--name', action='store', dest="name", help="The name of the output. (default: %(default)s)",
default="index")
fetch.add_argument('--jQuery', action='store_true', dest="jquery",
help="Copy jQuery version 3.2.1 to the output path. (default: %(default)s)")
api = fetch.add_argument_group(title="API-Configuration",
description="""To interact with the ORCID-API the client-id and client-secret
                                   need to be set or loaded. The default is the sandbox.""")
api.add_argument("--sandbox", action='store_const', const=0, dest="config",
help="Run in the ORCID-Sandbox These need no further options.")
api.add_argument("--db", action='store_const', const=1, dest="config",
help="Load the options out of the SQLite DB.These need that they are added before with db addAPI.")
api.add_argument("--file", action='store', dest="config",
help="""Load the options out of the file that is given. These need that the file is in a json
format that have a field \"client_id\" and \"client_secret\".""")
api.add_argument("--inline", nargs=2, dest="config", help="Give the data inline. First the id then the secret.")
def add_db(db):
"""Add the db arguments and subcommands to the db command
:param db: The db ArgumentParser.
"""
db.set_defaults(func=lambda args: db.print_help() if not args.test else DbCommand(args).create_test())
db.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
db.add_argument('-t', '--test', action='store_true', help=SUPPRESS)
db.add_argument('-h', "--help", action='store_true', help=SUPPRESS)
    dbsubs = db.add_subparsers(title="db", description="Manage the SQLite DB that contains the ORCIDs",
                               metavar="The database functions are:")
    add_dbs = dbsubs.add_parser('add', help='Add a new ORCID to the DB')
add_dbs.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
add_adddb(add_dbs)
    conf_db = dbsubs.add_parser('addConf', help='Add a new config to the DB')
conf_db.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
add_conf(conf_db)
    print_db = dbsubs.add_parser('print', help='Print the content of the database')
print_db.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
print_db.set_defaults(func=lambda args: DbCommand(args).prints())
    clean_db = dbsubs.add_parser('clean', help='Reset the database')
clean_db.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
clean_db.set_defaults(func=lambda args: DbCommand(args).clean())
    create_db = dbsubs.add_parser('create', help='Create a new database')
create_db.add_argument('--dbfile', action='store', dest="dbfile", help="The SQLite DB file that is used.",
default="people.db")
create_db.set_defaults(func=lambda args: DbCommand(args).create())
def add_adddb(add_dbs):
    """Add the arguments to the add command
    :param add_dbs: The db add ArgumentParser.
    """
    add_dbs.add_argument('orchid', action="store", help="The newly added ORCID.")
    add_dbs.add_argument('start', action="store", help="""The date after which the ORCID data is
                         fetched, in the form "YYYY-MM-DD".""")
    add_dbs.add_argument('stop', action="store",
                         help="The date until which the ORCID data is fetched, in the form \"YYYY-MM-DD\".",
                         nargs="?")
    add_dbs.set_defaults(func=lambda args: DbCommand(args).add())
def add_conf(conf_db):
"""Add the arguments to the addConf command
:param conf_db: THe db addConf ArgumentParser.
"""
conf_db.add_argument('cliend_id', action="store", help="The client id of you app.")
conf_db.add_argument('clien_secret', action="store", help="The client secret of you app.")
conf_db.add_argument('auth', action="store", help="The url to authenticate.", nargs="?",
default="https://orcid.org/oauth/token")
conf_db.add_argument('api', action="store", help="The url of the api.", nargs="?",
default="https://pub.orcid.org/v2.0/")
conf_db.set_defaults(func=lambda args: DbCommand(args).add_conf())
|
{
"content_hash": "a605cb63c6adf990e1594eab8bea64a7",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 120,
"avg_line_length": 50.55714285714286,
"alnum_prop": 0.6134501271545635,
"repo_name": "ScaDS/ORC-Schlange",
"id": "24d76fdeae4a9c5e08860fe002bc43b770a3c659",
"size": "7078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ORCSchlange/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18383"
},
{
"name": "Python",
"bytes": "64140"
},
{
"name": "Shell",
"bytes": "118"
},
{
"name": "TeX",
"bytes": "4142"
}
],
"symlink_target": ""
}
|
import logging
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import re
import requests
import StringIO
import tempfile
import app_config
from plugins.base import CarebotPlugin
from util.analytics import GoogleAnalytics
from util.chart import ChartTools
from util.models import Story
from util.s3 import Uploader
s3 = Uploader()
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class NPRScrollDepth(CarebotPlugin):
"""
Get scroll depth stats on NPR stories
"""
SLUG_SEARCH_REGEX = re.compile(ur'slug ((\w*-*)+)')
def get_listeners(self):
"""
Associate regular expression matches to the appropriate handler
"""
return [
['depth', self.SLUG_SEARCH_REGEX, self.handle_slug_inquiry, self.get_wait_message],
# ['linger-url', self.GRUBER_URLINTEXT_PAT, self.handle_url_inquiry],
]
def get_wait_message(self):
return "All right, I'm looking up the scroll depth stats."
def get_slug_query_params(self, team, slug=None):
"""
Given a slug, get parameters needed to query google analytics for the
scroll depth
"""
filters = 'ga:eventLabel==10,ga:eventLabel==20,ga:eventLabel==30,ga:eventLabel==40,ga:eventLabel==50,ga:eventLabel==60,ga:eventLabel==70,ga:eventLabel==80,ga:eventLabel==90,ga:eventLabel==100;ga:eventCategory==%s;ga:eventAction==scroll-depth' % slug
params = {
'ids': 'ga:{0}'.format(team['ga_org_id']),
'start-date': '90daysAgo', # start_date.strftime('%Y-%m-%d'),
'end-date': 'today',
'metrics': 'ga:users,ga:eventValue',
'dimensions': 'ga:eventLabel',
'filters': filters,
'max-results': app_config.GA_RESULT_SIZE,
'samplingLevel': app_config.GA_SAMPLING_LEVEL,
'start-index': 1,
}
return params
@staticmethod
def fill_in_max(data):
"""
        Sometimes people start at 20, 30, or 40% of the article read because their
        screens are large or the article is short.
        fill_in_max finds the starting bucket with the largest number of people
        and fills in all previous buckets with that count.
That way we get an accurate count of how many people read the top of the
article.
"""
max_people = max(data, key=lambda item:item[1])[1]
for row in data:
if row[1] == max_people:
break
row[1] = max_people
# Calculate the percentage of users
for row in data:
pct = round((row[1] / float(max_people)) * 100)
row.append(int(pct))
return data
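    # Worked example (hypothetical buckets of [percent, users]):
    #   fill_in_max([[10, 5], [20, 40], [30, 35]])
    # copies the max count (40) into the 10% bucket and appends percentages:
    #   [[10, 40, 100], [20, 40, 100], [30, 35, 88]]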
def clean_data(self, data):
"""
Fix data types, truncate the data, and otherwise make it fit for
consumption.
"""
rows = []
for row in data:
row[0] = int(row[0]) # Percent depth on page
row[1] = int(row[1]) # Total users
row[2] = int(row[2]) # Seconds on page
rows.append(row)
# Sort the row data from 10% => 100%
# Currently handled by the filter above; legacy from before, but doesn't
# hurt to have.
rows.sort(key=lambda tup: tup[0])
rows = NPRScrollDepth.fill_in_max(rows)
# Only take the first 10 rows.
truncated = rows[:10]
return truncated
def get_median(self, data):
"""
Take the scroll depth data we have (number of people per percent)
Then calculate how many people only got to THAT bucket (aka didn't get
to the next percent bucket)
"""
length = len(data)
for i, row in enumerate(data):
if not i == length - 1:
row[1] = row[1] - data[i + 1][1]
lst = []
# Flatten the [percent, count] tuples
# This is a really inefficient way to do this!
for bucket in data:
for _ in range(bucket[1]):
lst.append(bucket[0])
median = GoogleAnalytics.median(lst)
return int(median)
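    # Worked sketch (hypothetical data after clean_data): with buckets
    #   [[10, 100, ...], [20, 60, ...], [30, 20, ...]]
    # the differencing leaves [40, 40, 20] people per exclusive bucket; the
    # flattened list then holds 40 tens, 40 twenties and 20 thirties, so the
    # median scroll depth is 20.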
def get_total_people(self, data):
"""
Find the tuple with the max number of people.
"""
return max(data, key=lambda item:item[1])[1]
def get_chart(self,
rows,
median=None,
labels=None):
"""
Create a scroll depth histogram
"""
if labels is None:
labels = ['100%', '90%', '80%', '70%', '60%', '50%', '40%', '30%', '20%', '10%']
r = range(1, len(rows) + 1)
data = []
# Rows are drawn "upside down" so we need to reverse them:
rows.reverse()
for row in rows:
data.append(row[3])
# Set the chart size
plt.figure(figsize=(2,4), dpi=100)
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(1, 1, 1)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Configure x-axis ticks
plt.xlim(0, 100)
ax.tick_params(axis='x', colors='#b8b8b8', labelsize=8, labelbottom='off')
plt.axes().xaxis.set_ticks_position('none')
# Configure y-axis ticks
plt.axes().yaxis.set_ticks_position('none')
ax.tick_params(axis='y', colors='#b8b8b8', labelsize=7)
ax.yaxis.label.set_fontsize(10)
plt.yticks(r, labels)
chart = plt.barh(r, data, align="center")
for index, value in enumerate(data):
chart[index].set_color('#4b7ef0')
# TODO: Median line
# for bar in chart:
# width = bar.get_width()
# print width
# print bar.get_y()
# if bar.get_y() == 1.6:
# print
# ax.text(
# bar.get_y() + bar.get_height()/2.,
# 1.05 * width,
# "MED",
# ha='center',
# va='bottom',
# color='#b8b8b8',
# fontsize=8
# )
with tempfile.TemporaryFile(suffix=".png") as tmpfile:
plt.savefig(tmpfile, bbox_inches='tight')
tmpfile.seek(0) # Rewind the file to the beginning
url = s3.upload(tmpfile)
return url
def get_slug_message(self, slug, story=None):
# Try to match the story to a slug to accurately get a team
# The Google Analytics property ID comes from the team config
# We use the default team if none is found
stories = Story.select().where(Story.slug.contains(slug))
team = self.config.get_team_for_stories(stories)
params = self.get_slug_query_params(team=team, slug=slug)
data = GoogleAnalytics.query_ga(params)
if not data.get('rows'):
logger.info('No rows found for slug %s' % slug)
return
# Clean up the data
clean_data = self.clean_data(data.get('rows'))
total_people = self.get_total_people(clean_data)
friendly_people = "{:,}".format(total_people) # Comma-separated #s
median = self.get_median(clean_data)
# Set up the chart
scroll_histogram_url = self.get_chart(clean_data)
if story:
scroll_histogram_url = ChartTools.add_screenshot_to_chart(story,
scroll_histogram_url)
# TODO: Not confident in median calculations so far
# text = "*%s people* got a median of *%s percent* down the page." % (friendly_people, median)
text = ''
attachments = [{
"fallback": slug + " update",
"color": "#eeeeee",
"title": "How far down did people scroll?",
"image_url": scroll_histogram_url
}]
return {
'text': text,
'attachments': attachments
}
def handle_slug_inquiry(self, message):
"""
Respond to an inquiry about the slug with stats and charts
"""
match = re.search(self.SLUG_SEARCH_REGEX, message.body['text'])
slug = match.group(1)
if slug:
return self.get_slug_message(slug)
def get_update_message(self, story):
"""
Only one slug in the story (should be the first) will return scroll
depth results.
TODO: Will need to handle the case when it's not the first slug
reporting depth data.
"""
story_slugs = story.slug_list()
team = self.config.get_team_for_story(story)
return self.get_slug_message(story_slugs[0])
|
{
"content_hash": "e17641d55174e6bb383290423a135897",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 257,
"avg_line_length": 33.14963503649635,
"alnum_prop": 0.5600572498073324,
"repo_name": "thecarebot/carebot",
"id": "0cd3437d0e7d6af7a7d83f4e6d8230f1d7b54d7c",
"size": "9083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/npr/scrolldepth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "203"
},
{
"name": "HTML",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "126000"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
}
|
"""Unit tests for the metadata package."""
from __future__ import with_statement
import path_initializer
path_initializer.InitSysPath()
import copy
import os
import tempfile
from google.apputils import app
import gflags as flags
import unittest
from gcutil_lib import metadata
FLAGS = flags.FLAGS
class MetadataTest(unittest.TestCase):
def testGatherMetadata(self):
flag_values = copy.deepcopy(FLAGS)
metadata_flags_processor = metadata.MetadataFlagsProcessor(flag_values)
handle, path = tempfile.mkstemp()
try:
with os.fdopen(handle, 'w') as metadata_file:
metadata_file.write('metadata file content')
metadata_file.flush()
flag_values.metadata = ['bar:baz']
flag_values.metadata_from_file = ['bar_file:%s' % path]
metadata_entries = metadata_flags_processor.GatherMetadata()
self.assertEqual(len(metadata_entries), 2)
self.assertEqual(metadata_entries[0]['key'], 'bar')
self.assertEqual(metadata_entries[0]['value'], 'baz')
self.assertEqual(metadata_entries[1]['key'], 'bar_file')
self.assertEqual(metadata_entries[1]['value'],
'metadata file content')
finally:
os.remove(path)
def testGatherMetadataWithDuplicateKeys(self):
flag_values = copy.deepcopy(FLAGS)
metadata_flags_processor = metadata.MetadataFlagsProcessor(flag_values)
flag_values.metadata = ['bar:baz', 'bar:foo']
self.assertRaises(app.UsageError, metadata_flags_processor.GatherMetadata)
flag_values.metadata = ['bar:baz', 'bar:foo', 'foo:baz', 'foobar:val']
self.assertRaises(app.UsageError, metadata_flags_processor.GatherMetadata)
flag_values.metadata = ['foo:foo', 'bar:baz', 'bar:foo', 'foo:baz',
'foobar:val']
self.assertRaises(app.UsageError, metadata_flags_processor.GatherMetadata)
handle, path = tempfile.mkstemp()
try:
with os.fdopen(handle, 'w') as metadata_file:
metadata_file.write('metadata file content')
metadata_file.flush()
flag_values.metadata = ['bar:baz']
flag_values.metadata_from_file = ['bar:%s' % metadata_file.name]
self.assertRaises(app.UsageError,
metadata_flags_processor.GatherMetadata)
finally:
os.remove(path)
def testGatherMetadataWithBannedMetadata(self):
flag_values = copy.deepcopy(FLAGS)
metadata_flags_processor = metadata.MetadataFlagsProcessor(flag_values)
flag_values.metadata = [
metadata.INITIAL_WINDOWS_PASSWORD_METADATA_NAME + ':' + 'Pa$$0rd']
self.assertRaises(app.UsageError, metadata_flags_processor.GatherMetadata)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "155605b807abe2e1ca3e0a20de13ae9a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 31.61627906976744,
"alnum_prop": 0.6778227289444648,
"repo_name": "ychen820/microblog",
"id": "02d449e5cfcca91b281cdb5b937fab371e339257",
"size": "3336",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/metadata_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import requests
import datetime
import json
try:
basestring
except NameError:
basestring = (str, bytes)
class AccessError(Exception):
def __init__(self, response):
self.status_code = response.status_code
data = response.json()
super(AccessError, self).__init__(data["message"])
class ArgumentOutOfRangeException(Exception):
def __init__(self, message):
self.message = message.replace('ArgumentOutOfRangeException: ', '')
super(ArgumentOutOfRangeException, self).__init__(self.message)
class TranslateApiException(Exception):
def __init__(self, message, *args):
self.message = message.replace('TranslateApiException: ', '')
super(TranslateApiException, self).__init__(self.message, *args)
class AccessToken(object):
access_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
    expire_delta = datetime.timedelta(minutes=9)  # tokens last 10 minutes; renew a minute early
def __init__(self, subscription_key):
self.subscription_key = subscription_key
self._token = None
self._expdate = None
def __call__(self, r):
r.headers['Authorization'] = "Bearer " + self.token
return r
def request_token(self):
headers = {
'Ocp-Apim-Subscription-Key': self.subscription_key
}
resp = requests.post(self.access_url, headers=headers)
if resp.status_code == 200:
self._token = resp.text
self._expdate = datetime.datetime.now() + self.expire_delta
else:
raise AccessError(resp)
@property
def expired(self):
return datetime.datetime.now() > self._expdate
@property
def token(self):
if not self._token or self.expired:
self.request_token()
return self._token
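    # Usage sketch (hypothetical subscription key): passing the instance as
    # `auth` lets requests invoke __call__, which attaches the Bearer header
    # and transparently renews the token after the 9-minute window:
    #   auth = AccessToken('my-subscription-key')
    #   requests.get(url, auth=auth)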
class Translator(object):
api_url = "https://api.microsofttranslator.com/v2/ajax.svc/"
def __init__(self, subscription_key):
self.auth = AccessToken(subscription_key)
def make_url(self, action):
return self.api_url + action
def make_request(self, action, params=None):
url = self.make_url(action)
resp = requests.get(url, auth=self.auth, params=params)
return self.make_response(resp)
def make_response(self, resp):
resp.encoding = 'UTF-8-sig'
data = resp.json()
if isinstance(data, basestring) and data.startswith("ArgumentOutOfRangeException"):
raise ArgumentOutOfRangeException(data)
if isinstance(data, basestring) and data.startswith("TranslateApiException"):
raise TranslateApiException(data)
return data
def _translate(self, action, text_params, lang_from, lang_to, contenttype, category):
if not lang_to:
raise ValueError('lang_to parameter is required')
if contenttype not in ('text/plain', 'text/html'):
raise ValueError('Invalid contenttype value')
params = {
'to': lang_to,
'contentType': contenttype,
'category': category,
}
if lang_from:
params['from'] = lang_from
params.update(text_params)
return self.make_request(action, params)
def translate(self, text, lang_from=None, lang_to=None,
contenttype='text/plain', category='general'):
params = {
'text': text,
}
return self._translate('Translate', params, lang_from, lang_to,
contenttype, category)
def translate_array(self, texts=[], lang_from=None, lang_to=None,
contenttype='text/plain', category='general'):
params = {
'texts': json.dumps(texts),
}
return self._translate('TranslateArray', params, lang_from, lang_to,
contenttype, category)
def translate_array2(self, texts=[], lang_from=None, lang_to=None,
contenttype='text/plain', category='general'):
params = {
'texts': json.dumps(texts),
}
return self._translate('TranslateArray2', params, lang_from, lang_to,
contenttype, category)
def get_translations(self, text, lang_from, lang_to, max_n=10, contenttype='text/plain', category='general',
url=None, user=None, state=None):
options = {
'Category': category,
'ContentType': contenttype,
}
if url:
options['Uri'] = url
if user:
options['User'] = user
if state:
options['State'] = state
params = {
'text': text,
'to': lang_to,
'from': lang_from,
'maxTranslations': max_n,
'options': json.dumps(options)
}
return self.make_request('GetTranslations', params)
def break_sentences(self, text, lang):
if len(text) > 10000:
raise ValueError('The text maximum length is 10000 characters')
params = {
'text': text,
'language': lang,
}
lengths = self.make_request('BreakSentences', params)
if isinstance(text, bytes):
text = text.decode('utf-8')
c = 0
result = []
for i in lengths:
result.append(text[c:c+i])
c += i
return result
def add_translation(self, text_orig, text_trans, lang_from, lang_to, user, rating=1,
contenttype='text/plain', category='general', url=None):
if len(text_orig) > 1000:
raise ValueError('The original text maximum length is 1000 characters')
if len(text_trans) > 2000:
            raise ValueError('The translated text maximum length is 2000 characters')
if contenttype not in ('text/plain', 'text/html'):
raise ValueError('Invalid contenttype value')
if not -10 < rating < 10 or not isinstance(rating, int):
            raise ValueError('Rating must be an integer value between -10 and 10')
params = {
'originalText': text_orig,
'translatedText': text_trans,
'from': lang_from,
'to': lang_to,
'user': user,
'contentType': contenttype,
'rating': rating,
'category': category,
}
if url:
params['uri'] = url
return self.make_request('AddTranslation', params)
def get_langs(self, speakable=False):
action = 'GetLanguagesForSpeak' if speakable else 'GetLanguagesForTranslate'
return self.make_request(action)
def get_lang_names(self, langs, lang_to):
params = {
'locale': lang_to,
'languageCodes': json.dumps(langs),
}
return self.make_request('GetLanguageNames', params)
def detect_lang(self, text):
return self.make_request('Detect', {'text': text})
def detect_langs(self, texts=[]):
return self.make_request('DetectArray', {'texts': json.dumps(texts)})
def speak(self, text, lang, format='audio/wav', best_quality=False):
if format not in ('audio/wav', 'audio/mp3'):
raise ValueError('Invalid format value')
params = {
'text': text,
'language': lang,
'format': format,
'options': 'MaxQuality' if best_quality else 'MinSize',
}
return self.make_request('Speak', params)
def speak_to_file(self, file, *args, **kwargs):
resp = requests.get(self.speak(*args, **kwargs))
if isinstance(file, basestring):
            with open(file, 'wb') as f:
                f.write(resp.content)
elif hasattr(file, 'write'):
file.write(resp.content)
else:
raise ValueError('Expected filepath or a file-like object')
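# Example usage (a minimal sketch; '<subscription-key>' is a placeholder for
# a real Azure Cognitive Services subscription key):
#
#   translator = Translator('<subscription-key>')
#   print(translator.translate('Hello, world!', lang_to='de'))
#   print(translator.detect_lang('Bonjour'))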
|
{
"content_hash": "fe45c4b7465d33e0b1dceb985f6912f1",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 112,
"avg_line_length": 34.61739130434783,
"alnum_prop": 0.5787490580256217,
"repo_name": "wronglink/mstranslator",
"id": "5aa20576a2cb4d6e0e51371168468931c0e4a12b",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mstranslator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12906"
}
],
"symlink_target": ""
}
|
'''
TabbedPanel
===========
.. image:: images/tabbed_panel.jpg
:align: right
.. versionadded:: 1.3.0
.. warning::
This widget is still experimental, and its API is subject to change in a
future version.
The `TabbedPanel` widget manages different widgets in tabs, with a header area
for the actual tab buttons and a content area for showing the current tab
content.
The :class:`TabbedPanel` provides one default tab.
Simple example
--------------
.. include:: ../../examples/widgets/tabbedpanel.py
:literal:
.. note::
A new class :class:`TabbedPanelItem` has been introduced in 1.5.0 for
convenience. So now one can simply add a :class:`TabbedPanelItem` to a
:class:`TabbedPanel` and `content` to the :class:`TabbedPanelItem`
as in the example provided above.
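For instance (a minimal sketch, assuming the usual kivy.uix imports)::

    tp = TabbedPanel()
    ti = TabbedPanelItem(text='Tab 2')
    ti.add_widget(Label(text='Tab 2 content'))
    tp.add_widget(ti)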
Customize the Tabbed Panel
--------------------------
You can choose the position in which the tabs are displayed::
tab_pos = 'top_mid'
An individual tab is called a TabbedPanelHeader. It is a special button
containing a `content` property. You add the TabbedPanelHeader first, and set
its `content` property separately::
tp = TabbedPanel()
th = TabbedPanelHeader(text='Tab2')
tp.add_widget(th)
An individual tab, represented by a TabbedPanelHeader, needs its content set.
This content can be any widget. It could be a layout with a deep
hierarchy of widgets, or it could be an individual widget, such as a label or a
button::
th.content = your_content_instance
There is one "shared" main content area active at any given time, for all
the tabs. Your app is responsible for adding the content of individual tabs
and for managing them, but it's not responsible for content switching. The
tabbed panel handles switching of the main content object as per user action.
.. note::
The default_tab functionality is turned off by default since 1.5.0. To
turn it back on, set `do_default_tab` = True.
There is a default tab added when the tabbed panel is instantiated.
Tabs that you add individually, as above, are added in addition to the default
tab. Thus, depending on your needs and design, you will want to customize the
default tab::
tp.default_tab_text = 'Something Specific To Your Use'
The default tab machinery requires special consideration and management.
Accordingly, an `on_default_tab` event is provided for associating a callback::
tp.bind(default_tab = my_default_tab_callback)
It's important to note that by default, :data:`default_tab_cls` is of type
:class:`TabbedPanelHeader` and thus has the same properties as other tabs.
Since 1.5.0, it is now possible to disable the creation of the
:data:`default_tab` by setting :data:`do_default_tab` to False.
Tabs and content can be removed in several ways::
tp.remove_widget(widget/tabbed_panel_header)
or
tp.clear_widgets() # to clear all the widgets in the content area
or
tp.clear_tabs() # to remove the TabbedPanelHeaders
To access the children of the tabbed panel, use content.children::
tp.content.children
To access the list of tabs::
tp.tab_list
To change the appearance of the main tabbed panel content::
background_color = (1, 0, 0, .5) #50% translucent red
border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
To change the background of an individual tab, use these two properties::
tab_header_instance.background_normal = 'path/to/tab_head/img'
tab_header_instance.background_down = 'path/to/tab_head/img_pressed'
A TabbedPanelStrip contains the individual tab headers. To change the
appearance of this tab strip, override the canvas of TabbedPanelStrip.
For example, in the kv language::
<TabbedPanelStrip>
canvas:
Color:
rgba: (0, 1, 0, 1) # green
Rectangle:
size: self.size
pos: self.pos
By default the tabbed panel strip takes its background image and color from the
tabbed panel's background_image and background_color.
'''
__all__ = ('StripLayout', 'TabbedPanel', 'TabbedPanelContent',
           'TabbedPanelHeader', 'TabbedPanelItem', 'TabbedPanelStrip',
'TabbedPanelException')
from functools import partial
from kivy.clock import Clock
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, NumericProperty, AliasProperty, BooleanProperty
class TabbedPanelException(Exception):
'''The TabbedPanelException class.
'''
pass
class TabbedPanelHeader(ToggleButton):
'''A Base for implementing a Tabbed Panel Head. A button intended to be
used as a Heading/Tab for a TabbedPanel widget.
You can use this TabbedPanelHeader widget to add a new tab to a
TabbedPanel.
'''
content = ObjectProperty(None, allownone=True)
'''Content to be loaded when this tab header is selected.
:data:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
# only allow selecting the tab if not already selected
def on_touch_down(self, touch):
if self.state == 'down':
#dispatch to children, not to self
for child in self.children:
child.dispatch('on_touch_down', touch)
return
else:
            return super(TabbedPanelHeader, self).on_touch_down(touch)
def on_release(self, *largs):
        # Tabbed panel header is a child of tab_strip which has a
        # `tabbed_panel` property
if self.parent:
self.parent.tabbed_panel.switch_to(self)
else:
# tab removed before we could switch to it. Switch back to
# previous tab
self.panel.switch_to(self.panel.current_tab)
class TabbedPanelItem(TabbedPanelHeader):
'''This is a convenience class that provides a header of type
TabbedPanelHeader and links it with the content automatically. Thus
facilitating you to simply do the following in kv language::
<TabbedPanel>:
...other settings
TabbedPanelItem:
BoxLayout:
Label:
text: 'Second tab content area'
Button:
text: 'Button that does nothing'
.. versionadded:: 1.5.0
'''
def add_widget(self, widget, index=0):
self.content = widget
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.switch_to(self)
def remove_widget(self, widget):
self.content = None
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.remove_widget(widget)
class TabbedPanelStrip(GridLayout):
'''A strip intended to be used as background for Heading/Tab.
    This does not cover the blank areas in case the tabs don't cover
    the entire width/height of the TabbedPanel (use StripLayout for that).
'''
tabbed_panel = ObjectProperty(None)
'''Link to the panel that the tab strip is a part of.
:data:`tabbed_panel` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
'''
class StripLayout(GridLayout):
''' The main layout that is used to house the entire tabbedpanel strip
including the blank areas in case the tabs don't cover the entire
width/height.
.. versionadded:: 1.8.0
'''
border = ListProperty([4, 4, 4, 4])
'''Border property for the :data:`background_image`.
:data:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to [4, 4, 4, 4]
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image to be used for the Strip layout of the TabbedPanel.
:data:`background_image` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/action_view'.
'''
class TabbedPanelContent(FloatLayout):
'''The TabbedPanelContent class.
'''
pass
class TabbedPanel(GridLayout):
'''The TabbedPanel class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:data:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction, used itself for :data:`background_image`.
Can be changed for a custom background.
It must be a list of four values: (top, right, bottom, left). Read the
BorderImage instructions for more information.
:data:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (16, 16, 16, 16)
'''
background_image = StringProperty('atlas://data/images/defaulttheme/tab')
'''Background image of the main shared content object.
:data:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/tab'.
'''
background_disabled_image = StringProperty(
'atlas://data/images/defaulttheme/tab_disabled')
'''Background image of the main shared content object when disabled.
.. versionadded:: 1.8.0
:data:`background_disabled_image` is a
:class:`~kivy.properties.StringProperty` and defaults to
    'atlas://data/images/defaulttheme/tab_disabled'.
'''
strip_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image of the tabbed strip.
.. versionadded:: 1.8.0
    :data:`strip_image` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/action_view'.
'''
strip_border = ListProperty([4, 4, 4, 4])
'''Border to be used on :data:`strip_image`.
.. versionadded:: 1.8.0
:data:`strip_border` is a :class:`~kivy.properties.ListProperty` and defaults
to [4, 4, 4, 4].
'''
_current_tab = ObjectProperty(None)
def get_current_tab(self):
return self._current_tab
current_tab = AliasProperty(get_current_tab, None, bind=('_current_tab', ))
'''Links to the currently selected or active tab.
.. versionadded:: 1.4.0
    :data:`current_tab` is an :class:`~kivy.properties.AliasProperty`, read-only.
'''
tab_pos = OptionProperty(
'top_left',
options=('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the tabs relative to the content.
Can be one of: `left_top`, `left_mid`, `left_bottom`, `top_left`,
`top_mid`, `top_right`, `right_top`, `right_mid`, `right_bottom`,
`bottom_left`, `bottom_mid`, `bottom_right`.
:data:`tab_pos` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'top_left'.
'''
tab_height = NumericProperty('40dp')
'''Specifies the height of the tab header.
:data:`tab_height` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '40dp'.
'''
tab_width = NumericProperty('100dp', allownone=True)
'''Specifies the width of the tab header.
:data:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '100dp'.
'''
do_default_tab = BooleanProperty(True)
'''Specifies whether a default_tab head is provided.
.. versionadded:: 1.5.0
:data:`do_default_tab` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
'''
default_tab_text = StringProperty('Default tab')
'''Specifies the text displayed on the default tab header.
:data:`default_tab_text` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'Default tab'.
'''
default_tab_cls = ObjectProperty(TabbedPanelHeader)
'''Specifies the class to use for the styling of the default tab.
.. versionadded:: 1.4.0
.. warning::
`default_tab_cls` should be subclassed from `TabbedPanelHeader`
:data:`default_tab_cls` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `TabbedPanelHeader`.
'''
def get_tab_list(self):
if self._tab_strip:
return self._tab_strip.children
        return []
tab_list = AliasProperty(get_tab_list, None)
'''List of all the tab headers.
:data:`tab_list` is an :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
content = ObjectProperty(None)
    '''The object that holds the content of the current tab (the
    current_tab's content is added to this). To listen for changes to the
    content of the current tab, bind to the current_tab's `content` property.
    :data:`content` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
'''
    _default_tab = ObjectProperty(None, allownone=True)
def get_def_tab(self):
return self._default_tab
def set_def_tab(self, new_tab):
if not issubclass(new_tab.__class__, TabbedPanelHeader):
            raise TabbedPanelException('`default_tab` should be subclassed '
                                       'from `TabbedPanelHeader`')
if self._default_tab == new_tab:
return
oltab = self._default_tab
self._default_tab = new_tab
self.remove_widget(oltab)
self._original_tab = None
self.switch_to(new_tab)
new_tab.state = 'down'
default_tab = AliasProperty(get_def_tab, set_def_tab,
bind=('_default_tab', ))
'''Holds the default tab.
.. Note:: For convenience, the automatically provided default tab is
deleted when you change default_tab to something else.
As of 1.5.0, this behaviour has been extended to every
`default_tab` for consistency and not just the automatically
provided one.
:data:`default_tab` is an :class:`~kivy.properties.AliasProperty`.
'''
def get_def_tab_content(self):
return self.default_tab.content
def set_def_tab_content(self, *l):
self.default_tab.content = l[0]
default_tab_content = AliasProperty(get_def_tab_content,
set_def_tab_content)
'''Holds the default tab content.
:data:`default_tab_content` is an :class:`~kivy.properties.AliasProperty`.
'''
def __init__(self, **kwargs):
        # these variables need to be initialized before the kv lang is
        # processed. Set up the base layout for the tabbed panel.
self._childrens = []
self._tab_layout = StripLayout(rows=1)
self.rows = 1
self._tab_strip = TabbedPanelStrip(
tabbed_panel=self,
rows=1, cols=99, size_hint=(None, None),
height=self.tab_height, width=self.tab_width)
self._partial_update_scrollview = None
self.content = TabbedPanelContent()
self._current_tab = self._original_tab \
= self._default_tab = TabbedPanelHeader()
super(TabbedPanel, self).__init__(**kwargs)
self.bind(size=self._reposition_tabs)
if not self.do_default_tab:
Clock.schedule_once(self._switch_to_first_tab)
return
self._setup_default_tab()
self.switch_to(self.default_tab)
def switch_to(self, header):
'''Switch to a specific panel header.
'''
header_content = header.content
self._current_tab.state = 'normal'
header.state = 'down'
self._current_tab = header
self.clear_widgets()
if header_content is None:
return
# if content has a previous parent remove it from that parent
parent = header_content.parent
if parent:
parent.remove_widget(header_content)
self.add_widget(header_content)
def clear_tabs(self, *l):
self_tabs = self._tab_strip
self_tabs.clear_widgets()
if self.do_default_tab:
self_default_tab = self._default_tab
self_tabs.add_widget(self_default_tab)
self_tabs.width = self_default_tab.width
self._reposition_tabs()
def add_widget(self, widget, index=0):
content = self.content
if content is None:
return
parent = widget.parent
if parent:
parent.remove_widget(widget)
if widget in (content, self._tab_layout):
super(TabbedPanel, self).add_widget(widget, index)
elif isinstance(widget, TabbedPanelHeader):
self_tabs = self._tab_strip
self_tabs.add_widget(widget)
widget.group = '__tab%r__' % self_tabs.uid
self.on_tab_width()
else:
widget.pos_hint = {'x': 0, 'top': 1}
self._childrens.append(widget)
content.disabled = self.current_tab.disabled
content.add_widget(widget, index)
def remove_widget(self, widget):
content = self.content
if content is None:
return
if widget in (content, self._tab_layout):
super(TabbedPanel, self).remove_widget(widget)
elif isinstance(widget, TabbedPanelHeader):
if not (self.do_default_tab and widget is self._default_tab):
self_tabs = self._tab_strip
self_tabs.width -= widget.width
self_tabs.remove_widget(widget)
if widget.state == 'down' and self.do_default_tab:
self._default_tab.on_release()
self._reposition_tabs()
else:
Logger.info('TabbedPanel: default tab! can\'t be removed.\n' +
'Change `default_tab` to a different tab.')
else:
            if widget in self._childrens:
                self._childrens.remove(widget)
if widget in content.children:
content.remove_widget(widget)
def clear_widgets(self, **kwargs):
content = self.content
if content is None:
return
if kwargs.get('do_super', False):
super(TabbedPanel, self).clear_widgets()
else:
content.clear_widgets()
def on_strip_image(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.background_image = value
def on_strip_border(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.border = value
def on_do_default_tab(self, instance, value):
if not value:
dft = self.default_tab
if dft in self.tab_list:
self.remove_widget(dft)
self._switch_to_first_tab()
self._default_tab = self._current_tab
else:
self._current_tab.state = 'normal'
self._setup_default_tab()
def on_default_tab_text(self, *args):
self._default_tab.text = self.default_tab_text
def on_tab_width(self, *l):
Clock.unschedule(self._update_tab_width)
Clock.schedule_once(self._update_tab_width, 0)
def on_tab_height(self, *l):
self._tab_layout.height = self._tab_strip.height = self.tab_height
self._reposition_tabs()
def on_tab_pos(self, *l):
# ensure canvas
self._reposition_tabs()
def _setup_default_tab(self):
if self._default_tab in self.tab_list:
return
content = self._default_tab.content
_tabs = self._tab_strip
cls = self.default_tab_cls
if not issubclass(cls, TabbedPanelHeader):
            raise TabbedPanelException('`default_tab_cls` should be '
                                       'subclassed from `TabbedPanelHeader`')
        # no need to instantiate if class is TabbedPanelHeader
if cls != TabbedPanelHeader:
self._current_tab = self._original_tab = self._default_tab = cls()
default_tab = self.default_tab
if self._original_tab == self.default_tab:
default_tab.text = self.default_tab_text
default_tab.height = self.tab_height
default_tab.group = '__tab%r__' % _tabs.uid
default_tab.state = 'down'
default_tab.width = self.tab_width if self.tab_width else 100
default_tab.content = content
tl = self.tab_list
if default_tab not in tl:
_tabs.add_widget(default_tab, len(tl))
if default_tab.content:
self.clear_widgets()
self.add_widget(self.default_tab.content)
else:
Clock.schedule_once(self._load_default_tab_content)
self._current_tab = default_tab
def _switch_to_first_tab(self, *l):
ltl = len(self.tab_list) - 1
if ltl > -1:
self._current_tab = dt = self._original_tab \
= self.tab_list[ltl]
self.switch_to(dt)
def _load_default_tab_content(self, dt):
if self.default_tab:
self.switch_to(self.default_tab)
def _reposition_tabs(self, *l):
Clock.unschedule(self._update_tabs)
Clock.schedule_once(self._update_tabs, 0)
def _update_tabs(self, *l):
self_content = self.content
if not self_content:
return
# cache variables for faster access
tab_pos = self.tab_pos
tab_layout = self._tab_layout
tab_layout.clear_widgets()
scrl_v = ScrollView(size_hint=(None, 1))
tabs = self._tab_strip
parent = tabs.parent
if parent:
parent.remove_widget(tabs)
scrl_v.add_widget(tabs)
scrl_v.pos = (0, 0)
self_update_scrollview = self._update_scrollview
        # update scrl_v width when the tab width changes, depending on tab_pos
if self._partial_update_scrollview is not None:
tabs.unbind(width=self._partial_update_scrollview)
self._partial_update_scrollview = partial(
self_update_scrollview, scrl_v)
tabs.bind(width=self._partial_update_scrollview)
# remove all widgets from the tab_strip
self.clear_widgets(do_super=True)
tab_height = self.tab_height
widget_list = []
tab_list = []
pos_letter = tab_pos[0]
if pos_letter == 'b' or pos_letter == 't':
# bottom or top positions
# one col containing the tab_strip and the content
self.cols = 1
self.rows = 2
# tab_layout contains the scrollview containing tabs and two blank
# dummy widgets for spacing
tab_layout.rows = 1
tab_layout.cols = 3
tab_layout.size_hint = (1, None)
tab_layout.height = tab_height + tab_layout.padding[1] +\
tab_layout.padding[3] + dp(2)
self_update_scrollview(scrl_v)
if pos_letter == 'b':
# bottom
if tab_pos == 'bottom_mid':
tab_list = (Widget(), scrl_v, Widget())
widget_list = (self_content, tab_layout)
else:
if tab_pos == 'bottom_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'bottom_right':
#add two dummy widgets
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (self_content, tab_layout)
else:
# top
if tab_pos == 'top_mid':
tab_list = (Widget(), scrl_v, Widget())
elif tab_pos == 'top_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'top_right':
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (tab_layout, self_content)
elif pos_letter == 'l' or pos_letter == 'r':
            # left or right positions
# one row containing the tab_strip and the content
self.cols = 2
self.rows = 1
# tab_layout contains two blank dummy widgets for spacing
# "vertically" and the scatter containing scrollview
# containing tabs
tab_layout.rows = 3
tab_layout.cols = 1
tab_layout.size_hint = (None, 1)
tab_layout.width = tab_height
scrl_v.height = tab_height
self_update_scrollview(scrl_v)
# rotate the scatter for vertical positions
rotation = 90 if tab_pos[0] == 'l' else -90
sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
auto_bring_to_front=False,
size=scrl_v.size)
sctr.add_widget(scrl_v)
lentab_pos = len(tab_pos)
            # Update the scatter's top when its pos changes.
            # Needed for repositioning the scatter to the correct place after
            # it's added to the parent. Use Clock.schedule_once to ensure top
            # is calculated after the parent's pos on canvas has been
            # calculated. This is needed when tab_pos changes, to correctly
            # position the scatter. Without Clock.schedule_once the positions
            # would look fine but touch wouldn't translate to the correct
            # position.
if tab_pos[lentab_pos - 4:] == '_top':
#on positions 'left_top' and 'right_top'
sctr.bind(pos=partial(self._update_top, sctr, 'top', None))
tab_list = (sctr, )
elif tab_pos[lentab_pos - 4:] == '_mid':
#calculate top of scatter
sctr.bind(pos=partial(self._update_top, sctr, 'mid',
scrl_v.width))
tab_list = (Widget(), sctr, Widget())
elif tab_pos[lentab_pos - 7:] == '_bottom':
tab_list = (Widget(), Widget(), sctr)
if pos_letter == 'l':
widget_list = (tab_layout, self_content)
else:
widget_list = (self_content, tab_layout)
# add widgets to tab_layout
add = tab_layout.add_widget
for widg in tab_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_tab_width(self, *l):
if self.tab_width:
for tab in self.tab_list:
tab.size_hint_x = 1
tsw = self.tab_width * len(self._tab_strip.children)
else:
# tab_width = None
tsw = 0
for tab in self.tab_list:
if tab.size_hint_x:
# size_hint_x: x/.xyz
tab.size_hint_x = 1
#drop to default tab_width
tsw += 100
else:
# size_hint_x: None
tsw += tab.width
self._tab_strip.width = tsw
self._reposition_tabs()
def _update_top(self, *args):
sctr, top, scrl_v_width, x, y = args
Clock.unschedule(partial(self._updt_top, sctr, top, scrl_v_width))
Clock.schedule_once(
partial(self._updt_top, sctr, top, scrl_v_width), 0)
def _updt_top(self, sctr, top, scrl_v_width, *args):
if top[0] == 't':
sctr.top = self.top
else:
sctr.top = self.top - (self.height - scrl_v_width) / 2
def _update_scrollview(self, scrl_v, *l):
self_tab_pos = self.tab_pos
self_tabs = self._tab_strip
if self_tab_pos[0] == 'b' or self_tab_pos[0] == 't':
#bottom or top
scrl_v.width = min(self.width, self_tabs.width)
#required for situations when scrl_v's pos is calculated
#when it has no parent
scrl_v.top += 1
scrl_v.top -= 1
else:
# left or right
scrl_v.width = min(self.height, self_tabs.width)
self_tabs.pos = (0, 0)
|
{
"content_hash": "6f338341c17fc54639519468d818fdf9",
"timestamp": "",
"source": "github",
"line_count": 823,
"max_line_length": 82,
"avg_line_length": 34.72904009720535,
"alnum_prop": 0.6006927436848366,
"repo_name": "hansent/kivy",
"id": "59b0dc9b96e24245595a65d53ae34d7aa1097f5f",
"size": "28582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy/uix/tabbedpanel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "153750"
},
{
"name": "CSS",
"bytes": "6827"
},
{
"name": "Emacs Lisp",
"bytes": "9603"
},
{
"name": "F#",
"bytes": "289"
},
{
"name": "JavaScript",
"bytes": "11300"
},
{
"name": "Python",
"bytes": "2900209"
},
{
"name": "Shell",
"bytes": "6236"
},
{
"name": "TeX",
"bytes": "4271"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
"""
Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
from sqlalchemy import pool
from sqlalchemy.orm import sessionmaker
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _ENGINE
global _MAKER
if not _MAKER:
if not _ENGINE:
kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
'echo': False}
if FLAGS.sql_connection.startswith('sqlite'):
kwargs['poolclass'] = pool.NullPool
_ENGINE = create_engine(FLAGS.sql_connection,
**kwargs)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
session.query = exception.wrap_db_error(session.query)
session.flush = exception.wrap_db_error(session.flush)
return session
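# Example usage (a sketch; `SomeModel` is a placeholder for a mapped class
# and FLAGS.sql_connection is assumed to be configured):
#
#   session = get_session()
#   rows = session.query(SomeModel).all()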
|
{
"content_hash": "1ed2ed60c2ef08894011e6822f18beec",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 67,
"avg_line_length": 28.394736842105264,
"alnum_prop": 0.6070435588507878,
"repo_name": "superstack/nova",
"id": "4a9a28f4300981850a86db30330745b633e480a6",
"size": "1855",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2491049"
},
{
"name": "Shell",
"bytes": "31698"
}
],
"symlink_target": ""
}
|
"""End-to-end test for the streaming wordcount example with debug."""
# pytype: skip-file
import logging
import unittest
import uuid
import pytest
from hamcrest.core.core.allof import all_of
from apache_beam.examples import streaming_wordcount_debugging
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.runners.runner import PipelineState
from apache_beam.testing import test_utils
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
INPUT_TOPIC = 'wc_topic_input'
OUTPUT_TOPIC = 'wc_topic_output'
INPUT_SUB = 'wc_subscription_input'
OUTPUT_SUB = 'wc_subscription_output'
SAMPLE_MESSAGES = [
'150', '151', '152', '153', '154', '210', '211', '212', '213', '214'
]
EXPECTED_MESSAGE = [
'150: 1',
'151: 1',
'152: 1',
'153: 1',
'154: 1',
'210: 1',
'211: 1',
'212: 1',
'213: 1',
'214: 1'
]
WAIT_UNTIL_FINISH_DURATION = 6 * 60 * 1000 # in milliseconds
class StreamingWordcountDebuggingIT(unittest.TestCase):
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.project = self.test_pipeline.get_option('project')
self.setup_pubsub()
def setup_pubsub(self):
self.uuid = str(uuid.uuid4())
# Set up PubSub environment.
from google.cloud import pubsub
self.pub_client = pubsub.PublisherClient()
self.input_topic = self.pub_client.create_topic(
name=self.pub_client.topic_path(self.project, INPUT_TOPIC + self.uuid))
self.output_topic = self.pub_client.create_topic(
name=self.pub_client.topic_path(self.project, OUTPUT_TOPIC + self.uuid))
self.sub_client = pubsub.SubscriberClient()
self.input_sub = self.sub_client.create_subscription(
name=self.sub_client.subscription_path(
self.project, INPUT_SUB + self.uuid),
topic=self.input_topic.name)
self.output_sub = self.sub_client.create_subscription(
name=self.sub_client.subscription_path(
self.project, OUTPUT_SUB + self.uuid),
topic=self.output_topic.name,
ack_deadline_seconds=60)
def _inject_data(self, topic, data):
"""Inject numbers as test data to PubSub."""
logging.debug('Injecting test data to topic %s', topic.name)
for n in data:
self.pub_client.publish(self.input_topic.name, str(n).encode('utf-8'))
def tearDown(self):
test_utils.cleanup_subscriptions(
self.sub_client, [self.input_sub, self.output_sub])
test_utils.cleanup_topics(
self.pub_client, [self.input_topic, self.output_topic])
@pytest.mark.it_postcommit
@unittest.skip(
"Skipped due to [BEAM-3377]: assert_that not working for streaming")
def test_streaming_wordcount_debugging_it(self):
# Set extra options to the pipeline for test purpose
state_verifier = PipelineStateMatcher(PipelineState.RUNNING)
pubsub_msg_verifier = PubSubMessageMatcher(
self.project, self.output_sub.name, EXPECTED_MESSAGE, timeout=400)
extra_opts = {
'input_subscription': self.input_sub.name,
'output_topic': self.output_topic.name,
'wait_until_finish_duration': WAIT_UNTIL_FINISH_DURATION,
'on_success_matcher': all_of(state_verifier, pubsub_msg_verifier)
}
# Generate input data and inject to PubSub.
self._inject_data(self.input_topic, SAMPLE_MESSAGES)
# Get pipeline options from command argument: --test-pipeline-options,
# and start pipeline job by calling pipeline main function.
streaming_wordcount_debugging.run(
self.test_pipeline.get_full_options_as_args(**extra_opts),
save_main_session=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "4d305ea734b21e8d8367293d6e15088d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 80,
"avg_line_length": 34.41818181818182,
"alnum_prop": 0.6925515055467512,
"repo_name": "robertwb/incubator-beam",
"id": "b0c041c05ca0cbb6565c4296f7971ad4e6d7d1ef",
"size": "4571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/streaming_wordcount_debugging_it_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "59582"
},
{
"name": "Dart",
"bytes": "541526"
},
{
"name": "Dockerfile",
"bytes": "48191"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "4688736"
},
{
"name": "Groovy",
"bytes": "888171"
},
{
"name": "HCL",
"bytes": "101646"
},
{
"name": "HTML",
"bytes": "164685"
},
{
"name": "Java",
"bytes": "38649211"
},
{
"name": "JavaScript",
"bytes": "105966"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "209531"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "9785295"
},
{
"name": "SCSS",
"bytes": "312814"
},
{
"name": "Sass",
"bytes": "19336"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "336583"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "181369"
}
],
"symlink_target": ""
}
|
from django.forms.fields import BooleanField
from django.test.client import RequestFactory
from django.utils.safestring import SafeText
from django.utils.translation import ugettext_lazy as _
import mock
from nose.tools import eq_, ok_
import mkt
import mkt.site.tests
from mkt.comm.models import CommunicationNote
from mkt.constants.features import APP_FEATURES
from mkt.developers.models import AppLog
from mkt.files.models import FileUpload
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.tests import user_factory
from mkt.submit import forms
from mkt.users.models import UserProfile
from mkt.webapps.models import AppFeatures, Webapp
class TestNewWebappForm(mkt.site.tests.TestCase):
def setUp(self):
self.request = RequestFactory().get('/')
self.request.user = user_factory()
self.file = FileUpload.objects.create(valid=True)
self.file.user = self.request.user
self.file.save()
def test_no_user(self):
self.file.user = None
self.file.save()
form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
'upload': self.file.uuid},
request=self.request)
assert not form.is_valid()
eq_(form.ERRORS['user'], form.errors['upload'])
def test_correct_user(self):
form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
'upload': self.file.uuid},
request=self.request)
assert form.is_valid(), form.errors
def test_incorrect_user(self):
self.file.user = user_factory()
self.file.save()
form = forms.NewWebappForm({'upload': self.file.uuid},
request=self.request)
assert not form.is_valid()
eq_(form.ERRORS['user'], form.errors['upload'])
def test_not_packaged(self):
form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
'upload': self.file.uuid})
assert form.is_valid(), form.errors
assert not form.is_packaged()
@mock.patch('mkt.submit.forms.parse_addon',
lambda *args: {'version': None})
def test_packaged_allowed_everywhere(self):
for device in ('free-firefoxos',
'free-desktop',
'free-android-tablet',
'free-android-mobile'):
form = forms.NewWebappForm({'free_platforms': [device],
'upload': self.file.uuid,
'packaged': True},
request=self.request)
assert form.is_valid(), form.errors
assert form.is_packaged()
class TestNewWebappVersionForm(mkt.site.tests.TestCase):
def setUp(self):
self.request = RequestFactory().get('/')
self.file = FileUpload.objects.create(valid=True)
def test_no_upload(self):
form = forms.NewWebappVersionForm(request=self.request,
is_packaged=True)
assert not form.is_valid(), form.errors
@mock.patch('mkt.submit.forms.parse_addon',
lambda *args: {"origin": "app://hy.fr"})
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_verify_app_domain_called(self, _verify):
self.create_switch('webapps-unique-by-domain')
form = forms.NewWebappVersionForm({'upload': self.file.uuid},
request=self.request,
is_packaged=True)
assert form.is_valid(), form.errors
assert _verify.called
@mock.patch('mkt.submit.forms.parse_addon',
lambda *args: {"origin": "app://hy.fr"})
def test_verify_app_domain_exclude_same(self):
app = mkt.site.tests.app_factory(app_domain='app://hy.fr')
form = forms.NewWebappVersionForm(
{'upload': self.file.uuid}, request=self.request, is_packaged=True,
addon=app)
assert form.is_valid(), form.errors
@mock.patch('mkt.submit.forms.parse_addon',
lambda *args: {"origin": "app://hy.fr"})
def test_verify_app_domain_exclude_different(self):
app = mkt.site.tests.app_factory(app_domain='app://yo.lo')
mkt.site.tests.app_factory(app_domain='app://hy.fr')
form = forms.NewWebappVersionForm(
{'upload': self.file.uuid}, request=self.request, is_packaged=True,
addon=app)
assert not form.is_valid(), form.errors
assert ('An app already exists on this domain; '
'only one app per domain is allowed.' in form.errors['upload'])
class TestAppDetailsBasicForm(mkt.site.tests.TestCase):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.request = mock.Mock()
self.request.user = UserProfile.objects.get(id=999)
self.request.groups = ()
def get_app(self):
return Webapp.objects.get(pk=337141)
def get_data(self, **kwargs):
default = {
'app_slug': 'thisIsAslug',
'description': '...',
'privacy_policy': '...',
'support_email': 'test@example.com',
'notes': '',
'publish_type': mkt.PUBLISH_IMMEDIATE,
}
default.update(kwargs)
return default
def test_slug(self):
app = self.get_app()
form = forms.AppDetailsBasicForm(self.get_data(), request=self.request,
instance=app)
assert form.is_valid(), form.errors
form.save()
eq_(app.app_slug, 'thisisaslug')
def test_comm_thread(self):
app = self.get_app()
note_body = 'please approve this app'
form = forms.AppDetailsBasicForm(self.get_data(notes=note_body),
request=self.request, instance=app)
assert form.is_valid(), form.errors
form.save()
notes = CommunicationNote.objects.all()
eq_(notes.count(), 1)
eq_(notes[0].body, note_body)
def test_publish_type(self):
app = self.get_app()
form = forms.AppDetailsBasicForm(
self.get_data(publish_type=mkt.PUBLISH_PRIVATE),
request=self.request, instance=app)
assert form.is_valid(), form.errors
form.save()
eq_(app.publish_type, mkt.PUBLISH_PRIVATE)
def test_help_text_uses_safetext_and_includes_url(self):
app = self.get_app()
form = forms.AppDetailsBasicForm(
self.get_data(publish_type=mkt.PUBLISH_PRIVATE),
request=self.request, instance=app)
help_text = form.base_fields['privacy_policy'].help_text
eq_(type(help_text), SafeText)
ok_('{url}' not in help_text)
ok_(form.PRIVACY_MDN_URL in help_text)
def test_is_offline_guess_false(self):
app = self.get_app()
app.guess_is_offline = lambda: False
assert not app.is_offline
forms.AppDetailsBasicForm(
self.get_data(),
request=self.request,
instance=app)
assert not app.is_offline
def test_is_offline_guess_false_override(self):
app = self.get_app()
app.guess_is_offline = lambda: False
form = forms.AppDetailsBasicForm(
self.get_data(is_offline=True),
request=self.request,
instance=app)
assert form.is_valid(), form.errors
form.save()
eq_(app.is_offline, True)
def test_is_offline_guess_true(self):
app = self.get_app()
app.guess_is_offline = lambda: True
assert not app.is_offline
forms.AppDetailsBasicForm(
self.get_data(is_offline=None),
request=self.request,
instance=app)
assert app.is_offline
def test_is_offline_guess_true_override(self):
app = self.get_app()
app.guess_is_offline = lambda: True
form = forms.AppDetailsBasicForm(
self.get_data(is_offline=False),
request=self.request,
instance=app)
assert form.is_valid(), form.errors
form.save()
eq_(app.is_offline, False)
def test_tags(self):
app = self.get_app()
form = forms.AppDetailsBasicForm(
self.get_data(tags='card games, poker'), request=self.request,
instance=app)
assert form.is_valid(), form.errors
form.save()
eq_(app.tags.count(), 2)
self.assertSetEqual(
app.tags.values_list('tag_text', flat=True),
['card games', 'poker'])
class TestAppFeaturesForm(mkt.site.tests.TestCase):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
mkt.set_user(UserProfile.objects.all()[0])
self.form = forms.AppFeaturesForm()
self.app = Webapp.objects.get(pk=337141)
self.features = self.app.current_version.features
def _check_log(self, action):
assert AppLog.objects.filter(
addon=self.app, activity_log__action=action.id).exists(), (
"Didn't find `%s` action in logs." % action.short)
def test_required(self):
f_names = self.form.fields.keys()
for value in (True, False):
form = forms.AppFeaturesForm(dict((n, value) for n in f_names))
eq_(form.is_valid(), True, form.errors)
def test_correct_fields(self):
fields = self.form.fields
f_values = fields.values()
assert 'version' not in fields
assert all(isinstance(f, BooleanField) for f in f_values)
self.assertSetEqual(fields, AppFeatures()._fields())
def test_required_api_fields(self):
fields = [f.help_text for f in self.form.required_api_fields()]
eq_(fields, sorted(f['name'] for f in APP_FEATURES.values()))
def test_required_api_fields_nonascii(self):
forms.AppFeaturesForm.base_fields['has_apps'].help_text = _(
u'H\xe9llo')
fields = [f.help_text for f in self.form.required_api_fields()]
eq_(fields, sorted(f['name'] for f in APP_FEATURES.values()))
def test_changes_mark_for_rereview(self):
self.features.update(has_sms=True)
data = {'has_apps': True}
self.form = forms.AppFeaturesForm(instance=self.features, data=data)
self.form.save()
ok_(self.features.has_apps)
ok_(not self.features.has_sms)
ok_(not self.features.has_contacts)
action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
assert AppLog.objects.filter(addon=self.app,
activity_log__action=action_id).exists()
eq_(RereviewQueue.objects.count(), 1)
def test_no_changes_not_marked_for_rereview(self):
self.features.update(has_sms=True)
data = {'has_sms': True}
self.form = forms.AppFeaturesForm(instance=self.features, data=data)
self.form.save()
ok_(not self.features.has_apps)
ok_(self.features.has_sms)
eq_(RereviewQueue.objects.count(), 0)
action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
assert not AppLog.objects.filter(
addon=self.app,
activity_log__action=action_id).exists()
def test_changes_mark_for_rereview_bypass(self):
self.features.update(has_sms=True)
data = {'has_apps': True}
self.form = forms.AppFeaturesForm(instance=self.features, data=data)
self.form.save(mark_for_rereview=False)
ok_(self.features.has_apps)
ok_(not self.features.has_sms)
eq_(RereviewQueue.objects.count(), 0)
action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
assert not AppLog.objects.filter(
addon=self.app,
activity_log__action=action_id).exists()
|
{
"content_hash": "f887514f2501607a717c5685f0f7f763",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 79,
"avg_line_length": 38.47909967845659,
"alnum_prop": 0.5912091585192613,
"repo_name": "jasonthomas/zamboni",
"id": "d1e5824a066af97cf6a7356cc042282548b74888",
"size": "11967",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mkt/submit/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354193"
},
{
"name": "HTML",
"bytes": "2313765"
},
{
"name": "JavaScript",
"bytes": "529996"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4386929"
},
{
"name": "Shell",
"bytes": "10771"
},
{
"name": "Smarty",
"bytes": "1086"
}
],
"symlink_target": ""
}
|
import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas import DOM
from pyjamas.ui.Anchor import Anchor
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas import Window
from pyjamas.ui.HTML import HTML
from pyjamas.ui.HTMLPanel import HTMLPanel
from pyjamas.ui.Label import Label
from pyjamas.ui.Image import Image
from pyjamas.ui.HorizontalPanel import HorizontalPanel
def onClick(sender):
Window.alert('Make service request using %s'%sender.getID())
if __name__ == '__main__':
pyjd.setup("public/Anchor.html")
# EXAMPLE 1
a1 = Anchor(Widget = HTML('Test 1: Anchor to external site using HTML widget.'), Href='http://pyjs.org', Title = 'Test1')
RootPanel().add(a1)
# EXAMPLE 2
label = Label(text = 'Test 2: Click listener added to a label.')
label.addClickListener(onClick)
RootPanel().add(label)
# EXAMPLE 3
a2 = Hyperlink(text = 'Hyperlink', Element = DOM.createSpan())
a2.setID('param1')
a2.addClickListener(onClick)
html2=HTMLPanel("Test 3: <span id ='t3'></span> added to HTMLPanel with click listener.")
html2.add(a2, "t3")
RootPanel().add(html2)
# EXAMPLE 4
hpanel = HorizontalPanel()
hpanel.append(HTML('Test 4: Anchor to external site using Image widget'))
a3 = Anchor(Widget = Image('http://pyjs.org/assets/images/pyjs.128x128.png'), Href='http://pyjs.org', Title = 'Test4')
hpanel.append(a3)
RootPanel().add(hpanel)
# EXAMPLE 5
serverXml = \
"""
<html>
<head>
<title>Example 5</title>
</head>
<body>
<p>Test 5: Processes server html and insert click listeners into links:
<span id='link1' class = 'wikilink'>link 1</span> and <span id='link2' class = 'wikilink'>link 2</span>.
</p>
</body>
</html>
"""
html3 = HTMLPanel(serverXml)
links = list()
for elem in html3.findTags('span'):
if DOM.getElemAttribute(elem, 'class') == 'wikilink':
links.append(elem)
if len(links) > 0:
parent = DOM.getParent(links[0])
for link in links:
linkId = DOM.getElemAttribute(link, 'id')
linkInner = DOM.getInnerHTML(link)
a3 = Hyperlink(text = linkInner, Element = DOM.createSpan())
a3.addClickListener(onClick)
a3.setID('param2')
#todo: modify HTMLPanel to replace an element instead of add
#html3.replace(a3, linkId)
DOM.setInnerHTML(link, '') # clear existing text
html3.add(a3, linkId)
RootPanel().add(html3)
pyjd.run()
|
{
"content_hash": "ddc1055ab06fba83b1c1626e4aa32f84",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 125,
"avg_line_length": 36.465753424657535,
"alnum_prop": 0.6450037565740045,
"repo_name": "pombredanne/pyjs",
"id": "e96303a0201aed63324cd4c0e2e4b539ebf08199",
"size": "2662",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/anchor/Anchor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
}
|
"""
We need this module when storing files in S3 as the S3BotoStorage backend
always uses the bucket name as the root by default, but we don't want to create
two separate buckets for static and media files.
We basically want:
/bucket-name/media/ for media files
/bucket-name/static/ for static files
Source: http://stackoverflow.com/questions/10390244/how-to-set-up-a-django-\
project-with-django-storages-and-amazon-s3-but-with-diff
"""
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
MediaRootS3BotoStorage = lambda: S3BotoStorage(
location=settings.MEDIA_ROOT,
file_overwrite=False
)
StaticRootS3BotoStorage = lambda: S3BotoStorage(
location=settings.STATIC_ROOT
)
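# Example wiring in a project's settings.py (a sketch; the dotted paths assume
# this module lives at glucosetracker/core/s3utils.py, and MEDIA_ROOT/STATIC_ROOT
# are set to the desired key prefixes, e.g. 'media' and 'static'):
#
#   DEFAULT_FILE_STORAGE = 'glucosetracker.core.s3utils.MediaRootS3BotoStorage'
#   STATICFILES_STORAGE = 'glucosetracker.core.s3utils.StaticRootS3BotoStorage'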
|
{
"content_hash": "23884dc2a423b7711a9802466c04a42a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.7696476964769647,
"repo_name": "jcalazan/glucose-tracker",
"id": "ccac8b2538ad3e16392d337e1a849894ed9b72b6",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glucosetracker/core/s3utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52335"
},
{
"name": "HTML",
"bytes": "1674117"
},
{
"name": "JavaScript",
"bytes": "349783"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "208166"
},
{
"name": "Ruby",
"bytes": "1056"
},
{
"name": "Shell",
"bytes": "3145"
}
],
"symlink_target": ""
}
|
import os
import logging
import urllib.request
DOWNDIR = os.path.join(os.path.dirname(__file__), 'downloadedpapers')
URL = {
'bg-8-515-2011.pdf': 'https://www.biogeosciences.net/8/515/2011/bg-8-515-2011.pdf',
'esd-4-11-2013.pdf': 'https://www.earth-syst-dynam.net/4/11/2013/esd-4-11-2013.pdf',
'esd-4-11-2013-supplement.pdf': 'https://www.earth-syst-dynam.net/4/11/2013/esd-4-11-2013-supplement.pdf',
}
def _downloadpdf(url, filename, overwrite=False):
if os.path.exists(filename) and not overwrite:
logging.info(filename+' already present')
return
direc = os.path.dirname(filename)
if direc and not os.path.exists(direc):
os.makedirs(direc)
    print('download', url, 'to', filename)
response = urllib.request.urlopen(url)
resp = response.read()
with open(filename, 'wb') as f:
f.write(resp)
def downloadpdf(pdf):
fname = os.path.join(DOWNDIR, pdf)
url = URL[pdf]
_downloadpdf(url, fname)
return fname
def downloadall():
for pdf in URL:
downloadpdf(pdf)
if __name__ == '__main__':
downloadall()
|
{
"content_hash": "46acdaf8204c818585ee25749cde833c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 110,
"avg_line_length": 23.595744680851062,
"alnum_prop": 0.648331830477908,
"repo_name": "perrette/myref",
"id": "86b60ff9c97f9c8e1ca2ca34680bf9750dc503b0",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "314500"
}
],
"symlink_target": ""
}
|
"""Functionality for inspecting jax tracers."""
from .. import errors
import jax
def current_trace():
"""Returns the innermost Jax tracer."""
return jax.core.find_top_trace(())
def trace_level(main):
"""Returns the level of the trace of -infinity if it is None."""
if main:
return main.level
return float('-inf')
def check_trace_level(base_level):
level = trace_level(current_trace())
if level != base_level:
raise errors.JaxTransformError()
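# Example (a sketch): snapshot the trace level at one point and re-check it
# later to detect a jump across a jax transform boundary.
#
#   base = trace_level(current_trace())
#   ...
#   check_trace_level(base)  # raises errors.JaxTransformError on a mismatch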
|
{
"content_hash": "3907f3f163ff1c3c937d57a9f2fa3ed5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 21.40909090909091,
"alnum_prop": 0.6878980891719745,
"repo_name": "google/flax",
"id": "db35e43e27f62b1ae8e753c01611da0f7b489eac",
"size": "1053",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "flax/core/tracers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2643"
},
{
"name": "Jupyter Notebook",
"bytes": "12612"
},
{
"name": "Python",
"bytes": "956526"
},
{
"name": "Shell",
"bytes": "3995"
}
],
"symlink_target": ""
}
|
import supriya
def test_unaggregated_anonymous(server):
with supriya.SynthDefBuilder(frequency=440) as builder:
source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
supriya.ugens.Out.ar(bus=0, source=source)
synthdef = builder.build()
assert synthdef not in server
with server.osc_io.capture() as transcript:
synthdef.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(5, synthdef.compile())
]
with server.osc_io.capture() as transcript:
synthdef.free()
assert synthdef not in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(53, synthdef.anonymous_name)
]
def test_unaggregated_named(server):
with supriya.SynthDefBuilder(frequency=440) as builder:
source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
supriya.ugens.Out.ar(bus=0, source=source)
synthdef = builder.build(name="test-synthdef")
assert synthdef not in server
with server.osc_io.capture() as transcript:
synthdef.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(5, synthdef.compile())
]
with server.osc_io.capture() as transcript:
synthdef.free()
assert synthdef not in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(53, synthdef.name)
]
def test_aggregated_anonymous(server):
with supriya.SynthDefBuilder(frequency=440) as builder:
source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
supriya.ugens.Out.ar(bus=0, source=source)
synthdef = builder.build()
assert synthdef not in server
synth_a = supriya.Synth(synthdef=synthdef, frequency=666)
synth_b = supriya.Synth(synthdef=synthdef, frequency=777)
synth_c = supriya.Synth(synthdef=synthdef, frequency=888)
# allocate synthdef on node allocation
with server.osc_io.capture() as transcript:
synth_a.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(
5,
synthdef.compile(),
supriya.osc.OscMessage(
9, synthdef.anonymous_name, 1000, 0, 1, "frequency", 666.0
),
)
]
# don't need to re-allocate
with server.osc_io.capture() as transcript:
synth_b.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(
9, synthdef.anonymous_name, 1001, 0, 1, "frequency", 777.0
)
]
# just free the synthdef
with server.osc_io.capture() as transcript:
synthdef.free()
assert synthdef not in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(53, synthdef.anonymous_name)
]
    # allocate synthdef (again) on node allocation
with server.osc_io.capture() as transcript:
synth_c.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(
5,
synthdef.compile(),
supriya.osc.OscMessage(
9, synthdef.anonymous_name, 1002, 0, 1, "frequency", 888.0
),
)
]
def test_aggregated_named(server):
with supriya.SynthDefBuilder(frequency=440) as builder:
source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
supriya.ugens.Out.ar(bus=0, source=source)
synthdef = builder.build(name="test-synthdef")
assert synthdef not in server
synth_a = supriya.Synth(synthdef=synthdef, frequency=666)
synth_b = supriya.Synth(synthdef=synthdef, frequency=777)
synth_c = supriya.Synth(synthdef=synthdef, frequency=888)
# allocate synthdef on node allocation
with server.osc_io.capture() as transcript:
synth_a.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(
5,
synthdef.compile(),
supriya.osc.OscMessage(9, synthdef.name, 1000, 0, 1, "frequency", 666.0),
)
]
# don't need to re-allocate
with server.osc_io.capture() as transcript:
synth_b.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(9, synthdef.name, 1001, 0, 1, "frequency", 777.0)
]
# just free the synthdef
with server.osc_io.capture() as transcript:
synthdef.free()
assert synthdef not in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(53, synthdef.name)
]
    # allocate synthdef (again) on node allocation
with server.osc_io.capture() as transcript:
synth_c.allocate(server=server)
assert synthdef in server
assert [message for timestamp, message in transcript.sent_messages] == [
supriya.osc.OscMessage(
5,
synthdef.compile(),
supriya.osc.OscMessage(9, synthdef.name, 1002, 0, 1, "frequency", 888.0),
)
]
|
{
"content_hash": "02d025b606022b3a85eae478e941faf3",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 85,
"avg_line_length": 36.90728476821192,
"alnum_prop": 0.6624798133859681,
"repo_name": "Pulgama/supriya",
"id": "ef3ee57528ac71b4d611ab36d01ae74f83483b6c",
"size": "5573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_synthdefs_SynthDef_lifecycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
}
|
"""
Wrapper to be used to apply a fixed delay to any `ChannelModel` by modifying the
'cm-delay' annotation.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-09-06 16:44:24 -0500 (Tue, 06 Sep 2011) $
* $LastChangedRevision: 5121 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins.channel.channelbase import ChannelModel
from wins.packet import ANNO
def _apply_fixed_delay(cm, *args, **kwargs):
"""Internal method to implement `FixedDelay` `ChannelModel` wrapper."""
assert isinstance(cm, ChannelModel)
assert hasattr(cm, '__subclass__')
assert hasattr(cm, '__fixed_delay__')
subclass = cm.__subclass__
delay = cm.__fixed_delay__
r = subclass.apply(cm, *args, **kwargs)
if ANNO.supports(r, 'cm-delay'):
r.delanno('cm-delay')
if ANNO.supported(r) and (delay>0):
r.setanno('cm-delay', delay)
return r
def FixedDelay(model, delay=0):
"""Create a new `ChannelModel` with overloaded `ChannelModel.apply()` method
to modify 'cm-delay' annotation.
:param model: `ChannelModel` class.
:param delay: Fixed delay value [default = 0].
"""
assert issubclass(model, ChannelModel)
clsname = model.__name__
newname = "_FixedDelay_%s"%(clsname)
tracename = "D%s"%(model.tracename)
createclass = "class %s(model):\n\tpass"%(newname)
exec(createclass)
globals()[newname] = eval(newname)
exec("%s.__subclass__ = model"%(newname) )
exec("%s.apply = _apply_fixed_delay"%(newname) )
exec("%s.__fixed_delay__ = delay"%(newname) )
return eval(newname)
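# Illustrative usage sketch (assumes some concrete `ChannelModel` subclass
# named `Propagation` exists elsewhere in wins; the name is a placeholder):
#
#     DelayedPropagation = FixedDelay(Propagation, delay=1e-4)
#     cm = DelayedPropagation()
#     # cm.apply(...) now stamps a 'cm-delay' annotation of 1e-4 on its output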
|
{
"content_hash": "b96e44a0ea1212a329c36abeaaa5fc7e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 35.3125,
"alnum_prop": 0.6783185840707965,
"repo_name": "reidlindsay/wins",
"id": "7b0fd3c148eca7494202d051bda664e35da31c43",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wins/channel/fixdelay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5653"
},
{
"name": "C++",
"bytes": "51883"
},
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "1193050"
},
{
"name": "Shell",
"bytes": "665341"
}
],
"symlink_target": ""
}
|
"""
Settings for the sharedcontent plugin.
"""
from django.conf import settings
# Allow the "cross site" feature in shared content:
FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE = getattr(settings, "FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE", True)
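# Illustrative override in a project's settings.py (the value is an example):
#
#   FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE = False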
|
{
"content_hash": "b1f69fc46d9154d19d6fc1ee3fdbb118",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 108,
"avg_line_length": 33,
"alnum_prop": 0.7619047619047619,
"repo_name": "ixc/django-fluent-contents",
"id": "f1ddcd34c71f1f4535739192760fd2d30f8075ab",
"size": "231",
"binary": false,
"copies": "2",
"ref": "refs/heads/ixc",
"path": "fluent_contents/plugins/sharedcontent/appsettings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13003"
},
{
"name": "HTML",
"bytes": "33138"
},
{
"name": "JavaScript",
"bytes": "81000"
},
{
"name": "Python",
"bytes": "449106"
}
],
"symlink_target": ""
}
|
{% extends "cmd_target_tmpl.py" %}
|
{
"content_hash": "78d211623f84d77eaab130b9fe515a25",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.6285714285714286,
"repo_name": "grbd/GBD.Build.BlackJack",
"id": "32947778e1bedbb7b9d9557d6412309d86b9b08f",
"size": "35",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generator/templates/cmds/add_executable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "248002"
},
{
"name": "Visual Basic",
"bytes": "46489"
}
],
"symlink_target": ""
}
|
"""`Dependency` provider example."""
import abc
import dataclasses
from dependency_injector import containers, providers
class DbAdapter(metaclass=abc.ABCMeta):
...
class SqliteDbAdapter(DbAdapter):
...
class PostgresDbAdapter(DbAdapter):
...
@dataclasses.dataclass
class UserService:
database: DbAdapter
class Container(containers.DeclarativeContainer):
database = providers.Dependency(instance_of=DbAdapter)
user_service = providers.Factory(
UserService,
database=database,
)
if __name__ == "__main__":
container1 = Container(database=providers.Singleton(SqliteDbAdapter))
container2 = Container(database=providers.Singleton(PostgresDbAdapter))
assert isinstance(container1.user_service().database, SqliteDbAdapter)
assert isinstance(container2.user_service().database, PostgresDbAdapter)
container3 = Container(database=providers.Singleton(object))
container3.user_service() # <-- raises error:
# <object ...> is not an instance of DbAdapter
|
{
"content_hash": "fd28761337b113170ed6853f493fe194",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 23.022222222222222,
"alnum_prop": 0.7306949806949807,
"repo_name": "ets-labs/dependency_injector",
"id": "e53b6dc4705eb4fa3536a773f283e9f7ad317dd1",
"size": "1036",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/providers/dependency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171148"
}
],
"symlink_target": ""
}
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PreferencesV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'locale': 'str'
}
attribute_map = {
'locale': 'locale'
}
def __init__(self, locale=None): # noqa: E501
"""PreferencesV30 - a model defined in Swagger""" # noqa: E501
self._locale = None
self.discriminator = None
if locale is not None:
self.locale = locale
@property
def locale(self):
"""Gets the locale of this PreferencesV30. # noqa: E501
:return: The locale of this PreferencesV30. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this PreferencesV30.
:param locale: The locale of this PreferencesV30. # noqa: E501
:type: str
"""
allowed_values = ["AR", "CS", "DE", "EN", "ES", "FR", "IT", "JA", "KO", "PT", "RU", "ZH_CN", "ZH_TW", "XX"] # noqa: E501
if locale not in allowed_values:
raise ValueError(
"Invalid value for `locale` ({0}), must be one of {1}" # noqa: E501
.format(locale, allowed_values)
)
self._locale = locale
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PreferencesV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PreferencesV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
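# Illustrative usage sketch (valid locales are listed in the setter above):
#
#   prefs = PreferencesV30(locale="EN")
#   assert prefs.to_dict() == {"locale": "EN"}
#   PreferencesV30(locale="tlh")  # raises ValueError: not in allowed_values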
|
{
"content_hash": "f70b01e7457158c7de13b1c9d5051061",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 129,
"avg_line_length": 29.617391304347827,
"alnum_prop": 0.5325895478567234,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "af813807c3d7eec02b7f204c84dd9e8a67fbe078",
"size": "3423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/preferences_v30.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
"""
This is a simple example where I(Q,E) data is already in the form
of a histogram, reduced from the raw experimental data.
All this script does is to convert I(Q,E) to Density of States
by performing multiphonon correction.
"""
import histogram.hdf as hh, os, numpy as np
# when the system is headless, do not plot
headless = 'DISPLAY' not in os.environ or not os.environ['DISPLAY']
if not headless:
from matplotlib import pyplot as plt
# change into "examples" dir
here = os.path.dirname(__file__) or os.curdir
os.chdir(here)
# load I(Q,E) data
iqehist = hh.load("data/Al-iqe.h5")
# interpolate I(Q, E) data so that the energy axis has "zero" as a bin center
from multiphonon.sqe import interp
newiqe = interp(iqehist, newE = np.arange(-40, 70, 1.))
# save interpolated data just in case we need it later
hh.dump(newiqe, 'data/Al-iqe-interped.h5')
# create processing engine with processing parameters
from multiphonon.backward import sqe2dos
workdir = 'work-Al'
iterdos = sqe2dos.sqe2dos(
newiqe, T=300, Ecutoff=50.,
elastic_E_cutoff=(-10., 7), M=26.98,
C_ms=0.2, Ei=80., workdir=workdir)
# process
for i, dos in enumerate(iterdos):
print("* Iteration", i)
if not headless:
plt.plot(dos.E, dos.I, label='%d' % i)
continue
if not headless:
plt.legend()
plt.show()
print("Intermediate and final results are stored in directory %r" % workdir)
|
{
"content_hash": "ee41f49972b0477388d8295c14aa1452",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 29.78723404255319,
"alnum_prop": 0.7057142857142857,
"repo_name": "sns-chops/multiphonon",
"id": "48740d85cc4c94f56cc6a69682aca4de082ae740",
"size": "1400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/getdos-Al.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "Jupyter Notebook",
"bytes": "151761"
},
{
"name": "Python",
"bytes": "134339"
},
{
"name": "Shell",
"bytes": "102"
},
{
"name": "TeX",
"bytes": "6533"
}
],
"symlink_target": ""
}
|
import socket, os, sys, binascii, time, datetime, threading, logging, importlib, json
import re
"""
Rcon protocol class.
Used to establish the connection and to send keep-alive packages
Also it provides some default commands, like kickAll, sendChat , lockServer, etc...
The module loader <loadmodule(name, class)> allows the use of the following events:
- OnConnected
- OnReconnected
- OnPlayerConnect
- OnPlayerDisconnect
- OnChat
"""
class Rcon():
    Timeout = 60            # Time out when no response has been received within this period
KeepAlive = 30 # KeepAlive must always be lower than Timeout, otherwise the Timeout occurs
ConnectionRetries = 5 # Try to reconnect (at startup) X times and...
ConnectionInterval = 10 # ... try it every 10 seconds. Example (1 + 5 tries X 10 seconds = 60 seconds until server should be up)
"""
constructor: create an instance by passing ip, password and port as arguments
"""
def __init__(self, ip, password, Port):
# constructor parameters
self.ip = ip
self.password = password
self.port = int(Port)
# module instances as dict (to have them loaded only once)
self.__instances = {}
        # connection state: exit flag, auth status, retry counter, last command
self.isExit = False
self.isAuthenticated = False
self.retry = 0
self.lastcmd = ""
# server message receive filters
        self.receiveFilter = [
            # receive all players
            (r"\n(\d+)\s+(.*?)\s+([0-9]+)\s+([A-z0-9]{32})\(.*?\)\s(.*)", self.__players, True),
            # receive missions
            (r"\n(.*\.[A-z0-9_-]+\.pbo)", self.__missions, True),
            # when player is connected
            (r"Verified GUID \(([A-Fa-f0-9]+)\) of player #([0-9]+) (.*)", self.__playerConnect, False),
            # when player is disconnected
            (r"Player #([0-9]+) (.*?) disconnected", self.__playerDisconnect, False),
            # chat messages
            (r"\((\w+)\) (.*?): (.*)", self.__chatMessage, False),
        ]
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as serr:
print ('Failed to create socket')
logging.error('rconprotocol: Failed to created socket: {}'.format(serr))
sys.exit()
"""
public: load additional modules.
@param string name - module name without .py suffix
@param string cls - class name to create an instance of
"""
def loadmodule(self, name, cls, *args):
if type(self).__name__ == cls:
return self
key = "%s.%s" % (name, cls)
if not key in self.__instances.keys():
mod = importlib.import_module('lib.' + name)
classT = getattr(mod, cls)
clsObj = classT(self, *args)
self.__instances[key] = clsObj
return self.__instances[key]
"""
private: threaded method sending keepAlive messages to the server.
Use the KeepAlive constant to define the interval
"""
    def _keepAliveThread(self):
        # loop rather than recurse, so long-running sessions don't grow the stack
        while not self.isExit:
            time.sleep(self.KeepAlive)
            self.sendCommand(None)
"""
private: to calculate the crc (Battleye).
More Info: http://www.battleye.com/downloads/BERConProtocol.txt
"""
def __compute_crc(self, Bytes):
buf = memoryview(Bytes)
crc = binascii.crc32(buf) & 0xffffffff
crc32 = '0x%08x' % crc
return int(crc32[8:10], 16), int(crc32[6:8], 16), int(crc32[4:6], 16), int(crc32[2:4], 16)
"""
public: send individual server commands.
@param string toSendCommand - any valid server command, like "#ban <playerid>"
"""
def sendCommand(self, toSendCommand):
if not self.isAuthenticated:
logging.error('Command failed - Not Authenticated')
return
# request = "B" + "E" + 4 bytes crc check + command
command = bytearray()
command.append(0xFF)
command.append(0x01)
command.append(0x00)
if toSendCommand:
logging.debug('Sending command "{}"'.format(toSendCommand))
command.extend(toSendCommand.encode('utf-8','replace'))
else:
            logging.debug('Sending keepAlive packet')
self.lastcmd = toSendCommand
request = bytearray(b'BE')
request.extend( self.__compute_crc(command) )
request.extend(command)
self.s.sendto(request ,(self.ip, self.port))
"""
private: send the magic bytes to login as Rcon admin.
More Info: http://www.battleye.com/downloads/BERConProtocol.txt
"""
def _sendLogin(self, passwd):
logging.debug('Sending login information')
# request = "B" + "E" + 4 bytes crc check + command
command = bytearray()
command.append(0xFF)
command.append(0x00)
command.extend(passwd.encode('utf-8','replace'))
request = bytearray(b'BE')
request.extend( self.__compute_crc(command) )
request.extend(command)
return request
"""
private: accept server messages.
More Info: http://www.battleye.com/downloads/BERConProtocol.txt
"""
def _acknowledge(self, Bytes):
command = bytearray()
command.append(0xFF)
command.append(0x02)
command.extend(Bytes)
request = bytearray(b'BE')
request.extend( self.__compute_crc(command) )
request.extend(command)
seqNo = Bytes[0]
logging.info('ACK seq:{}'.format(seqNo))
return request
"""
private: handle all incoming server messages from socket.recvfrom method
    @param unknown packet - received packet
"""
def __streamReader(self, packet):
        # reset the retry counter, in case connection problems occur from now on
self.retry = 0
#ACKNOWLEDGE THE MESSAGE
p = packet[0]
try:
if p[0:2] == b'BE' and self.isAuthenticated:
self.s.sendto(self._acknowledge(p[8:9]), (self.ip, self.port))
except:
pass
# Debug output the complete packet received from server
logging.debug("[Server: %s:%s]: %s" % (self.ip, self.port, packet))
stream = packet[0]
# successfully authenticad packet received
if stream[6:] == b'\xff\x00\x01':
self.s.settimeout( self.Timeout )
logging.info("[Server: %s:%s]: %s" % (self.ip, self.port, "Authenticated"))
# Only do the below if this is the initial connect call
if not self.isAuthenticated:
self.isAuthenticated = True
self.OnConnected()
else:
self.OnReconnected()
return
# when authentication failed, exit the program
if stream[6:] == b'\xff\x00\x00':
logging.error("Not Authenticated")
exit()
        # assume that when the last command is empty, it's a keepAlive packet
if stream[6:8] == b'\xff\x01' and not self.lastcmd:
logging.info("[Server: %s:%s]: %s" % (self.ip, self.port, "KeepAlive"))
return
# success message from the server for the previous command (or keep alive)
        if stream[6:8] == b'\xff\x01' and self.lastcmd:
logging.info("[Server: %s:%s]: %s" % (self.ip, self.port, "ACK " + self.lastcmd))
        # all other packets and commands
if len(stream[9:]) > 0:
stream = stream[9:].decode('utf-8', 'replace')
self.__parseResponse(stream)
logging.info("[Server: %s:%s]: %s" % (self.ip, self.port, stream))
def __players(self, pl):
l = []
for m in pl:
l.append( Player(m[0], m[3], m[4]) )
self.OnPlayers(l)
def __missions(self, missions):
self.OnMissions(missions)
def __playerConnect(self, m):
self.OnPlayerConnect( Player(m[1], m[0], m[2]) )
def __playerDisconnect(self, m):
self.OnPlayerDisconnect( Player(m[0], "", m[1]) )
def __chatMessage(self, m):
self.OnChat( ChatMessage( m[0], m[1], m[2]) )
"""
private: parse the incoming message from __streamReader to provide eventing
"""
def __parseResponse(self, msg):
for x in self.receiveFilter:
regex, action, multiline = x
if multiline:
m = re.findall(regex, msg)
if len(m) > 0:
action(m)
break
else:
m = re.search(regex, msg)
if m:
action(m.groups())
break
"""
public: send a chat message to everyone
@param string msg - message text
"""
def sendChat(self, msg, ident = -1):
self.sendCommand("say %s \"%s\"" % (ident,msg))
"""
public: kick all players
"""
def kickAll(self):
logging.info('Kick All player before restart take action')
for i in range(1, 100):
self.sendCommand('kick %s' % (i))
time.sleep(0.05)
"""
public: lock the server (until next restart/unlock). So nobody can join anymore
"""
def lockServer(self):
self.sendCommand('#lock')
time.sleep(1)
"""
Event: when list of players is requested
"""
def OnPlayers(self, playerList):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnPlayers', None)
if func: func(playerList)
"""
Event: when mission files are requested
"""
def OnMissions(self, missionList):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnMissions', None)
if func: func(missionList)
"""
Event: when a player connects to the server
"""
def OnPlayerConnect(self, player):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnPlayerConnect', None)
if func: func(player)
"""
Event: when a player disconnects from the server
"""
def OnPlayerDisconnect(self, player):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnPlayerDisconnect', None)
if func: func(player)
"""
Event: Incoming chat messages
@param ChatMessage chatObj - chat object containing channel and message
"""
def OnChat(self, chatObj):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnChat', None)
if func: func(chatObj)
"""
Event: when program is successfully connected and authenticated to the server.
This can perfectly be used in modules.
"""
def OnConnected(self):
# initialize keepAlive thread
_t = threading.Thread(target=self._keepAliveThread)
_t.daemon = True
_t.start()
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnConnected', None)
if func: func()
"""
Event: when program is successfully reconnected and authenticated to the server.
This can perfectly be used in modules.
"""
def OnReconnected(self):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnReconnected', None)
if func: func()
def OnAbort(self):
for clsObj in self.__instances.values():
func = getattr(clsObj, 'OnAbort', None)
if func: func()
"""
    public: cancel all loops (keepAlive and others from modules) and notify loaded modules via OnAbort
"""
def Abort(self):
logging.info("Exit loop")
self.isExit = True
self.OnAbort()
def connectAsync(self):
_t = threading.Thread(target=self.connect, name='connectionThread')
_t.daemon = True
_t.start()
"""
public: used to establish the connection to the server giving by constructor call
"""
def connect(self):
try:
self.s.settimeout(self.ConnectionInterval)
#Set the whole string
logging.info('Connecting to {}:{} #{}'.format(self.ip, self.port, self.retry))
self.s.sendto(self._sendLogin(self.password) ,(self.ip, self.port))
# receive data from client (data, addr)
while not self.isExit:
d = self.s.recvfrom(2048) #1024 value crash on players request on full server
self.__streamReader(d)
# Connection timed out
except socket.timeout as et:
logging.error('Socket timeout: {}'.format(et))
if self.retry < self.ConnectionRetries and not self.isExit:
self.retry += 1
self.connect()
else:
self.Abort()
# Some problem sending data ??
except socket.error as e:
logging.error('Socket error: {}'.format(e))
self.Abort()
# Ctrl + C
except (KeyboardInterrupt, SystemExit):
logging.debug('rconprotocol.connect: Keyboard interrupted')
self.Abort()
except:
logging.exception("Unhandled Exception")
self.Abort()
"""
Player class commonly used for events OnPlayerConnect and OnPlayerDisconnect
"""
class Player():
def __init__(self, no, guid, name):
self.number = no
self.guid = guid
self.name = name
self.allowed = False
def Allow(self):
self.allowed = True
def Disallow(self):
self.allowed = False
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
@staticmethod
def fromJSON(i):
o = Player(i['number'], i['guid'], i['name'])
if i['allowed']:
o.Allow()
return o
"""
Chat class commonly used for event OnChat
"""
class ChatMessage():
def __init__(self, channel, sender, message):
self.channel = channel.lower()
self.sender = sender
self.message = message
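# Illustrative usage sketch (host, port and password are placeholders; the
# module/class passed to loadmodule is hypothetical):
#
#   rcon = Rcon('127.0.0.1', 'secret', 2302)
#   rcon.loadmodule('chatlogger', 'ChatLogger')  # optional event handlers
#   rcon.connect()  # blocks until Abort() is called or retries are exhausted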
|
{
"content_hash": "8ff62d8f09cbf912fae85c2cd53b58ec",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 132,
"avg_line_length": 32.1986301369863,
"alnum_prop": 0.5747004183507055,
"repo_name": "Au1st3in/au1st3in.net",
"id": "f4514662afb6bbd06a36971f172820cf31501830",
"size": "14103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py3rcon/rconprotocol.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9477"
},
{
"name": "HTML",
"bytes": "41427"
},
{
"name": "JavaScript",
"bytes": "141"
},
{
"name": "Makefile",
"bytes": "958"
},
{
"name": "Nginx",
"bytes": "683"
},
{
"name": "Python",
"bytes": "91431"
},
{
"name": "Shell",
"bytes": "172"
}
],
"symlink_target": ""
}
|
import weechat as w
import time
SCRIPT_NAME = "buffer_autoclose"
SCRIPT_AUTHOR = "xt <xt@bash.no>"
SCRIPT_VERSION = "0.4"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Automatically close inactive private message buffers"
settings = {
'interval': '1', # How often in minutes to check
'age_limit': '30', # How old in minutes before auto close
'ignore': '', # Buffers to ignore (use full name: server.buffer_name)
}
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", ""):
for option, default_value in settings.items():
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default_value)
w.hook_timer(\
int(w.config_get_plugin('interval')) * 1000 * 60,
0,
0,
"close_time_cb",
'')
def get_all_buffers():
'''Returns list with pointers of all open buffers.'''
buffers = []
infolist = w.infolist_get('buffer', '', '')
while w.infolist_next(infolist):
buffer_type = w.buffer_get_string(w.infolist_pointer(infolist, 'pointer'), 'localvar_type')
if buffer_type == 'private': # we only close private message buffers for now
buffers.append(w.infolist_pointer(infolist, 'pointer'))
w.infolist_free(infolist)
return buffers
def get_last_line_date(buffer):
date = '1970-01-01 01:00:00'
infolist = w.infolist_get('buffer_lines', buffer, '')
while w.infolist_prev(infolist):
date = w.infolist_time(infolist, 'date')
if date != '1970-01-01 01:00:00':
            # Some lines, like the "Day changed to" message, don't have a
            # date set, so loop until we find a message that does
break
w.infolist_free(infolist)
return date
def is_in_hotlist(buffer):
''' Returns true if buffer is in hotlist, false if not'''
hotlist = w.infolist_get('hotlist', '', '')
found = False
while w.infolist_next(hotlist):
thebuffer = w.infolist_pointer(hotlist, 'buffer_pointer')
if thebuffer == buffer:
found = True
name = w.buffer_get_string(thebuffer, 'short_name')
break
w.infolist_free(hotlist)
return found
def close_time_cb(buffer, args):
''' Callback for check for inactivity and close '''
for buffer in get_all_buffers():
name = w.buffer_get_string(buffer, 'name')
date = get_last_line_date(buffer)
date = time.mktime(time.strptime(date, '%Y-%m-%d %H:%M:%S'))
now = time.time()
seconds_old = now - date
if seconds_old > int(w.config_get_plugin('age_limit'))*60:
if is_in_hotlist(buffer):
#w.prnt('', '%s: Not closing buffer: %s: it is in hotlist' %(SCRIPT_NAME, name))
continue
if name in w.config_get_plugin('ignore').split(','):
#w.prnt('', '%s: Not closing buffer: %s: it is in ignore list' %(SCRIPT_NAME, name))
continue
if buffer == w.current_buffer():
# Never close current buffer
#w.prnt('', '%s: Not closing buffer: %s: it is in currently active' %(SCRIPT_NAME, name))
continue
if len(w.buffer_get_string(buffer, 'input')):
# Don't close buffers with text on input line
#w.prnt('', '%s: Not closing buffer: %s: it has input' %(SCRIPT_NAME, name))
continue
w.prnt('', '%s: Closing buffer: %s' %(SCRIPT_NAME, name))
w.command(buffer, '/buffer close')
#else:
# w.prnt('', '%s: Not closing buffer: %s: it is too new: %s' %(SCRIPT_NAME, name, seconds_old))
return w.WEECHAT_RC_OK
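# Illustrative configuration from within WeeChat (values are examples):
#   /set plugins.var.python.buffer_autoclose.age_limit "60"
#   /set plugins.var.python.buffer_autoclose.ignore "libera.alice,libera.bob"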
|
{
"content_hash": "2f011b83d0c532976aba199fa0ef76cf",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 106,
"avg_line_length": 37.73737373737374,
"alnum_prop": 0.5784261241970021,
"repo_name": "deepredsky/dotfiles",
"id": "303f5b37cc84bf11242b4cee905fe82261ecd124",
"size": "4766",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "weechat/python/buffer_autoclose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3812"
},
{
"name": "Lua",
"bytes": "333"
},
{
"name": "Perl",
"bytes": "135224"
},
{
"name": "Python",
"bytes": "175209"
},
{
"name": "Ruby",
"bytes": "11581"
},
{
"name": "Shell",
"bytes": "45537"
},
{
"name": "Vim Script",
"bytes": "14976"
}
],
"symlink_target": ""
}
|
from horizon.test import helpers as test
class ActionsTests(test.TestCase):
# Unit tests for add_provider.
def test_me(self):
self.assertTrue(1 + 1 == 2)
|
{
"content_hash": "ddce853149fc7608100c59cb912bcdb9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.6744186046511628,
"repo_name": "emitrom/integra-openstack-ui",
"id": "6a1b3f7b473643b30877263203a9d65a0f8ea81e",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "4382"
},
{
"name": "JavaScript",
"bytes": "41"
},
{
"name": "Python",
"bytes": "49167"
}
],
"symlink_target": ""
}
|
from msrest.paging import Paged
class SBAuthorizationRulePaged(Paged):
"""
    A paging container for iterating over a list of :class:`SBAuthorizationRule <azure.mgmt.servicebus.models.SBAuthorizationRule>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[SBAuthorizationRule]'}
}
def __init__(self, *args, **kwargs):
super(SBAuthorizationRulePaged, self).__init__(*args, **kwargs)
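# Illustrative usage sketch (the operations client returns this container;
# resource group and namespace names are placeholders):
#
#   for rule in client.namespaces.list_authorization_rules('my-rg', 'my-ns'):
#       print(rule.name)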
|
{
"content_hash": "c11b5622fa116696fc8513d80a007fce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 138,
"avg_line_length": 31.5,
"alnum_prop": 0.6388888888888888,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "24c4f71b00015fc2e14a090315a6c198e44792c8",
"size": "978",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/models/sb_authorization_rule_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""
Window arrangement.
This contains the data structure for the tab pages with their windows and
buffers. It's not the same as a `prompt-toolkit` layout. The latter directly
represents the rendering, while this is more specific for the editor itself.
"""
from __future__ import unicode_literals
from six import string_types
import weakref
from .editor_buffer import EditorBuffer
__all__ = (
'WindowArrangement',
)
class HSplit(list):
""" Horizontal split. (This is a higher level split than
prompt_toolkit.layout.HSplit.) """
class VSplit(list):
""" Horizontal split. """
class Window(object):
"""
Editor window: a window can show any open buffer.
"""
def __init__(self, editor_buffer):
assert isinstance(editor_buffer, EditorBuffer)
self.editor_buffer = editor_buffer
def __repr__(self):
return '%s(editor_buffer=%r)' % (self.__class__.__name__, self.editor_buffer)
class TabPage(object):
"""
Tab page. Container for windows.
"""
def __init__(self, window):
assert isinstance(window, Window)
self.root = VSplit([window])
        # Keep track of which window is focused in this tab.
self.active_window = window
def windows(self):
""" Return a list of all windows in this tab page. """
return [window for _, window in self._walk_through_windows()]
def window_count(self):
""" The amount of windows in this tab. """
return len(self.windows())
def visible_editor_buffers(self):
"""
Return a list of visible `EditorBuffer` instances.
"""
return [w.editor_buffer for w in self.windows()]
def _walk_through_windows(self):
"""
Yields (Split, Window) tuples.
"""
def walk(split):
for c in split:
if isinstance(c, (HSplit, VSplit)):
for i in walk(c):
yield i
elif isinstance(c, Window):
yield split, c
return walk(self.root)
def _walk_through_splits(self):
"""
        Yields (parent_split, child_split) tuples.
"""
def walk(split):
for c in split:
if isinstance(c, (HSplit, VSplit)):
yield split, c
for i in walk(c):
yield i
return walk(self.root)
def _get_active_split(self):
for split, window in self._walk_through_windows():
if window == self.active_window:
return split
raise Exception('active_window not found. Something is wrong.')
def _get_split_parent(self, split):
for parent, child in self._walk_through_splits():
if child == split:
return parent
def _split(self, split_cls, editor_buffer=None):
"""
Split horizontal or vertical.
(when editor_buffer is None, show the current buffer there as well.)
"""
if editor_buffer is None:
editor_buffer = self.active_window.editor_buffer
active_split = self._get_active_split()
index = active_split.index(self.active_window)
new_window = Window(editor_buffer)
if isinstance(active_split, split_cls):
# Add new window to active split.
active_split.insert(index, new_window)
else:
# Split in the other direction.
active_split[index] = split_cls([active_split[index], new_window])
# Focus new window.
self.active_window = new_window
def hsplit(self, editor_buffer=None):
"""
Split active window horizontally.
"""
self._split(HSplit, editor_buffer)
def vsplit(self, editor_buffer=None):
"""
Split active window vertically.
"""
self._split(VSplit, editor_buffer)
def show_editor_buffer(self, editor_buffer):
"""
Open this `EditorBuffer` in the active window.
"""
assert isinstance(editor_buffer, EditorBuffer)
self.active_window.editor_buffer = editor_buffer
def close_editor_buffer(self, editor_buffer):
"""
Close all the windows that have this editor buffer open.
"""
for split, window in self._walk_through_windows():
if window.editor_buffer == editor_buffer:
self._close_window(window)
def _close_window(self, window):
"""
Close this window.
"""
if window == self.active_window:
self.close_active_window()
else:
original_active_window = self.active_window
self.close_active_window()
self.active_window = original_active_window
def close_active_window(self):
"""
Close active window.
"""
active_split = self._get_active_split()
# First remove the active window from its split.
index = active_split.index(self.active_window)
del active_split[index]
# Move focus.
if len(active_split):
new_active_window = active_split[max(0, index - 1)]
while isinstance(new_active_window, (HSplit, VSplit)):
new_active_window = new_active_window[0]
self.active_window = new_active_window
else:
self.active_window = None # No windows left.
        # When there is exactly one item left, move this back into the parent
        # split. (We don't want to keep a split with one item around -- except
        # for the root.)
if len(active_split) == 1 and active_split != self.root:
parent = self._get_split_parent(active_split)
index = parent.index(active_split)
parent[index] = active_split[0]
def cycle_focus(self):
"""
Cycle through all windows.
"""
windows = self.windows()
new_index = (windows.index(self.active_window) + 1) % len(windows)
self.active_window = windows[new_index]
@property
def has_unsaved_changes(self):
"""
True when any of the visible buffers in this tab has unsaved changes.
"""
for w in self.windows():
if w.editor_buffer.has_unsaved_changes:
return True
return False
class WindowArrangement(object):
def __init__(self, editor):
self._editor_ref = weakref.ref(editor)
self.tab_pages = []
self.active_tab_index = None
self.editor_buffers = [] # List of EditorBuffer
self._buffer_index = 0 # Index for generating buffer names.
@property
def editor(self):
""" The Editor instance. """
return self._editor_ref()
@property
def active_tab(self):
""" The active TabPage or None. """
if self.active_tab_index is not None:
return self.tab_pages[self.active_tab_index]
@property
def active_editor_buffer(self):
""" The active EditorBuffer or None. """
if self.active_tab and self.active_tab.active_window:
return self.active_tab.active_window.editor_buffer
def get_editor_buffer_for_location(self, location):
"""
Return the `EditorBuffer` for this location.
        When the file has not been loaded yet, return None.
"""
for eb in self.editor_buffers:
if eb.location == location:
return eb
def get_editor_buffer_for_buffer_name(self, buffer_name):
"""
Return the `EditorBuffer` for this buffer_name.
When not found, return None
"""
for eb in self.editor_buffers:
if eb.buffer_name == buffer_name:
return eb
def close_window(self):
"""
Close active window of active tab.
"""
self.active_tab.close_active_window()
# Clean up buffers.
self._auto_close_new_empty_buffers()
def close_tab(self):
"""
Close active tab.
"""
if len(self.tab_pages) > 1: # Cannot close last tab.
del self.tab_pages[self.active_tab_index]
self.active_tab_index = max(0, self.active_tab_index - 1)
# Clean up buffers.
self._auto_close_new_empty_buffers()
def hsplit(self, location=None, new=False, text=None):
""" Split horizontally. """
assert location is None or text is None or new is False # Don't pass two of them.
if location or text or new:
editor_buffer = self._get_or_create_editor_buffer(location=location, text=text)
else:
editor_buffer = None
self.active_tab.hsplit(editor_buffer)
def vsplit(self, location=None, new=False, text=None):
""" Split vertically. """
assert location is None or text is None or new is False # Don't pass two of them.
if location or text or new:
editor_buffer = self._get_or_create_editor_buffer(location=location, text=text)
else:
editor_buffer = None
self.active_tab.vsplit(editor_buffer)
def keep_only_current_window(self):
"""
Close all other windows, except the current one.
"""
self.tab_pages = [TabPage(self.active_tab.active_window)]
self.active_tab_index = 0
def cycle_focus(self):
""" Focus next visible window. """
self.active_tab.cycle_focus()
def show_editor_buffer(self, editor_buffer):
"""
Show this EditorBuffer in the current window.
"""
self.active_tab.show_editor_buffer(editor_buffer)
# Clean up buffers.
self._auto_close_new_empty_buffers()
def go_to_next_buffer(self, _previous=False):
"""
Open next buffer in active window.
"""
if self.active_editor_buffer:
# Find the active opened buffer.
index = self.editor_buffers.index(self.active_editor_buffer)
# Get index of new buffer.
if _previous:
new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers)
else:
new_index = (index + 1) % len(self.editor_buffers)
# Open new buffer in active tab.
self.active_tab.show_editor_buffer(self.editor_buffers[new_index])
# Clean up buffers.
self._auto_close_new_empty_buffers()
def go_to_previous_buffer(self):
"""
Open the previous buffer in the active window.
"""
self.go_to_next_buffer(_previous=True)
def go_to_next_tab(self):
"""
Focus the next tab.
"""
self.active_tab_index = (self.active_tab_index + 1) % len(self.tab_pages)
def go_to_previous_tab(self):
"""
Focus the previous tab.
"""
self.active_tab_index = (self.active_tab_index - 1 +
len(self.tab_pages)) % len(self.tab_pages)
def go_to_buffer(self, buffer_name):
"""
Go to one of the open buffers.
"""
assert isinstance(buffer_name, string_types)
for i, eb in enumerate(self.editor_buffers):
if (eb.location == buffer_name or
(buffer_name.isdigit() and int(buffer_name) == i)):
self.show_editor_buffer(eb)
break
def _add_editor_buffer(self, editor_buffer, show_in_current_window=False):
"""
Insert this new buffer in the list of buffers, right after the active
one.
"""
assert isinstance(editor_buffer, EditorBuffer) and editor_buffer not in self.editor_buffers
# Add to list of EditorBuffers
eb = self.active_editor_buffer
if eb is None:
self.editor_buffers.append(editor_buffer)
else:
# Append right after the currently active one.
try:
index = self.editor_buffers.index(self.active_editor_buffer)
except ValueError:
index = 0
self.editor_buffers.insert(index, editor_buffer)
# When there are no tabs/windows yet, create one for this buffer.
if self.tab_pages == []:
self.tab_pages.append(TabPage(Window(editor_buffer)))
self.active_tab_index = 0
# To be shown?
if show_in_current_window and self.active_tab:
self.active_tab.show_editor_buffer(editor_buffer)
# Add buffer to CLI.
self.editor.cli.add_buffer(editor_buffer.buffer_name, editor_buffer.buffer)
# Start reporter.
self.editor.run_reporter_for_editor_buffer(editor_buffer)
def _get_or_create_editor_buffer(self, location=None, text=None):
"""
Given a location, return the `EditorBuffer` instance that we have if
the file is already open, or create a new one.
When location is None, this creates a new buffer.
"""
assert location is None or text is None # Don't pass two of them.
assert location is None or isinstance(location, string_types)
def new_name():
""" Generate name for new buffer. """
self._buffer_index += 1
return 'buffer-%i' % self._buffer_index
if location is None:
# Create and add an empty EditorBuffer
eb = EditorBuffer(self.editor, new_name(), text=text)
self._add_editor_buffer(eb)
return eb
else:
# When a location is given, first look whether the file was already
# opened.
eb = self.get_editor_buffer_for_location(location)
# Not found? Create one.
if eb is None:
# Create and add EditorBuffer
eb = EditorBuffer(self.editor, new_name(), location)
self._add_editor_buffer(eb)
return eb
else:
# Found! Return it.
return eb
def open_buffer(self, location=None, show_in_current_window=False):
"""
Open/create a file, load it, and show it in a new buffer.
"""
eb = self._get_or_create_editor_buffer(location)
if show_in_current_window:
self.show_editor_buffer(eb)
def _auto_close_new_empty_buffers(self):
"""
        Close new, empty buffers (like those created when the editor starts
        without any files) once there is no window showing them anymore.
        This should be called every time a window is closed, or when the
        content of a window is replaced by something new.
"""
# Get all visible EditorBuffers
ebs = set()
for t in self.tab_pages:
ebs |= set(t.visible_editor_buffers())
# Remove empty/new buffers that are hidden.
for eb in self.editor_buffers[:]:
if eb.is_new and not eb.location and eb not in ebs and eb.buffer.text == '':
self.editor_buffers.remove(eb)
def close_buffer(self):
"""
Close current buffer. When there are other windows showing the same
buffer, they are closed as well. When no windows are left, the previous
buffer or an empty buffer is shown.
"""
eb = self.active_editor_buffer
# Remove this buffer.
index = self.editor_buffers.index(eb)
self.editor_buffers.remove(eb)
# Close the active window.
self.active_tab.close_active_window()
# Close all the windows that still have this buffer open.
for i, t in enumerate(self.tab_pages[:]):
t.close_editor_buffer(eb)
# Remove tab when there are no windows left.
if t.window_count() == 0:
self.tab_pages.remove(t)
if i >= self.active_tab_index:
self.active_tab_index = max(0, self.active_tab_index - 1)
# When there are no windows/tabs left, create a new tab.
if len(self.tab_pages) == 0:
self.active_tab_index = None
if len(self.editor_buffers) > 0:
# Open the previous buffer.
new_index = (len(self.editor_buffers) + index - 1) % len(self.editor_buffers)
eb = self.editor_buffers[new_index]
# Create a window for this buffer.
self.tab_pages.append(TabPage(Window(eb)))
self.active_tab_index = 0
else:
# Create a new buffer. (This will also create the window
# automatically.)
eb = self._get_or_create_editor_buffer()
def create_tab(self, location=None):
"""
Create a new tab page.
"""
eb = self._get_or_create_editor_buffer(location)
self.tab_pages.insert(self.active_tab_index + 1, TabPage(Window(eb)))
self.active_tab_index += 1
def list_open_buffers(self):
"""
        Return an `OpenBufferInfo` list that gives information about the
open buffers.
"""
active_eb = self.active_editor_buffer
visible_ebs = self.active_tab.visible_editor_buffers()
def make_info(i, eb):
return OpenBufferInfo(
index=i,
editor_buffer=eb,
is_active=(eb == active_eb),
is_visible=(eb in visible_ebs))
return [make_info(i, eb) for i, eb in enumerate(self.editor_buffers)]
class OpenBufferInfo(object):
"""
Information about an open buffer, returned by
`WindowArrangement.list_open_buffers`.
"""
def __init__(self, index, editor_buffer, is_active, is_visible):
self.index = index
self.editor_buffer = editor_buffer
self.is_active = is_active
self.is_visible = is_visible
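# Illustrative shape of the split tree maintained above (buffers hypothetical):
#
#   TabPage.root == VSplit([Window(eb1), HSplit([Window(eb2), Window(eb3)])])
#
# i.e. eb1 fills the left column, while eb2 sits above eb3 in the right column.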
|
{
"content_hash": "92aed50fa6e32e1d16feffbc94760628",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 99,
"avg_line_length": 32.935543278084715,
"alnum_prop": 0.5747036457168418,
"repo_name": "tianzhihen/pyvim",
"id": "2315752dae068a17f6dd731fe7f838256f5bda9d",
"size": "17884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyvim/window_arrangement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "109564"
}
],
"symlink_target": ""
}
|
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import
from __future__ import with_statement
import collections
from itertools import chain
import weakref
from . import legacy
from . import registry
from .. import exc
from .. import util
from ..util import threading
class RefCollection(util.MemoizedSlots):
__slots__ = ("ref",)
def _memoized_attr_ref(self):
return weakref.ref(self, registry._collection_gced)
class _empty_collection(object):
def append(self, element):
pass
def extend(self, other):
pass
def remove(self, element):
pass
def __iter__(self):
return iter([])
def clear(self):
pass
class _ClsLevelDispatch(RefCollection):
"""Class-level events on :class:`._Dispatch` classes."""
__slots__ = (
"name",
"arg_names",
"has_kw",
"legacy_signatures",
"_clslevel",
"__weakref__",
)
def __init__(self, parent_dispatch_cls, fn):
self.name = fn.__name__
argspec = util.inspect_getfullargspec(fn)
self.arg_names = argspec.args[1:]
self.has_kw = bool(argspec.varkw)
self.legacy_signatures = list(
reversed(
sorted(
getattr(fn, "_legacy_signatures", []), key=lambda s: s[0]
)
)
)
fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn)
self._clslevel = weakref.WeakKeyDictionary()
def _adjust_fn_spec(self, fn, named):
if named:
fn = self._wrap_fn_for_kw(fn)
if self.legacy_signatures:
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
pass
else:
fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
return fn
def _wrap_fn_for_kw(self, fn):
def wrap_kw(*args, **kw):
argdict = dict(zip(self.arg_names, args))
argdict.update(kw)
return fn(**argdict)
return wrap_kw
def insert(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def append(self, event_key, propagate):
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
"Can't assign an event directly to the %s class" % target
)
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._assign_cls_collection(cls)
self._clslevel[cls].append(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def _assign_cls_collection(self, target):
if getattr(target, "_sa_propagate_class_events", True):
self._clslevel[target] = collections.deque()
else:
self._clslevel[target] = _empty_collection()
def update_subclass(self, target):
if target not in self._clslevel:
self._assign_cls_collection(target)
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend(
[fn for fn in self._clslevel[cls] if fn not in clslevel]
)
def remove(self, event_key):
target = event_key.dispatch_target
stack = [target]
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
if cls in self._clslevel:
self._clslevel[cls].remove(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
"""Clear all class level listeners"""
to_clear = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ClsLevelDispatch at the class level of
a dispatcher, this returns self.
"""
return self
class _InstanceLevelDispatch(RefCollection):
__slots__ = ()
def _adjust_fn_spec(self, fn, named):
return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_InstanceLevelDispatch):
"""Serves as a proxy interface to the events
served by a _ClsLevelDispatch, when there are no
instance-level events present.
Is replaced by _ListenerCollection when instance-level
events are added.
"""
propagate = frozenset()
listeners = ()
__slots__ = "parent", "parent_listeners", "name"
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self.parent = parent # _ClsLevelDispatch
self.parent_listeners = parent._clslevel[target_cls]
self.name = parent.name
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _EmptyListener at the instance level of
a dispatcher, this generates a new
_ListenerCollection, applies it to the instance,
and returns it.
"""
result = _ListenerCollection(self.parent, obj._instance_cls)
if getattr(obj, self.name) is self:
setattr(obj, self.name, result)
else:
assert isinstance(getattr(obj, self.name), _JoinedListener)
return result
def _needs_modify(self, *args, **kw):
raise NotImplementedError("need to call for_modify()")
exec_once = insert = append = remove = clear = _needs_modify
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners)
def __iter__(self):
return iter(self.parent_listeners)
def __bool__(self):
return bool(self.parent_listeners)
__nonzero__ = __bool__
class _CompoundListener(_InstanceLevelDispatch):
__slots__ = "_exec_once_mutex", "_exec_once"
def _memoized_attr__exec_once_mutex(self):
return threading.Lock()
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
with self._exec_once_mutex:
if not self._exec_once:
try:
self(*args, **kw)
finally:
self._exec_once = True
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
for fn in self.listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners) + len(self.listeners)
def __iter__(self):
return chain(self.parent_listeners, self.listeners)
def __bool__(self):
return bool(self.listeners or self.parent_listeners)
__nonzero__ = __bool__
class _ListenerCollection(_CompoundListener):
"""Instance-level attributes on instances of :class:`._Dispatch`.
Represents a collection of listeners.
As of 0.7.9, _ListenerCollection is only first
created via the _EmptyListener.for_modify() method.
"""
__slots__ = (
"parent_listeners",
"parent",
"name",
"listeners",
"propagate",
"__weakref__",
)
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self._exec_once = False
self.parent_listeners = parent._clslevel[target_cls]
self.parent = parent
self.name = parent.name
self.listeners = collections.deque()
self.propagate = set()
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ListenerCollection at the instance level of
a dispatcher, this returns self.
"""
return self
def _update(self, other, only_propagate=True):
"""Populate from the listeners in another :class:`_Dispatch`
object."""
existing_listeners = self.listeners
existing_listener_set = set(existing_listeners)
self.propagate.update(other.propagate)
other_listeners = [
l
for l in other.listeners
if l not in existing_listener_set
and not only_propagate
or l in self.propagate
]
existing_listeners.extend(other_listeners)
to_associate = other.propagate.union(other_listeners)
registry._stored_in_collection_multi(self, other, to_associate)
def insert(self, event_key, propagate):
if event_key.prepend_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def append(self, event_key, propagate):
if event_key.append_to_list(self, self.listeners):
if propagate:
self.propagate.add(event_key._listen_fn)
def remove(self, event_key):
self.listeners.remove(event_key._listen_fn)
self.propagate.discard(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self):
registry._clear(self, self.listeners)
self.propagate.clear()
self.listeners.clear()
class _JoinedListener(_CompoundListener):
__slots__ = "parent", "name", "local", "parent_listeners"
def __init__(self, parent, name, local):
self._exec_once = False
self.parent = parent
self.name = name
self.local = local
self.parent_listeners = self.local
@property
def listeners(self):
return getattr(self.parent, self.name)
def _adjust_fn_spec(self, fn, named):
return self.local._adjust_fn_spec(fn, named)
def for_modify(self, obj):
self.local = self.parent_listeners = self.local.for_modify(obj)
return self
def insert(self, event_key, propagate):
self.local.insert(event_key, propagate)
def append(self, event_key, propagate):
self.local.append(event_key, propagate)
def remove(self, event_key):
self.local.remove(event_key)
def clear(self):
raise NotImplementedError()
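# Illustrative flow (public API shown for orientation; names are placeholders):
#
#   from sqlalchemy import event
#   event.listen(SomeTarget, "some_event", fn)  # ends up in append()/insert()
#   some_object.dispatch.some_event(*args)      # __call__ above fires listeners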
|
{
"content_hash": "453b66fb5e68f266ffd82f707c71204a",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 77,
"avg_line_length": 30.82125603864734,
"alnum_prop": 0.5983542319749217,
"repo_name": "wujuguang/sqlalchemy",
"id": "9dfa89809dc985d5504055c2260f58ed93b50baf",
"size": "12994",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/event/attr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45930"
},
{
"name": "Python",
"bytes": "11287383"
}
],
"symlink_target": ""
}
|
def extractKuromarutranslationsHomeBlog(item):
'''
Parser for 'kuromarutranslations.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
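# Illustrative call (the helper functions are provided by the surrounding
# framework; the item shape is inferred from the code, values are made up):
#
#   extractKuromarutranslationsHomeBlog(
#       {'title': 'Some Novel - Chapter 12', 'tags': ['PRC']})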
|
{
"content_hash": "6552f0ddf65e1b27cc1654ec4b82b712",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.19047619047619,
"alnum_prop": 0.6427320490367776,
"repo_name": "fake-name/ReadableWebProxy",
"id": "5c946ed920425c4b70d36c0c9554e2b83b8da626",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractKuromarutranslationsHomeBlog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from rapidsms.backends.base import BackendBase as RapidBackendBase
class BackendBase(RapidBackendBase):
"""
Backend that overrides the default RapidSMS backend to keep threads
from starting.
"""
def start(self):
""" Override BackendBase.start(), which never returns """
self._running = True
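# Minimal sketch of a concrete backend built on this base (hypothetical class;
# `message.text` follows the RapidSMS message interface):
#
#   class EchoBackend(BackendBase):
#       def send(self, message):
#           print('would send: %s' % message.text)  # deliver synchronously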
|
{
"content_hash": "472173db692417551209dadf6eb8394d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 27.5,
"alnum_prop": 0.693939393939394,
"repo_name": "caktus/rapidsms-threadless-router",
"id": "f9b93a6f7dab159899b3088cdb0cf3f604ade6cc",
"size": "330",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "threadless_router/backends/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36982"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
@pytest.fixture
def sparse_df():
return pd.SparseDataFrame({0: {0: 1}, 1: {1: 1}, 2: {2: 1}}) # eye
@pytest.fixture
def multi_index3():
return pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_frame_stack(sparse_df, multi_index3):
ss = sparse_df.stack()
expected = pd.SparseSeries(np.ones(3), index=multi_index3)
tm.assert_sp_series_equal(ss, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_frame_unstack(sparse_df):
mi = pd.MultiIndex.from_tuples([(0, 0), (1, 0), (1, 2)])
sparse_df.index = mi
arr = np.array([[1, np.nan, np.nan], [np.nan, 1, np.nan], [np.nan, np.nan, 1]])
unstacked_df = pd.DataFrame(arr, index=mi).unstack()
unstacked_sdf = sparse_df.unstack()
tm.assert_numpy_array_equal(unstacked_df.values, unstacked_sdf.values)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_unstack(sparse_df, multi_index3):
frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()
arr = np.array([1, np.nan, np.nan])
arrays = {i: pd.SparseArray(np.roll(arr, i)) for i in range(3)}
expected = pd.DataFrame(arrays)
tm.assert_frame_equal(frame, expected)
|
{
"content_hash": "015435991517261473a3cb311e6df91e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 83,
"avg_line_length": 31.767441860465116,
"alnum_prop": 0.6793557833089312,
"repo_name": "kushalbhola/MyStuff",
"id": "bb5232f065a0496aeba1720672529d8e076beb73",
"size": "1366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/tests/sparse/test_reshape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
'''
@Author: Esperanza Ramirez Armijos
@Topic: Ecuadorian Football Predictor
@Description: Contains the info_liga class, which processes and stores the statistics contained in the various .txt files.
'''
import sys
import os
# INFO_LIGA CLASS
class info_liga:
    # Constructor that takes only the matchday (jornada) as an argument
def __init__(self, jornada):
self.jornada=jornada
self.timing=None
self.timing_partes=None
self.ratios=None
self.ratios_partes=None
self.casa=None
self.fuera=None
self.clasif=None
self.partidosCasa=None
self.partidosFuera=None
self.infopartidos=None
self.goleslocal=None
self.golesvisitante=None
self.empates0=None
self.golestotal=None
self.marcaprimero=None
self.encajaprimero=None
self.minimoR=0
self.maximoR=0
self.siguiente_jornada=None
    # __str__ method, invoked whenever an instance of the class is printed
def __str__(self):
mensaje="Instancia para la jornada"+ str(self.jornada)
return mensaje
    # Setter and getter for the matchday value
def set_jornada(self, ultima_jornada):
self.jornada=ultima_jornada
def get_jornada(self):
return self.jornada
    # Method that reads the information in general.txt to set the values of
    # - clasif, partidosCasa, partidosFuera, infopartidos
def procesar_clasificacion(self):
fi=open('infotxt/general.txt', 'r')
info = [i for i in fi]
fi.close()
#print(info)
while '\n' in info:
info.remove('\n')
todo={}
clasif={}
partidoscasa={}
partidosfuera={}
ipartidos={}
cont=0
for i in info:
temp=i.split('\t')
if not cont:
eq=temp[1]
aux=[]
aux=[j for j in temp if j!=temp[1]]
cont=1
else:
for j in temp:
aux.append(j)
cont+=1
if cont==3:
todo[eq]=aux[:]
cont=0
aux.clear()
for i in todo:
aux=todo[i]
clasif[i]=[aux[0], aux[1], aux[8], aux[2], aux[3], aux[4], aux[5], aux[6], aux[7]]
partidoscasa[i]=aux[10:15]
partidosfuera[i]=aux[21:26]
ipartidos[i]=aux[27:32]
ipartidos[i][4]=ipartidos[i][4][0:3]
self.clasif=clasif
self.partidosCasa=partidoscasa
self.partidosFuera=partidosfuera
self.infopartidos=ipartidos
    # Method that returns a list with every team name (the keys of the maps)
def get_equipos(self):
return [i for i in self.clasif]
    # Getters for the variables assigned after running procesar_clasificacion
def get_clasif(self):
return self.clasif
def get_partidosCasa(self):
return self.partidosCasa
def get_partidosFuera(self):
return self.partidosFuera
def get_infopartidos(self):
return self.infopartidos
    # Method that reads the information in timing.txt to set the values of
    # - timing, timing_partes, ratios, ratios_partes
def procesar_timing(self):
fi=open('infotxt/timing.txt', 'r')
info = [i for i in fi]
fi.close()
timing={}
timing_partes={}
ratios={}
ratios_partes={}
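        # The slicing below assumes each timing.txt cell looks like "12 - 3"
        # (a one- or two-digit number, " - ", another number): the first
        # number occupies characters 0-1 and the second starts at index 4.
        # This layout is inferred from the slice offsets, not documented.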
for i in info:
temp=i.split('\t')
aux=temp[1:11]
timing[temp[0]]=aux
for i in info:
temp=i.split('\t')
aux=temp[10:13]
timing_partes[temp[0]]=[aux[-2], aux[-1]]
for i in timing:
aux=timing[i]
t=[]
for j in aux:
if j!='':
if j[1]!=' ':
a=int(j[0:2])
else:
a=int(j[0])
if j[5]!=' ':
b=int(j[4:6])
else:
b=int(j[4])
ratio=a-b
t.append(ratio)
ratios[i]=t[:]
t.clear()
for i in timing_partes:
aux=timing_partes[i]
t=[]
for j in aux:
if j!='':
if j[1]!=' ':
a=int(j[0:2])
else:
a=int(j[0])
if j[5]!='\\':
b=int(j[4:6])
else:
b=int(j[4])
ratio=a-b
t.append(ratio)
ratios_partes[i]=t[:]
t.clear()
self.timing=timing
self.timing_partes=timing_partes
self.ratios=ratios
self.ratios_partes=ratios_partes
    # Getters for the variables assigned after running procesar_timing
def get_timing(self):
return self.timing
def get_timing_partes(self):
return self.timing_partes
def get_ratios(self):
return self.ratios
def get_ratios_partes(self):
return self.ratios_partes
    # Method that uses the information already stored in ratios to set the values of
    # - maximoR, minimoR
def procesar_ratiomaxmin(self):
        if self.ratios is not None:
max_temp = min_temp = 0
for i in self.ratios:
aux=self.ratios[i]
if max(aux)>max_temp:
max_temp=max(aux)
if min(aux)<min_temp:
min_temp=min(aux)
self.maximoR=max_temp
self.minimoR=min_temp
    # Getters for the variables assigned after running procesar_ratiomaxmin
def get_maximoR(self):
return self.maximoR
def get_minimoR(self):
return self.minimoR
    # Method that reads goleslocal.txt, golesvisitante.txt and golestotal.txt to set the values of
    # - goleslocal, golesvisitante, golestotal
def procesar_goles(self):
info=[]
gcasa={}
gfuera={}
gall={}
        # Home goals
fi=open('infotxt/goleslocal.txt', 'r')
        info = [i for i in fi]
fi.close()
cont=0
for i in info:
temp=i.split('\t')
if not cont:
eq=temp[0]
aux=[]
aux=[j for j in temp if j!=temp[0] and j!='\n' and j!=temp[2]]
cont=1
else:
for j in temp:
if j!='':
aux.append(j)
if cont==1:
gcasa[eq]=aux[:]
gcasa[eq][6]=gcasa[eq][6][:-1]
cont=0
aux.clear()
info.clear()
        # Away goals
fi=open('infotxt/golesvisitante.txt', 'r')
for i in fi:
info.append(i)
fi.close()
cont=0
for i in info:
temp=i.split('\t')
if not cont:
eq=temp[0]
aux=[]
aux=[j for j in temp if j!=temp[0] and j!='\n' and j!=temp[2]]
cont=1
else:
for j in temp:
if j!='':
aux.append(j)
if cont==1:
gfuera[eq]=aux[:]
gfuera[eq][6]=gfuera[eq][6][:-1]
cont=0
aux.clear()
info.clear()
        # Total goals
fi=open('infotxt/golestotal.txt', 'r')
for i in fi:
info.append(i)
fi.close()
cont=0
for i in info:
temp=i.split('\t')
if not cont:
eq=temp[0]
aux=[]
aux=[j for j in temp if j!=temp[0] and j!='\n' and j!=temp[2]]
cont=1
else:
for j in temp:
if j!='':
aux.append(j)
if cont == 1:
gall[eq]=aux[:]
gall[eq][6]=gall[eq][6][:-1]
cont=0
aux.clear()
self.goleslocal=gcasa
self.golesvisitante=gfuera
self.golestotal=gall
    # Getters for the variables assigned after running procesar_goles
def get_golesLocal(self):
return self.goleslocal
def get_golesVisitante(self):
return self.golesvisitante
def get_golesTotal(self):
return self.golestotal
    # Method that reads the information in first.txt to set the values of
    # - casa, fuera, empates0
def procesar_primero(self):
fi=open('infotxt/first.txt', 'r')
info = [i for i in fi]
fi.close()
casa={}
fuera={}
for i in info:
temp2=i.split("\t")
temp=[i for i in temp2 if i!=' ' and i!=' ' and i!='']
            # guard against empty rows; note the extra list nesting in casa
            # (casa[k][0] holds the values), which the empates lookup below
            # relies on
            if temp:
                casa[temp[0]]=[temp[1:6]]
                fuera[temp[0]]=[temp[6], temp[7], temp[8], temp[9], temp[10]]
empates={}
for i in self.ratios:
k=i[1:]
empates[i]=int(casa[k][0][1])+int(fuera[k][1])
self.casa=casa
self.fuera=fuera
self.empates0=empates
    # Getters for the variables assigned after running procesar_primero
def get_casa(self):
return self.casa
def get_fuera(self):
return self.fuera
def get_empates0(self):
return self.empates0
    # Method that reads the information in marcaprimero.txt to set the value of
    # - marcaprimero
def procesar_marcaprimero(self):
info=[]
victoria_primero={}
fi=open('infotxt/marcaprimero.txt', 'r')
for i in fi:
info.append(i)
fi.close()
for i in info:
temp2=i.split("\t")
temp=[i for i in temp2 if i!=' ' and i!=' ' and i!='']
victoria_primero[temp[0][1:-1]]=[temp[3], temp[4], temp[5]]
self.marcaprimero=victoria_primero
    # Method that reads the information in encajaprimero.txt to set the value of
    # - encajaprimero
def procesar_encajaprimero(self):
encaja_primero={}
fi=open('infotxt/encajaprimero.txt', 'r')
info = [i for i in fi]
fi.close()
for i in info:
temp2=i.split("\t")
temp=[i for i in temp2 if i!=' ' and i!=' ' and i!='']
encaja_primero[temp[0][1:-1]]=[temp[3], temp[4], temp[5]]
self.encajaprimero=encaja_primero
    # Getters for the variables assigned after running procesar_marcaprimero and procesar_encajaprimero
def get_marcaPrimero(self):
return self.marcaprimero
def get_encajaPrimero(self):
return self.encajaprimero
    # Method that sets the matches of the next matchday
def set_siguiente_jornada(self, partidos):
self.siguiente_jornada=[]
for i in partidos:
self.siguiente_jornada.append(i)
    # Method that returns the list of lists with the next matchday's matches
def get_siguiente_jornada(self):
return self.siguiente_jornada
    # Method that compiles all the information into the current instance
def procesar_todo(self):
self.procesar_clasificacion()
self.procesar_timing()
self.procesar_goles()
self.procesar_primero()
self.procesar_marcaprimero()
self.procesar_encajaprimero()
self.procesar_ratiomaxmin()
# END OF INFO_LIGA CLASS
# DEMO MAIN
if __name__ == "__main__":
j=7
test=info_liga(j)
test.procesar_todo()
    # To verify that the data was collected correctly
    print('Matchday value for the given j (get_jornada)')
    print(test.get_jornada())
    print('\nAll teams (get_equipos)')
    print(test.get_equipos())
    print('\nStandings (get_clasif)')
    print(test.get_clasif())
    print('\nHome matches (get_partidosCasa)')
    print(test.get_partidosCasa())
    print('\nAway matches (get_partidosFuera)')
    print(test.get_partidosFuera())
    print('\nMatch information (get_infopartidos)')
    print(test.get_infopartidos())
    print('\nTiming (get_timing)')
    print(test.get_timing())
    print('\nTiming by halves (get_timing_partes)')
    print(test.get_timing_partes())
    print('\nRatios (get_ratios)')
    print(test.get_ratios())
    print('\nRatios by halves (get_ratios_partes)')
    print(test.get_ratios_partes())
    print('\nMaximum ratio (get_maximoR)')
    print(test.get_maximoR())
    print('\nMinimum ratio (get_minimoR)')
    print(test.get_minimoR())
    print('\nHome goals (get_golesLocal)')
    print(test.get_golesLocal())
    print('\nAway goals (get_golesVisitante)')
    print(test.get_golesVisitante())
    print('\nTotal goals (get_golesTotal)')
    print(test.get_golesTotal())
    print('\nHome first-goal data (get_casa)')
    print(test.get_casa())
    print('\nAway first-goal data (get_fuera)')
    print(test.get_fuera())
    print('\nGoalless draws (get_empates0)')
    print(test.get_empates0())
    print('\nScores first (get_marcaPrimero)')
    print(test.get_marcaPrimero())
    print('\nConcedes first (get_encajaPrimero)')
    print(test.get_encajaPrimero())
    print('\nNext matchday (get_siguiente_jornada)')
    print(test.get_siguiente_jornada())
|
{
"content_hash": "57e1670c73c28b9f346e4374ec0a1283",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 147,
"avg_line_length": 32.52183908045977,
"alnum_prop": 0.5270375344596028,
"repo_name": "mariaesperanza/Pronosticador",
"id": "b13c4e51d4b3117d98b37b4d885dede1b0487694",
"size": "14196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "recoleccion_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21701"
},
{
"name": "HTML",
"bytes": "57324"
},
{
"name": "Python",
"bytes": "75252"
}
],
"symlink_target": ""
}
|
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
@tf_export(v1=["summary.FileWriter"])
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
instead forms a compatibility layer over new graph-based summaries
(`tf.contrib.summary`) to facilitate the use of new summary writing with
pre-existing code that expects a `FileWriter` instance.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None,
filename_suffix=None,
session=None):
"""Creates a `FileWriter`, optionally shared within the given session.
Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
The `session` argument to the constructor makes the returned `FileWriter` a
compatibility layer over new graph-based summaries (`tf.contrib.summary`).
Crucially, this means the underlying writer resource and events file will
be shared with any other `FileWriter` using the same `session` and `logdir`,
  and with any `tf.contrib.summary.SummaryWriter` in this session using the
  same shared resource name (which by default is scoped to the logdir). If
no such resource exists, one will be created using the remaining arguments
to this constructor, but if one already exists those arguments are ignored.
In either case, ops will be added to `session.graph` to control the
underlying file writer resource. See `tf.contrib.summary` for more details.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
session: A `tf.compat.v1.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`FileWriter` is not compatible with eager execution. To write TensorBoard
summaries under eager execution, use `tf.contrib.summary` instead.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"tf.summary.FileWriter is not compatible with eager execution. "
"Use tf.contrib.summary instead.")
if session is not None:
event_writer = EventFileWriterV2(
session, logdir, max_queue, flush_secs, filename_suffix)
else:
event_writer = EventFileWriter(logdir, max_queue, flush_secs,
filename_suffix)
self._closed = False
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def _warn_if_event_writer_is_closed(self):
if self._closed:
warnings.warn("Attempting to use a closed FileWriter. "
"The operation will be a noop unless the FileWriter "
"is explicitly reopened.")
def _add_event(self, event, step):
self._warn_if_event_writer_is_closed()
super(FileWriter, self)._add_event(event, step)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self._warn_if_event_writer_is_closed()
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
# Flushing a closed EventFileWriterV2 raises an exception. It is,
# however, a noop for EventFileWriter.
self._warn_if_event_writer_is_closed()
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
self._closed = True
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
self._closed = False
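# Minimal usage sketch (not part of the original module; assumes TF 1.x graph
# mode and a writable log directory): the __enter__/__exit__ pair above makes
# the writer usable as a context manager, closing it on exit.
#   with tf.compat.v1.summary.FileWriter("/tmp/logs", sess.graph) as writer:
#       writer.add_summary(summary_proto, global_step=step)
#   # after the block, close() has run; reopen() would start a new event file.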
|
{
"content_hash": "4aaf7d3ac234782e3dd57c74cc9c94b1",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 83,
"avg_line_length": 39.51558752997602,
"alnum_prop": 0.6844277218108994,
"repo_name": "adit-chandra/tensorflow",
"id": "9757ed7db4f4c52582ee59084cf045693f23211a",
"size": "17167",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/summary/writer/writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76734263"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299322"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38764318"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``
Revision ID: 30867afad44a
Revises: e9304a3141f0
Create Date: 2021-06-04 22:11:19.849981
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '30867afad44a'
down_revision = 'e9304a3141f0'
branch_labels = None
depends_on = None
airflow_version = '2.2.0'
def upgrade():
"""Apply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'concurrency',
new_column_name='max_active_tasks',
type_=sa.Integer(),
nullable=False,
)
if is_sqlite:
op.execute("PRAGMA foreign_keys=on")
def downgrade():
"""Unapply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'max_active_tasks',
new_column_name='concurrency',
type_=sa.Integer(),
nullable=False,
)
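# Usage sketch (not part of the migration): like any Alembic revision it is
# applied or reverted from the command line, e.g.
#   alembic upgrade 30867afad44a     # rename concurrency -> max_active_tasks
#   alembic downgrade e9304a3141f0   # restore the old column name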
|
{
"content_hash": "5251af9466a27df4fcd70dcf368fdc02",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 27.319148936170212,
"alnum_prop": 0.617601246105919,
"repo_name": "cfei18/incubator-airflow",
"id": "44c74778362467a7371f23e27a76f6c4586c2cf5",
"size": "2071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0090_2_2_0_rename_concurrency_column_in_dag_table_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
from test.util import running_on_windows
class _ExpandTabs(_VimTest):
def _extra_vim_config(self, vim_config):
vim_config.append("set sw=3")
vim_config.append("set expandtab")
class RecTabStopsWithExpandtab_SimpleExample_ECR(_ExpandTabs):
snippets = ("m", "\tBlaahblah \t\t ")
keys = "m" + EX
wanted = " Blaahblah \t\t "
class RecTabStopsWithExpandtab_SpecialIndentProblem_ECR(_ExpandTabs):
# Windows indents the Something line after pressing return, though it
# shouldn't because it contains a manual indent. All other vim versions do
# not do this. Windows vim does not interpret the changes made by :py as
# changes made 'manually', while the other vim version seem to do so. Since
# the fault is not with UltiSnips, we simply skip this test on windows
# completely.
skip_if = lambda self: running_on_windows()
snippets = (("m1", "Something"), ("m", "\t$0"))
keys = "m" + EX + "m1" + EX + "\nHallo"
wanted = " Something\n Hallo"
def _extra_vim_config(self, vim_config):
_ExpandTabs._extra_vim_config(self, vim_config)
vim_config.append("set indentkeys=o,O,*<Return>,<>>,{,}")
vim_config.append("set indentexpr=8")
class ProperIndenting_SimpleCase_ECR(_VimTest):
snippets = ("test", "for\n blah")
keys = " test" + EX + "Hui"
wanted = " for\n blahHui"
class ProperIndenting_SingleLineNoReindenting_ECR(_VimTest):
snippets = ("test", "hui")
keys = " test" + EX + "blah"
wanted = " huiblah"
class ProperIndenting_AutoIndentAndNewline_ECR(_VimTest):
snippets = ("test", "hui")
keys = " test" + EX + "\n" + "blah"
wanted = " hui\n blah"
def _extra_vim_config(self, vim_config):
vim_config.append("set autoindent")
# Test for bug 1073816
class ProperIndenting_FirstLineInFile_ECR(_VimTest):
text_before = ""
text_after = ""
files = {
"us/all.snippets": r"""
global !p
def complete(t, opts):
if t:
opts = [ m[len(t):] for m in opts if m.startswith(t) ]
if len(opts) == 1:
return opts[0]
elif len(opts) > 1:
return "(" + "|".join(opts) + ")"
else:
return ""
endglobal
snippet '^#?inc' "#include <>" !r
#include <$1`!p snip.rv = complete(t[1], ['cassert', 'cstdio', 'cstdlib', 'cstring', 'fstream', 'iostream', 'sstream'])`>
endsnippet
"""
}
keys = "inc" + EX + "foo"
wanted = "#include <foo>"
class ProperIndenting_FirstLineInFileComplete_ECR(ProperIndenting_FirstLineInFile_ECR):
keys = "inc" + EX + "cstdl"
wanted = "#include <cstdlib>"
class _FormatoptionsBase(_VimTest):
def _extra_vim_config(self, vim_config):
vim_config.append("set tw=20")
vim_config.append("set fo=lrqntc")
class FOSimple_Break_ExpectCorrectResult(_FormatoptionsBase):
snippets = ("test", "${1:longer expand}\n$1\n$0", "", "f")
keys = (
"test"
+ EX
+ "This is a longer text that should wrap as formatoptions are enabled"
+ JF
+ "end"
)
wanted = (
"This is a longer\ntext that should\nwrap as\nformatoptions are\nenabled\n"
+ "This is a longer\ntext that should\nwrap as\nformatoptions are\nenabled\n"
+ "end"
)
class FOTextBeforeAndAfter_ExpectCorrectResult(_FormatoptionsBase):
snippets = ("test", "Before${1:longer expand}After\nstart$1end")
keys = "test" + EX + "This is a longer text that should wrap"
wanted = """BeforeThis is a
longer text that
should wrapAfter
startThis is a
longer text that
should wrapend"""
# Tests for https://bugs.launchpad.net/bugs/719998
class FOTextAfter_ExpectCorrectResult(_FormatoptionsBase):
snippets = ("test", "${1:longer expand}after\nstart$1end")
keys = (
"test" + EX + "This is a longer snippet that should wrap properly "
"and the mirror below should work as well"
)
wanted = """This is a longer
snippet that should
wrap properly and
the mirror below
should work as wellafter
startThis is a longer
snippet that should
wrap properly and
the mirror below
should work as wellend"""
class FOWrapOnLongWord_ExpectCorrectResult(_FormatoptionsBase):
snippets = ("test", "${1:longer expand}after\nstart$1end")
keys = "test" + EX + "This is a longersnippet that should wrap properly"
wanted = """This is a
longersnippet that
should wrap properlyafter
startThis is a
longersnippet that
should wrap properlyend"""
|
{
"content_hash": "e8e6a884c605f4436ec415537f8b0981",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 121,
"avg_line_length": 30.1523178807947,
"alnum_prop": 0.6479244454206018,
"repo_name": "khatchad/vimrc",
"id": "8c5c3152d76aa0742eb697c1f673b7f15363e469",
"size": "4553",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sources_non_forked/ultisnips/test/test_Format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "961"
},
{
"name": "C",
"bytes": "11028"
},
{
"name": "C#",
"bytes": "1235"
},
{
"name": "C++",
"bytes": "3464"
},
{
"name": "CMake",
"bytes": "3900"
},
{
"name": "CSS",
"bytes": "950"
},
{
"name": "Clojure",
"bytes": "720"
},
{
"name": "CoffeeScript",
"bytes": "11440"
},
{
"name": "Crystal",
"bytes": "9834"
},
{
"name": "Dart",
"bytes": "4388"
},
{
"name": "Dockerfile",
"bytes": "2148"
},
{
"name": "Elixir",
"bytes": "1903"
},
{
"name": "Elm",
"bytes": "5333"
},
{
"name": "Emacs Lisp",
"bytes": "4563"
},
{
"name": "Go",
"bytes": "1113"
},
{
"name": "HTML",
"bytes": "1634"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "863"
},
{
"name": "Java",
"bytes": "9033"
},
{
"name": "JavaScript",
"bytes": "10452"
},
{
"name": "Lua",
"bytes": "19732"
},
{
"name": "Makefile",
"bytes": "16292"
},
{
"name": "PHP",
"bytes": "2726"
},
{
"name": "PowerShell",
"bytes": "10114"
},
{
"name": "PureScript",
"bytes": "7576"
},
{
"name": "Python",
"bytes": "392724"
},
{
"name": "R",
"bytes": "1288"
},
{
"name": "Ruby",
"bytes": "119025"
},
{
"name": "Rust",
"bytes": "6153"
},
{
"name": "SCSS",
"bytes": "1801"
},
{
"name": "Scala",
"bytes": "1504"
},
{
"name": "Shell",
"bytes": "40972"
},
{
"name": "TypeScript",
"bytes": "4661"
},
{
"name": "VBScript",
"bytes": "7510"
},
{
"name": "Vim Script",
"bytes": "13029765"
},
{
"name": "Vim Snippet",
"bytes": "785859"
},
{
"name": "Vue",
"bytes": "662"
}
],
"symlink_target": ""
}
|
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
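# Splitting on the capturing group alternates literal text with single format
# characters, e.g. re_formatchars.split('jS F') == ['', 'j', '', 'S', ' ', 'F', ''];
# the odd-indexed pieces are format characters, which Formatter.format below
# dispatches to the same-named methods.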
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
if i % 2:
pieces.append(force_text(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError('may be implemented in a future release')
def e(self):
"""
Timezone name.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
# Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
return self.data.tzinfo.tzname(self.data) or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self):
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
seconds = self.Z()
if seconds == "":
return ""
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
name = None
try:
name = self.timezone.tzname(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
pass
if name is None:
name = self.format('O')
return six.text_type(name)
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
offset = self.timezone.utcoffset(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ""
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
try:
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ''
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
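        # Worked example: 2016-01-01 is a Friday, so jan1_weekday == 5 and
        # day_of_year == 1; since 1 <= (8 - 5) and jan1_weekday > 4, the date
        # falls in ISO week 53 of the previous year.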
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return six.text_type(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
|
{
"content_hash": "d257cddc6e4682b0ca3c2459ae33b42e",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 102,
"avg_line_length": 32.07774798927614,
"alnum_prop": 0.5370664437944004,
"repo_name": "yephper/django",
"id": "2f6ad431f60e0154db1c2cd63cb57c30205e8739",
"size": "11965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/utils/dateformat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
class UdacityPipeline(object):
def process_item(self, item, spider):
return item
|
{
"content_hash": "f7662107a07e31ddd9a0f4007131c7b4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31,
"alnum_prop": 0.6881720430107527,
"repo_name": "cdhekne/Masters_Thesis",
"id": "10d6761318c86a7c4afccb0eed2ea490358bc992",
"size": "287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Thesis_WebCrawler/udacity/udacity/pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "466"
},
{
"name": "CSS",
"bytes": "464970"
},
{
"name": "HTML",
"bytes": "368686"
},
{
"name": "Java",
"bytes": "84571"
},
{
"name": "JavaScript",
"bytes": "556245"
},
{
"name": "PHP",
"bytes": "14557"
},
{
"name": "Python",
"bytes": "11890"
},
{
"name": "Web Ontology Language",
"bytes": "40414"
}
],
"symlink_target": ""
}
|
import sys
import random
import string
import pycurl
import json
#STEP 2 INIT BOOLS
josh = False
bcheap = False
ncheap = False
decided = False
#STEP 3 CHECK ARGS, WARN FOR SHIT INPUT
if len(sys.argv) > 2:
if (sys.argv[1] == 'cheap' and sys.argv[2] == 'ncheap') or (sys.argv[1] == 'ncheap' and sys.argv[2] == 'cheap'):
print "ugh wtf are you doing? I am not writing the error handling for stupidity. You're gonna get cheap food"
if len(sys.argv) > 1 and sys.argv[1]=='josh':
josh = True
elif len(sys.argv) > 1 and sys.argv[1]=='cheap':
bcheap = True
elif len(sys.argv) > 1 and sys.argv[1]=='ncheap':
ncheap = True
#STEP 4 KEEP CHECKING ARGS
if len(sys.argv) > 2:
if sys.argv[2]=='cheap':
bcheap=True
elif sys.argv[2] == 'ncheap':
ncheap=True
else:
print "You fucked up... discarding arg " + sys.argv[2]
#OPEN THEM DATA FILES FOR READ/READ+
Cheap = open("cheapeats.txt","r")
NotCheap = open("notcheapeats.txt","r")
Veto = open("vetolist.txt","r")
AteThere = open("atethere.txt","r+")
#READ THEM DATAFILES, FILL THEM LISTS
cheapeats = Cheap.readlines()
notcheapeats = NotCheap.readlines()
veto = Veto.readlines()
atethere = AteThere.readlines()
#CHECK AGAINST LAST 5 PLACES EATEN AT
if len(atethere) > 5:
    AteThere.close()
    AteThere = open("atethere.txt",'w')
    #KEEP ONLY THE FIVE MOST RECENT ENTRIES
    for item in atethere[-5:]:
        AteThere.write(item)
#CLEAR THE DATAFILE, REWRITE LAST PLACES EATEN AT FROM LIST
AteThere.close()
NotAgain = open("atethere.txt","r")
notagain = NotAgain.readlines()
#CHECK CASES FOR CHEAP OR NOT CHEAT OPTIONS, CONCATENATE LISTS
if not bcheap and not ncheap:
eats = cheapeats+notcheapeats
elif (bcheap):
eats = cheapeats
elif (ncheap):
eats = notcheapeats
#REMOVE ANYTHING IN LIST THAT WAS CHOSEN LAST 5 TIMES
eats = [x for x in eats if x not in notagain]
#IF JOSH IS COMING, SCRUB ENTRIES THAT HE DOESN'T LIKE
if (josh):
eats = [x for x in eats if x not in veto]
#SEED RANDOM FUNCTION
while not (decided):
secure_random = random.SystemRandom()
#SELECT LUNCH SPOT
lunch = secure_random.choice(eats)
#RUN AND TELL DAT TO CONSOLE
print lunch
#DID GROUP ACCEPT CHOICE? IF SO KILL LOOP
vote = raw_input("Yay or nay?")
if (vote == 'y' or vote == 'Y' or vote == 'Yay'):
decided = True
#ADD CHOICE TO ATE THERE LIST
AteThere=open("atethere.txt","a")
AteThere.write(lunch)
AteThere.close()
#RUN AND TELL DAT TO SLACK
#DEFINE JSON MESSAGE:
data = json.dumps({"username": "LUNCH WILL BE AT:", "icon_emoji": ":hamburger:", "text": lunch})
#DEFINE SLACK WEBHOOK URL:
slackurl='https://hooks.slack.com/services/PUT/YOURURL/HERE'
#Create CURL object
c = pycurl.Curl()
#Define CURL Paramters
c.setopt(c.URL, slackurl)
c.setopt(c.HTTPHEADER, [
'Content-Type: application/json'
])
c.setopt(c.POSTFIELDS, data)
c.setopt(pycurl.POST, 1)
#POST TO SLACK
c.perform()
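#RELEASE THE CURL HANDLE
c.close()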
|
{
"content_hash": "bb402a5f8130ae9af7afa029160868b1",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 113,
"avg_line_length": 25.472727272727273,
"alnum_prop": 0.7009279086366881,
"repo_name": "packetracer/lunch",
"id": "41a299d4ab02273f421729fa4c84beff5a10a1f1",
"size": "2867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lunch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2867"
}
],
"symlink_target": ""
}
|
from backend.common.consts.award_type import AwardType
class DistrictPointValues:
"""
A class that contains various district point constants over the years:
Documents containing point systems:
- 2016: same as 2015
- 2015: http://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/archive/2015/AdminManual20150407.pdf
- 2014: http://www.firstinmichigan.org/FRC_2014/District_Standard_Points_Ranking_System.pdf
- 2013: http://www.firstinmichigan.org/FRC_2013/2013_Rules_Supplement.pdf
- 2012: http://www.firstinmichigan.org/FRC_2012/2012_Rules_Supplement.pdf
- 2011: http://www.firstinmichigan.org/FRC_2011/2011_Rules_Supplement.pdf
- 2010: http://www.firstinmichigan.org/FRC_2010/2010_Update_3.pdf
- 2009: https://www.chiefdelphi.com/forums/showpost.php?p=759453&postcount=67
"""
STANDARD_MULTIPLIER = 1
    # Since 2014, points earned at District CMP have carried a 3x bonus
DISTRICT_CMP_MULIPLIER_DEFAULT = 3
DISTRICT_CMP_MULTIPLIER = {2013: 1, 2012: 1, 2011: 1, 2010: 1, 2009: 1}
    # In years prior to 2015, teams got points for a win/tie in a qualification match
MATCH_WIN = 2
MATCH_TIE = 1
# Used to determine alliance selection points
# Captain/First pick get 17-alliance number, second pick gets 17 - draft order
ALLIANCE_MAX_DEFAULT = 17
    # In 2009 - 2013 (except 2010), second pick teams got fewer elim round advancement points than captain/pick 1
# TODO many of these events don't have alliance selection data, so we can't factor this in
ELIM_SECOND_PICK_MULTIPLIER_DEFAULT = 1
ELIM_SECOND_PICK_MULTIPLIER = {2013: 0.8, 2012: 0.8, 2011: 0.8, 2009: 0.8}
# Used to determine elim/playoff points.
    # Teams on each round's winning alliance get points per match won
# For the 2015 game, these are awarded for participating in a qf/sf match, since there were no wins
QF_WIN_DEFAULT = 5
QF_WIN = {2015: 5.0}
SF_WIN_DEFAULT = 5
SF_WIN = {
2015: 3.3,
}
F_WIN_DEFAULT = 5
F_WIN = {2015: 5.0}
# Chairman's Award
CHAIRMANS_DEFAULT = 10
CHAIRMANS = {2013: 0, 2012: 0, 2011: 0, 2009: 0}
# Engineering Inspiration and Rookie All-Star
EI_AND_RAS_DEFAULT = 8
OTHER_AWARD_DEFAULT = 5
# Points for playing your first two events as
# back-to-back single day events
BACK_TO_BACK_2022_BONUS = 2
# Pre-2014 Awards, all worth either 5 or 2 points
LEGACY_5_PT_AWARDS = {
2013: [
AwardType.INDUSTRIAL_DESIGN,
AwardType.QUALITY,
AwardType.ENGINEERING_EXCELLENCE,
AwardType.INNOVATION_IN_CONTROL,
AwardType.CREATIVITY,
],
2012: [
AwardType.INDUSTRIAL_DESIGN,
AwardType.QUALITY,
AwardType.ENGINEERING_EXCELLENCE,
AwardType.INNOVATION_IN_CONTROL,
AwardType.CREATIVITY,
AwardType.ENTREPRENEURSHIP,
AwardType.COOPERTITION,
],
2011: [
AwardType.INDUSTRIAL_DESIGN,
AwardType.QUALITY,
AwardType.ENGINEERING_EXCELLENCE,
AwardType.INNOVATION_IN_CONTROL,
AwardType.CREATIVITY,
AwardType.ENTREPRENEURSHIP,
AwardType.COOPERTITION,
AwardType.EXCELLENCE_IN_DESIGN,
],
2010: [
AwardType.INDUSTRIAL_DESIGN,
AwardType.QUALITY,
AwardType.ENGINEERING_EXCELLENCE,
AwardType.INNOVATION_IN_CONTROL,
AwardType.CREATIVITY,
AwardType.ROOKIE_ALL_STAR,
AwardType.ENGINEERING_INSPIRATION,
AwardType.ENTREPRENEURSHIP,
AwardType.COOPERTITION,
],
2009: [
AwardType.INDUSTRIAL_DESIGN,
AwardType.QUALITY,
AwardType.DRIVING_TOMORROWS_TECHNOLOGY,
AwardType.INNOVATION_IN_CONTROL,
AwardType.CREATIVITY,
],
}
LEGACY_2_PT_AWARDS = {
2013: [
AwardType.SPIRIT,
AwardType.GRACIOUS_PROFESSIONALISM,
AwardType.IMAGERY,
AwardType.HIGHEST_ROOKIE_SEED,
AwardType.SAFETY,
AwardType.JUDGES,
AwardType.ROOKIE_INSPIRATION,
],
2012: [
AwardType.SPIRIT,
AwardType.GRACIOUS_PROFESSIONALISM,
AwardType.IMAGERY,
AwardType.HIGHEST_ROOKIE_SEED,
AwardType.SAFETY,
AwardType.JUDGES,
AwardType.ROOKIE_INSPIRATION,
AwardType.WEBSITE,
],
2011: [
AwardType.SPIRIT,
AwardType.GRACIOUS_PROFESSIONALISM,
AwardType.IMAGERY,
AwardType.HIGHEST_ROOKIE_SEED,
AwardType.SAFETY,
AwardType.JUDGES,
AwardType.ROOKIE_INSPIRATION,
AwardType.WEBSITE,
],
2010: [
AwardType.SPIRIT,
AwardType.GRACIOUS_PROFESSIONALISM,
AwardType.IMAGERY,
AwardType.HIGHEST_ROOKIE_SEED,
AwardType.SAFETY,
AwardType.JUDGES,
AwardType.ROOKIE_INSPIRATION,
AwardType.WEBSITE,
],
2009: [
AwardType.SPIRIT,
AwardType.GRACIOUS_PROFESSIONALISM,
AwardType.IMAGERY,
AwardType.JUDGES,
AwardType.ROOKIE_INSPIRATION,
AwardType.SAFETY,
AwardType.WSU_AIM_HIGHER,
AwardType.WEBSITE,
],
}
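# Illustrative sketch (the function name is made up for this example; it is
# not TBA's actual calculation code): how ALLIANCE_MAX_DEFAULT above turns
# into alliance-selection points for a captain or first pick.
def _example_captain_or_first_pick_points(alliance_number: int) -> int:
    # e.g. alliance 1 earns 17 - 1 = 16 points, alliance 8 earns 9
    return DistrictPointValues.ALLIANCE_MAX_DEFAULT - alliance_number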
|
{
"content_hash": "0612eccef2aed7a821e59d7ebb2505bd",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 148,
"avg_line_length": 34.422360248447205,
"alnum_prop": 0.6086250451100685,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "767443f515835cbcbdfab1204610c60401f3331a",
"size": "5542",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/consts/district_point_values.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
}
|
""" Garret's python vs panda comparison of speed for matching N**2 problem"""
# `list(enumerate(sents))` and `dict(map(lambda x: x[::-1], enumerate(sents))`
import pandas as pd
|
{
"content_hash": "351da8da15e38564293b962df706028c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 78,
"avg_line_length": 59,
"alnum_prop": 0.7005649717514124,
"repo_name": "hobson/hobson.github.io",
"id": "d83ce2be4b647c62e4d34d973644d1606f92659d",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_posts/panda_vs_python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118834"
},
{
"name": "HTML",
"bytes": "1145875"
},
{
"name": "JavaScript",
"bytes": "57872"
},
{
"name": "Python",
"bytes": "9253"
},
{
"name": "Ruby",
"bytes": "13563"
},
{
"name": "Shell",
"bytes": "7943"
}
],
"symlink_target": ""
}
|
problem = """
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
"""
from math import factorial
from itertools import takewhile, count, combinations_with_replacement
def largest_sum(num_digits):
return largest_sum.f * num_digits
largest_sum.f = factorial(9)
def smallest_num(num_digits):
return 10**(num_digits - 1)
max_num_digits = max(takewhile(lambda num_digits: largest_sum(num_digits) >= smallest_num(num_digits), count(1)))
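# Why this terminates at 7 (an editor's note, derived from the code above):
# an n-digit number is at least 10**(n-1), while the largest possible
# digit-factorial sum is n * 9! = n * 362880.  For n = 7 we still have
# 7 * 362880 = 2540160 >= 10**6, but for n = 8, 8 * 362880 = 2903040 < 10**7,
# so no number with 8 or more digits can equal its digit-factorial sum.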
numbers = set()
base_digits = [0,1,2,3,4,5,6,7,8,9]
for num_digits in range(1, max_num_digits+1):
for x in combinations_with_replacement(zip(base_digits, map(factorial, base_digits)), num_digits):
digits, digit_factorials = zip(*x)
if len(digits) < 2:
continue
number = sum(digit_factorials)
if (tuple(map(int, sorted(str(number)))) == digits):
numbers.add(number)
print sum(numbers)
|
{
"content_hash": "56ef54f7fba6462f24886e1a6b01e8d4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 113,
"avg_line_length": 32.15151515151515,
"alnum_prop": 0.6654099905749293,
"repo_name": "lorenyu/project-euler",
"id": "782b5f9f6f1300262d14e90adef9c45518404e36",
"size": "1061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem-034.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51438"
}
],
"symlink_target": ""
}
|
"""
Package for script API properties tests.
"""
__version__ = "$Revision-Id:$"
|
{
"content_hash": "27abb1be2f72f829425be25659a9dade",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 40,
"avg_line_length": 14.166666666666666,
"alnum_prop": 0.6,
"repo_name": "DLR-SC/DataFinder",
"id": "298ff964c7d2122ceca1ba1f61a52e51b10d1735",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/script_api/properties/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
"""Manifest validation."""
from typing import Dict
import voluptuous as vol
from voluptuous.humanize import humanize_error
from .model import Integration
MANIFEST_SCHEMA = vol.Schema({
vol.Required('domain'): str,
vol.Required('name'): str,
vol.Optional('config_flow'): bool,
vol.Optional('zeroconf'): [str],
vol.Optional('ssdp'): vol.Schema({
vol.Optional('st'): [str],
vol.Optional('manufacturer'): [str],
vol.Optional('device_type'): [str],
}),
vol.Optional('homekit'): vol.Schema({
vol.Optional('models'): [str],
}),
vol.Required('documentation'): str,
vol.Required('requirements'): [str],
vol.Required('dependencies'): [str],
vol.Optional('after_dependencies'): [str],
vol.Required('codeowners'): [str],
})
def validate_manifest(integration: Integration):
"""Validate manifest."""
try:
MANIFEST_SCHEMA(integration.manifest)
except vol.Invalid as err:
integration.add_error(
'manifest',
"Invalid manifest: {}".format(
humanize_error(integration.manifest, err)))
integration.manifest = None
return
if integration.manifest['domain'] != integration.path.name:
integration.add_error('manifest', 'Domain does not match dir name')
def validate(integrations: Dict[str, Integration], config):
"""Handle all integrations manifests."""
for integration in integrations.values():
if integration.manifest:
validate_manifest(integration)
|
{
"content_hash": "e6c0d0fb3c4eb58a50ad06efb071e32a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 30.294117647058822,
"alnum_prop": 0.6394822006472491,
"repo_name": "jabesq/home-assistant",
"id": "3e25ab31712c6c9fe20fc37f4a2ad4716c0131d6",
"size": "1545",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "script/hassfest/manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from fly_project import settings
from api.models import Badge, Me, Notification
@login_required(login_url='/authentication')
def dashboard_page(request):
# BADGE ID #1
    # Check to see if the logged in User has the Badge with the ID #1. If not
# then create it now.
me = get_object_or_404(Me, user=request.user.id)
badge = get_object_or_404(Badge, id=1)
if badge not in me.badges.all():
me.badges.add(badge)
Notification.objects.create(
type=2,
title=badge.title,
description=badge.description,
user=me.user,
badge=badge,
)
return render(request, 'dashboard/view.html',{
'settings': settings,
})
|
{
"content_hash": "4bf55008a648c543b49cdc2afcd30e88",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 32.44827586206897,
"alnum_prop": 0.6726886291179596,
"repo_name": "evan-rusin/fly-project",
"id": "4d8e3e0ce506a3da81bc0acd1ec60103be0a12f8",
"size": "941",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dashboard/views.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "188049"
},
{
"name": "HTML",
"bytes": "334623"
},
{
"name": "JavaScript",
"bytes": "136630"
},
{
"name": "Python",
"bytes": "218526"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
""" Startup script for the server."""
import argparse
import sys
import tornado.web
import socket
import tornado.options
from metadatastore.mds import MDS, MDSRO
from metadataservice.server.engine import (RunStartHandler, RunStopHandler,
EventDescriptorHandler,
EventHandler, loop)
from metadataservice.server.conf import load_configuration
if __name__ == "__main__":
config = {}
parser = argparse.ArgumentParser()
parser.add_argument('--database', dest='database', type=str,
help='name of database to use')
parser.add_argument('--mongo-host', dest='mongohost', type=str,
help='mongodb host to connect to')
parser.add_argument('--timezone', dest='timezone', type=str,
help='Local timezone')
parser.add_argument('--mongo-port', dest='mongoport', type=int,
help='mongodb port to connect')
parser.add_argument('--service-port', dest='serviceport', type=int,
help='port to broadcast from')
parser.add_argument('--no-auth', dest='auth', action='store_false')
parser.add_argument('--auth', dest='auth', action='store_true')
parser.set_defaults(auth=False)
parser.add_argument('--mongo-user', dest='mongo_user', type=str,
help='Mongo username')
parser.add_argument('--mongo-pwd', dest='mongo_pwd', type=str,
help='Mongo password')
args = parser.parse_args()
    # name of the database the server will talk to.
# If db does not exist, creates one
if args.database is not None:
config['database'] = args.database
else:
raise KeyError('--database is a required arg')
# name/ip address of the machine hosting mongodb
if args.mongohost is not None:
config['mongohost'] = args.mongohost
else:
raise KeyError('--mongo-host is a required arg')
# US/Eastern for BNL
if args.timezone is not None:
config['timezone'] = args.timezone
else:
raise KeyError('--timezone is a required arg')
# port mongo uses on the mongo-host machine, 27017 by default
if args.mongoport is not None:
config['mongoport'] = args.mongoport
else:
raise KeyError('--mongo-port is a required arg')
# Port that this server will use to communicate
if args.serviceport is not None:
config['serviceport'] = args.serviceport
else:
raise KeyError('--service-port is a required arg')
if args.auth:
if args.mongo_user and args.mongo_pwd:
config['mongo_user'] = args.mongo_user
config['mongo_pwd'] = args.mongo_pwd
else:
raise KeyError('--mongo-user and --mongo-pwd required with auth')
else:
config['mongo_user'] = None
config['mongo_pwd'] = None
libconfig = dict(host=config['mongohost'], port=config['mongoport'],
timezone=config['timezone'], database=config['database'],
                     mongo_user=config['mongo_user'], mongo_pwd=config['mongo_pwd'])
mdsro = MDSRO(version=1, config=libconfig, auth=args.auth)
mdsrw = MDS(version=1, config=libconfig, auth=args.auth)
print('Connecting to mongodb...{}:{}/{}'.format(config['mongohost'],
config['mongoport'],
config['database']))
args = sys.argv
# args.append("--log_file_prefix=/tmp/metadataservice.log")
# tornado.options.parse_command_line(args)
application = tornado.web.Application([
(r'/run_start', RunStartHandler), (r'/run_stop', RunStopHandler),
(r'/event_descriptor', EventDescriptorHandler),
(r'/event', EventHandler)
], mdsro=mdsro, mdsrw=mdsrw)
application.listen(config['serviceport'])
print('Service live on address {}:{}'.format(socket.gethostname(),
config['serviceport']))
loop.start()
|
{
"content_hash": "42251e202182d0f73c8c92d79614ff13",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 85,
"avg_line_length": 44.934065934065934,
"alnum_prop": 0.595255563707508,
"repo_name": "NSLS-II/metadataservice",
"id": "90234328c80d3e8e9777954e6a6f24eeed27acfa",
"size": "4089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "startup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "96670"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
}
|
"""Support for Dyson Pure Cool Link Sensors."""
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_cool_link import DysonPureCoolLink
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
STATE_OFF,
TEMP_CELSIUS,
TIME_HOURS,
)
from . import DYSON_DEVICES, DysonEntity
SENSOR_ATTRIBUTES = {
"air_quality": {ATTR_ICON: "mdi:fan"},
"dust": {ATTR_ICON: "mdi:cloud"},
"humidity": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"temperature": {ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE},
"filter_life": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: TIME_HOURS,
},
"carbon_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"combi_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"hepa_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
}
SENSOR_NAMES = {
"air_quality": "AQI",
"dust": "Dust",
"humidity": "Humidity",
"temperature": "Temperature",
"filter_life": "Filter Life",
"carbon_filter_state": "Carbon Filter Remaining Life",
"combi_filter_state": "Combi Filter Remaining Life",
"hepa_filter_state": "HEPA Filter Remaining Life",
}
DYSON_SENSOR_DEVICES = "dyson_sensor_devices"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson Sensors."""
if discovery_info is None:
return
hass.data.setdefault(DYSON_SENSOR_DEVICES, [])
unit = hass.config.units.temperature_unit
devices = hass.data[DYSON_SENSOR_DEVICES]
    # IDs of sensor entities already registered by this platform
    device_ids = [device.unique_id for device in hass.data[DYSON_SENSOR_DEVICES]]
    new_entities = []
    # Get Dyson devices from the parent component
    for device in hass.data[DYSON_DEVICES]:
if isinstance(device, DysonPureCool):
if f"{device.serial}-temperature" not in device_ids:
new_entities.append(DysonTemperatureSensor(device, unit))
if f"{device.serial}-humidity" not in device_ids:
new_entities.append(DysonHumiditySensor(device))
# For PureCool+Humidify devices, a single filter exists, called "Combi Filter".
# It's reported with the HEPA state, while the Carbon state is set to INValid.
if device.state and device.state.carbon_filter_state == "INV":
if f"{device.serial}-hepa_filter_state" not in device_ids:
new_entities.append(DysonHepaFilterLifeSensor(device, "combi"))
else:
if f"{device.serial}-hepa_filter_state" not in device_ids:
new_entities.append(DysonHepaFilterLifeSensor(device))
if f"{device.serial}-carbon_filter_state" not in device_ids:
new_entities.append(DysonCarbonFilterLifeSensor(device))
elif isinstance(device, DysonPureCoolLink):
new_entities.append(DysonFilterLifeSensor(device))
new_entities.append(DysonDustSensor(device))
new_entities.append(DysonHumiditySensor(device))
new_entities.append(DysonTemperatureSensor(device, unit))
new_entities.append(DysonAirQualitySensor(device))
if not new_entities:
return
devices.extend(new_entities)
add_entities(devices)
class DysonSensor(DysonEntity, SensorEntity):
"""Representation of a generic Dyson sensor."""
def __init__(self, device, sensor_type):
"""Create a new generic Dyson sensor."""
super().__init__(device, None)
self._old_value = None
self._sensor_type = sensor_type
self._attributes = SENSOR_ATTRIBUTES[sensor_type]
def on_message(self, message):
"""Handle new messages which are received from the fan."""
# Prevent refreshing if not needed
if self._old_value is None or self._old_value != self.state:
self._old_value = self.state
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the Dyson sensor name."""
return f"{super().name} {SENSOR_NAMES[self._sensor_type]}"
@property
def unique_id(self):
"""Return the sensor's unique id."""
return f"{self._device.serial}-{self._sensor_type}"
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._attributes.get(ATTR_UNIT_OF_MEASUREMENT)
@property
def icon(self):
"""Return the icon for this sensor."""
return self._attributes.get(ATTR_ICON)
@property
def device_class(self):
"""Return the device class of this sensor."""
return self._attributes.get(ATTR_DEVICE_CLASS)
class DysonFilterLifeSensor(DysonSensor):
"""Representation of Dyson Filter Life sensor (in hours)."""
def __init__(self, device):
"""Create a new Dyson Filter Life sensor."""
super().__init__(device, "filter_life")
@property
def native_value(self):
"""Return filter life in hours."""
return int(self._device.state.filter_life)
class DysonCarbonFilterLifeSensor(DysonSensor):
"""Representation of Dyson Carbon Filter Life sensor (in percent)."""
def __init__(self, device):
"""Create a new Dyson Carbon Filter Life sensor."""
super().__init__(device, "carbon_filter_state")
@property
def native_value(self):
"""Return filter life remaining in percent."""
return int(self._device.state.carbon_filter_state)
class DysonHepaFilterLifeSensor(DysonSensor):
"""Representation of Dyson HEPA (or Combi) Filter Life sensor (in percent)."""
def __init__(self, device, filter_type="hepa"):
"""Create a new Dyson Filter Life sensor."""
super().__init__(device, f"{filter_type}_filter_state")
@property
def native_value(self):
"""Return filter life remaining in percent."""
return int(self._device.state.hepa_filter_state)
class DysonDustSensor(DysonSensor):
"""Representation of Dyson Dust sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Dust sensor."""
super().__init__(device, "dust")
@property
def native_value(self):
"""Return Dust value."""
return self._device.environmental_state.dust
class DysonHumiditySensor(DysonSensor):
"""Representation of Dyson Humidity sensor."""
def __init__(self, device):
"""Create a new Dyson Humidity sensor."""
super().__init__(device, "humidity")
@property
def native_value(self):
"""Return Humidity value."""
if self._device.environmental_state.humidity == 0:
return STATE_OFF
return self._device.environmental_state.humidity
class DysonTemperatureSensor(DysonSensor):
"""Representation of Dyson Temperature sensor."""
def __init__(self, device, unit):
"""Create a new Dyson Temperature sensor."""
super().__init__(device, "temperature")
self._unit = unit
@property
def native_value(self):
"""Return Temperature value."""
temperature_kelvin = self._device.environmental_state.temperature
if temperature_kelvin == 0:
return STATE_OFF
if self._unit == TEMP_CELSIUS:
return float(f"{(temperature_kelvin - 273.15):.1f}")
return float(f"{(temperature_kelvin * 9 / 5 - 459.67):.1f}")
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
class DysonAirQualitySensor(DysonSensor):
"""Representation of Dyson Air Quality sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Air Quality sensor."""
super().__init__(device, "air_quality")
@property
def native_value(self):
"""Return Air Quality value."""
return int(self._device.environmental_state.volatil_organic_compounds)
|
{
"content_hash": "007f5963d44efec095d440f1d852dd4c",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 91,
"avg_line_length": 33.693548387096776,
"alnum_prop": 0.6376256582096697,
"repo_name": "FreekingDean/home-assistant",
"id": "be83a7e43735b192b7a8ae6e8d16ed0ef2b6b39a",
"size": "8356",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/dyson/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from joommf.energies.baseenergy import energy
import textwrap
class Exchange(energy):
def __init__(self, A):
energy.__init__(self, "Exchange")
self.A = A
def get_mif(self):
exchange_mif = textwrap.dedent("""\
Specify Oxs_UniformExchange {{
A {:.2e}
}}\n\n""").format(self.A)
return exchange_mif
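# Illustrative rendering only, with a hypothetical stiffness constant:
# Exchange(A=1.3e-11).get_mif() returns the block
#
#   Specify Oxs_UniformExchange {
#   A 1.30e-11
#   }
#
# The doubled {{ }} in the template are how str.format escapes literal
# braces in the emitted MIF text.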
|
{
"content_hash": "12cf7036ec6a2532ec585e7932aa34fc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 24.294117647058822,
"alnum_prop": 0.5060532687651331,
"repo_name": "ryanpepper/oommf-python",
"id": "d00b0f89300a64f55570cce841d952bb394e96ed",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joommf/energies/exchange.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "161"
},
{
"name": "Emacs Lisp",
"bytes": "2282"
},
{
"name": "Jupyter Notebook",
"bytes": "101733"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "137229"
},
{
"name": "Ruby",
"bytes": "295"
},
{
"name": "Shell",
"bytes": "3512"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def varimp_test():
train = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
# Run GBM
my_gbm = h2o.gbm(y=train["class"], x=train[1:4], ntrees=50, learn_rate=0.1, distribution="multinomial")
should_be_none = my_gbm.varimp()
assert should_be_none is None, "expected varimp to return None, but returned {0}".format(should_be_none)
should_be_list = my_gbm.varimp(return_list=True)
assert len(should_be_list) == 3, "expected varimp list to contain 3 entries, but it has " \
"{0}".format(len(should_be_list))
assert len(should_be_list[0]) == 4, "expected varimp entry to contain 4 elements (variable, relative_importance, " \
"scaled_importance, percentage), but it has {0}".format(len(should_be_list[0]))
if __name__ == "__main__":
tests.run_test(sys.argv, varimp_test)
|
{
"content_hash": "4c247a8bad7683b55b01f850da47e53f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 120,
"avg_line_length": 42.17391304347826,
"alnum_prop": 0.6134020618556701,
"repo_name": "junwucs/h2o-3",
"id": "96cf0380e8b29fa865c8f094cd5432ea234f382d",
"size": "970",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_misc/pyunit_varimp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5815537"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34005"
},
{
"name": "Python",
"bytes": "2084348"
},
{
"name": "R",
"bytes": "1818321"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "46944"
},
{
"name": "TeX",
"bytes": "583215"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("Missing inputs" in e.error['message'])
else:
assert(False)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE
        # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARD THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) # node1 only has one key, can't completely sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) # node2 can sign the tx completely, owns two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
if __name__ == '__main__':
RawTransactionsTest().main()
|
{
"content_hash": "a3f7ae2f79ca43acffb313de2cb7eb28",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 147,
"avg_line_length": 42.05343511450382,
"alnum_prop": 0.6169903793791977,
"repo_name": "krzysztofwos/BitcoinUnlimited",
"id": "40c9fa252154a01ae3f619614d9f6cb5b076eda7",
"size": "5915",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "qa/rpc-tests/rawtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "647624"
},
{
"name": "C++",
"bytes": "4618568"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3821"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156005"
},
{
"name": "Makefile",
"bytes": "96732"
},
{
"name": "Objective-C",
"bytes": "5375"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "687509"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "38644"
}
],
"symlink_target": ""
}
|
from itsdangerous import BadSignature, BadTimeSignature
from sqlalchemy.exc import IntegrityError
from flask_jsonschema import ValidationError
from app.api.decorators import json_response
from app.api.errors import forbidden
from app.api.errors import not_acceptable # noqa
from . import auth
@auth.errorhandler(IntegrityError)
@json_response
def server_error(e):
return {'message': e.args[0]}, 500
@auth.errorhandler(403)
def forbidden_handler(e):
    return forbidden('You don\'t have permission to access the requested'
                     ' resource. It is either read-protected or not readable '
                     'by the server.')
@auth.errorhandler(ValidationError)
@json_response
def on_validation_error(e):
return {'message': e.message, 'validator': e.validator}, 422
@auth.errorhandler(BadTimeSignature)
@json_response
def on_bad_time_signature(e):
return {'message': e.args[0], 'validator': 'signature'}, 400
@auth.errorhandler(BadSignature)
@json_response
def on_bad_signature(e):
return {'message': e.args[0]}, 400
|
{
"content_hash": "2d2100a064c4ef8ea1e9f327c671edfd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 27.973684210526315,
"alnum_prop": 0.7281279397930386,
"repo_name": "certeu/do-portal",
"id": "22e874fa73f3b2b5f3ba76f8b6fcd84526f3a4b3",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "31516"
},
{
"name": "HTML",
"bytes": "241648"
},
{
"name": "JavaScript",
"bytes": "84093"
},
{
"name": "Makefile",
"bytes": "3016"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "480459"
}
],
"symlink_target": ""
}
|
'''OpenGL extension NV.texture_expand_normal
This module customises the behaviour of the
OpenGL.raw.GL.NV.texture_expand_normal to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a remapping mode where unsigned texture
components (in the range [0,1]) can be treated as though they
contained signed data (in the range [-1,+1]). This allows
applications to easily encode signed data into unsigned texture
formats.
The functionality of this extension is nearly identical to the
EXPAND_NORMAL_NV remapping mode provided in the NV_register_combiners
extension, although it applies even if register combiners are used.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texture_expand_normal.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.texture_expand_normal import *
### END AUTOGENERATED SECTION
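# Editor's sketch (not part of the autogenerated wrapper): the remapping this
# extension enables takes an unsigned component u in [0, 1] to the signed
# value 2*u - 1 in [-1, +1], the usual way signed normal data is packed into
# unsigned texture formats.
def _expand_normal(u):
    """Hypothetical helper for illustration only; not part of this module's API."""
    return 2.0 * u - 1.0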
|
{
"content_hash": "624b09d35578a42e0b7cc75d33677aa9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 37.7037037037037,
"alnum_prop": 0.7966601178781926,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "e96e6b741eb038a0a15713c4b308feca821b2216",
"size": "1018",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/GL/NV/texture_expand_normal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
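# What extend_path buys here (editor's note): it scans every sys.path entry
# for other directories also named "backdrop" and appends them to __path__,
# so submodules of this namespace-style package can be split across several
# installed distributions yet still import under the single "backdrop" name.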
|
{
"content_hash": "d8d5e0425d9fe7703e6188c153a83b35",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 37.5,
"alnum_prop": 0.6533333333333333,
"repo_name": "gds-attic/backdrop-collector",
"id": "2d8a5192bd299b1c73745faff499ab517be7249c",
"size": "143",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backdrop/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22079"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
'''HiFive1-specific (flash only) runner.'''
from os import path
from runners.core import ZephyrBinaryRunner, RunnerCaps
class HiFive1BinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for the HiFive1 board, using openocd.'''
def __init__(self, cfg):
super().__init__(cfg)
self.openocd_config = path.join(cfg.board_dir, 'support', 'openocd.cfg')
@classmethod
def name(cls):
return 'hifive1'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'})
@classmethod
def do_add_parser(cls, parser):
pass
@classmethod
def do_create(cls, cfg, args):
if cfg.gdb is None:
raise ValueError('--gdb not provided at command line')
return HiFive1BinaryRunner(cfg)
def do_run(self, command, **kwargs):
self.require(self.cfg.openocd)
self.require(self.cfg.gdb)
openocd_cmd = ([self.cfg.openocd, '-f', self.openocd_config])
gdb_cmd = ([self.cfg.gdb, self.cfg.elf_file, '--batch',
'-ex', 'set remotetimeout 240',
'-ex', 'target extended-remote localhost:3333',
'-ex', 'load',
'-ex', 'quit'])
self.run_server_and_client(openocd_cmd, gdb_cmd)
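# Illustrative effect of do_run (paths are placeholders): it pairs a server
# process roughly equivalent to
#     openocd -f <board_dir>/support/openocd.cfg
# with a batch gdb client that connects to localhost:3333, loads the ELF,
# and quits, leaving the flashed image on the board.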
|
{
"content_hash": "03f1d4a8e7c7ead22d0246b567c39d2e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 29.86046511627907,
"alnum_prop": 0.5919003115264797,
"repo_name": "finikorg/zephyr",
"id": "1a72d3018e114c28891b67b600291e592aedb8f8",
"size": "1359",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "scripts/west_commands/runners/hifive1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "445128"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "44321001"
},
{
"name": "C++",
"bytes": "29292"
},
{
"name": "CMake",
"bytes": "1369918"
},
{
"name": "Cadence",
"bytes": "1501"
},
{
"name": "EmberScript",
"bytes": "997"
},
{
"name": "Forth",
"bytes": "1648"
},
{
"name": "GDB",
"bytes": "1285"
},
{
"name": "Haskell",
"bytes": "722"
},
{
"name": "JetBrains MPS",
"bytes": "3152"
},
{
"name": "PLSQL",
"bytes": "281"
},
{
"name": "Perl",
"bytes": "215338"
},
{
"name": "Python",
"bytes": "2251570"
},
{
"name": "Shell",
"bytes": "171294"
},
{
"name": "SmPL",
"bytes": "36840"
},
{
"name": "Smalltalk",
"bytes": "1885"
},
{
"name": "SourcePawn",
"bytes": "14890"
},
{
"name": "Tcl",
"bytes": "5838"
},
{
"name": "VBA",
"bytes": "294"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
}
|
import arcpy, pythonaddins
import os, urllib2, json, zipfile
class dl2012(object):
"""Implementation for dl2012.tool (Tool)"""
def __init__(self):
self.enabled = True
self.cursor = 3
self.shape = "Rectangle"
def onMouseDown(self, x, y, button, shift):
pass
def onMouseDownMap(self, x, y, button, shift):
pass
def onMouseUp(self, x, y, button, shift):
pass
def onMouseUpMap(self, x, y, button, shift):
pass
def onMouseMove(self, x, y, button, shift):
pass
def onMouseMoveMap(self, x, y, button, shift):
pass
def onDblClick(self):
pass
def onKeyDown(self, keycode, shift):
pass
def onKeyUp(self, keycode, shift):
pass
def deactivate(self):
pass
def onCircle(self, circle_geometry):
pass
def onLine(self, line_geometry):
pass
def onRectangle(self, rectangle_geometry):
# perform some sanity checks
        if rectangle_geometry.spatialReference.PCSCode != 3424:
rectangle_geometry = rectangle_geometry.projectAs(arcpy.SpatialReference(3424))
if(not isinstance(rectangle_geometry.XMin, float)):
pythonaddins.MessageBox("Click and drag to select an area.", "Incorrect Extent Specified", 1)
return False
if(rectangle_geometry.XMin < 0 or rectangle_geometry.YMin < 0 or rectangle_geometry.XMax > 1000000 or rectangle_geometry.YMax > 1000000):
pythonaddins.MessageBox("Select an area within New Jersey.", "Out of bounds", 1)
return False
# build the URL to request features from the NJGIN service
wfsurl = "http://njgin.state.nj.us/NJ_GeoServer/wfs?service=wfs&version=2.0.0&request=GetFeature&typeName=NJOGIS:Ortho07Grid_poly&srsName=EPSG:3424&bbox={minx},{miny},{maxx},{maxy}&outputFormat=json" \
.format(minx=rectangle_geometry.XMin,miny=rectangle_geometry.YMin,maxx=rectangle_geometry.XMax,maxy=rectangle_geometry.YMax)
try:
response = urllib2.urlopen(wfsurl)
wfsd = json.loads(response.read())['features']
        except Exception:
pythonaddins.MessageBox("Unable to connect to the NJGIN web service. Please try again.", "Unable to connect", 0)
return False
# warn people about the potentially large download
download = pythonaddins.MessageBox("You have selected {0} tiles, approximately {1}MB in size. Do you want to continue downloading?".format(len(wfsd), 10.1*len(wfsd)), "Continue Download?", 4)
if(download == "No"):
return False
# specify output directory
dldir = pythonaddins.SaveDialog("Choose download directory location...", "NewDownloadDirectory", r"C:\temp")
if(dldir == None or dldir == 'None'):
return False
if(not os.path.exists(dldir)):
os.makedirs(dldir)
# one more notice
finalnotice = """Please note, the download may take a while. Open the Python Console (in the "Geoprocessing" menu) for progress updates. You will be notified when the download is complete."""
if("Cancel" == pythonaddins.MessageBox(finalnotice, "Beginning download...", 1)):
return False
#iterate over features in the JSON returned by the WFS service
for f in wfsd:
url = "https://njgin.state.nj.us/ortho2012/nj2012ortho_sid_{0}.zip".format(f["properties"]["TILE_NO"])
response = urllib2.urlopen(url)
# output the requested zip to disk
ozf = os.path.join(dldir, os.path.basename(url))
if(not os.path.exists(ozf)):
with open(ozf, "wb") as zipf:
zipf.write(response.read())
print f["properties"]["TILE_NO"] + ".zip downloaded...",
else:
print f["properties"]["TILE_NO"] + ".zip already downloaded..."
# open the zip and extract the juicy raster goodness
if(not os.path.exists(os.path.join(dldir,f["properties"]["TILE_NO"]+".sid"))):
with zipfile.ZipFile(ozf, 'r') as sidzip:
sidzip.extract(f["properties"]["TILE_NO"]+".sid",dldir)
sidzip.extract(f["properties"]["TILE_NO"]+".sdw",dldir)
print "extracted."
else:
print f["properties"]["TILE_NO"] + ".sid already extracted..."
# add each raster to the current map frame
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd)[0]
result = arcpy.MakeRasterLayer_management(os.path.join(dldir, f["properties"]["TILE_NO"]+".sid"), f["properties"]["TILE_NO"]+".sid")
pythonaddins.MessageBox("Download complete.", "Done!", 0)
arcpy.RefreshActiveView()
class openGeoweb(object):
"""Implementation for openGeoweb.tool (Tool)"""
def __init__(self):
self.enabled = True
self.cursor = 3
import webbrowser
self.wb = webbrowser
self.shape = "Rectangle"
def onMouseDown(self, x, y, button, shift):
pass
def onMouseDownMap(self, x, y, button, shift):
pass
def onMouseUp(self, x, y, button, shift):
pass
def onMouseUpMap(self, x, y, button, shift):
pass
def onMouseMove(self, x, y, button, shift):
pass
def onMouseMoveMap(self, x, y, button, shift):
pass
def onDblClick(self):
pass
def onKeyDown(self, keycode, shift):
pass
def onKeyUp(self, keycode, shift):
pass
def deactivate(self):
pass
def onCircle(self, circle_geometry):
pass
def onLine(self, line_geometry):
pass
def onRectangle(self, rectangle_geometry):
# if not in NJ State Plane feet, reproject it
        if rectangle_geometry.spatialReference.PCSCode != 3424:
rectangle_geometry = rectangle_geometry.projectAs(arcpy.SpatialReference(3424))
# catch single-clicks
if(not isinstance(rectangle_geometry.XMin, float)):
pythonaddins.MessageBox("Click and drag to select an area. NJ-Geoweb will then open at the same extent.", "Incorrect Extent Specified", 1)
return False
# catch bounding boxes drawn far outside of New Jersey
if(rectangle_geometry.XMin < 0 or rectangle_geometry.YMin < 0 or rectangle_geometry.XMax > 1000000 or rectangle_geometry.YMax > 1000000):
pythonaddins.MessageBox("Select an area within New Jersey.", "Out of bounds", 1)
return False
# build the URL and open a browser
geoweb = "http://njwebmap.state.nj.us/NJGeoWeb//UrlHandler.ashx?MAPTABID=2&MINX={minx}&MINY={miny}&MAXX={maxx}&MAXY={maxy}&SIZE=800,600&LABEL=%7c431434.194025001%7c482873.283329998%7c102711%7c0%2c0%2c0%7c12%7cCIRCLE%7c0%2c128%2c255%7c10%7c%7c&THEME=&LANGUAGE=en-US" \
.format(minx=rectangle_geometry.XMin,miny=rectangle_geometry.YMin,maxx=rectangle_geometry.XMax,maxy=rectangle_geometry.YMax)
self.wb.open(geoweb)
# cross your fingers and hope that GeoWeb loads
|
{
"content_hash": "9f49252bcdcea120775db633558534a6",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 275,
"avg_line_length": 49.013698630136986,
"alnum_prop": 0.6260480715483511,
"repo_name": "RowanGeolab/arcgisPythonAddins",
"id": "eca3b9ad512a02e9226b2e1d51e1a21ceb29bfb3",
"size": "7156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NJServices/Install/NJServices_addin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22628"
}
],
"symlink_target": ""
}
|
from marshmallow import Schema, fields
from api.tags.schemas import TagSchema
class FileSchema(Schema):
tags = fields.Nested(TagSchema, attribute="tags", many=True)
class Meta:
fields = ("sha1",
"sha256",
"md5",
"timestamp_first_scan",
"timestamp_last_scan",
"size",
"mimetype",
"tags")
|
{
"content_hash": "d9f55123ecca4681f007d18bb75362e1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 27,
"alnum_prop": 0.49074074074074076,
"repo_name": "quarkslab/irma",
"id": "0476926dd071d28586f1cee277db303785db8227",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/api/files/schemas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "CSS",
"bytes": "86535"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "2366"
},
{
"name": "HTML",
"bytes": "26577"
},
{
"name": "JavaScript",
"bytes": "1774854"
},
{
"name": "Jinja",
"bytes": "2672"
},
{
"name": "Less",
"bytes": "13774"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "15660"
},
{
"name": "Python",
"bytes": "797592"
},
{
"name": "Shell",
"bytes": "61907"
}
],
"symlink_target": ""
}
|
"""
ncurses interface
"""
import sys
import curses
import sys_backlight
from .. import backlight
keys = {
'esc': 27,
'q': 113,
'up': 259,
'down': 258,
'left': 260,
'right': 261
}
alias = {
'esc': 'quit',
'q': 'quit',
'up': 'inc',
'down': 'dec',
'left': 'min',
'right': 'max',
}
action = {
"quit": (lambda _: sys.exit(sys_backlight.success)),
"inc": (lambda b: b.addrel(sys_backlight.default['step'])),
"dec": (lambda b: b.subrel(sys_backlight.default['step'])),
"min": (lambda b: b.setrel(0)),
"max": (lambda b: b.setrel(100)),
}
def interface():
b = backlight.Backlight()
screen = curses.initscr()
curses.noecho()
screen.keypad(1)
try:
while True:
char = screen.getch() # fetch input
for keyname, keycode in keys.items(): # loop through defined keys
if char == keycode: # inputed key matches definied entry
if alias[keyname]: # alias for key exists
if action[alias[keyname]]: # action for alias exists
action[alias[keyname]](b) # execute key action
finally:
screen.keypad(0)
curses.endwin()
curses.echo()
def main():
interface()
sys.exit(sys_backlight.success)
if __name__ == "__main__":
main()
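# Dispatch recap (illustrative; assumes sys_backlight.default['step'] is a
# relative percentage step): a keypress resolves keycode -> key name -> alias
# -> action, so pressing the Up arrow ultimately runs
#     action[alias['up']](b)   # i.e. b.addrel(sys_backlight.default['step'])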
|
{
"content_hash": "fdb580f9b9f338940be177a4efe244a2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 21.919354838709676,
"alnum_prop": 0.5349521707137601,
"repo_name": "hamgom95/sys_backlight",
"id": "46ed581296d892e5e725d1f9999dd9020b754b2e",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sys_backlight/ncurses/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11337"
}
],
"symlink_target": ""
}
|
import unittest
from airflow.models import Connection
from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook
from airflow.utils import db
class TestSparkJDBCHook(unittest.TestCase):
_config = {
'cmd_type': 'spark_to_jdbc',
'jdbc_table': 'tableMcTableFace',
'jdbc_driver': 'org.postgresql.Driver',
'metastore_table': 'hiveMcHiveFace',
'jdbc_truncate': False,
'save_mode': 'append',
'save_format': 'parquet',
'batch_size': 100,
'fetch_size': 200,
'num_partitions': 10,
'partition_column': 'columnMcColumnFace',
'lower_bound': '10',
'upper_bound': '20',
'create_table_column_types': 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)',
}
# this config is invalid because if one of [partitionColumn, lowerBound, upperBound]
# is set, all of the options must be enabled (enforced by Spark)
_invalid_config = {
'cmd_type': 'spark_to_jdbc',
'jdbc_table': 'tableMcTableFace',
'jdbc_driver': 'org.postgresql.Driver',
'metastore_table': 'hiveMcHiveFace',
'jdbc_truncate': False,
'save_mode': 'append',
'save_format': 'parquet',
'batch_size': 100,
'fetch_size': 200,
'num_partitions': 10,
'partition_column': 'columnMcColumnFace',
'upper_bound': '20',
'create_table_column_types': 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)',
}
def setUp(self):
db.merge_conn(
Connection(
conn_id='spark-default',
conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}',
)
)
db.merge_conn(
Connection(
conn_id='jdbc-default',
conn_type='postgres',
host='localhost',
schema='default',
port=5432,
login='user',
password='supersecret',
extra='{"conn_prefix":"jdbc:postgresql://"}',
)
)
def test_resolve_jdbc_connection(self):
# Given
hook = SparkJDBCHook(jdbc_conn_id='jdbc-default')
expected_connection = {
'url': 'localhost:5432',
'schema': 'default',
'conn_prefix': 'jdbc:postgresql://',
'user': 'user',
'password': 'supersecret',
}
# When
connection = hook._resolve_jdbc_connection()
# Then
self.assertEqual(connection, expected_connection)
def test_build_jdbc_arguments(self):
# Given
hook = SparkJDBCHook(**self._config)
# When
cmd = hook._build_jdbc_application_arguments(hook._resolve_jdbc_connection())
# Then
expected_jdbc_arguments = [
'-cmdType',
'spark_to_jdbc',
'-url',
'jdbc:postgresql://localhost:5432/default',
'-user',
'user',
'-password',
'supersecret',
'-metastoreTable',
'hiveMcHiveFace',
'-jdbcTable',
'tableMcTableFace',
'-jdbcDriver',
'org.postgresql.Driver',
'-batchsize',
'100',
'-fetchsize',
'200',
'-numPartitions',
'10',
'-partitionColumn',
'columnMcColumnFace',
'-lowerBound',
'10',
'-upperBound',
'20',
'-saveMode',
'append',
'-saveFormat',
'parquet',
'-createTableColumnTypes',
'columnMcColumnFace INTEGER(100), name CHAR(64),comments VARCHAR(1024)',
]
self.assertEqual(expected_jdbc_arguments, cmd)
def test_build_jdbc_arguments_invalid(self):
# Given
hook = SparkJDBCHook(**self._invalid_config)
# Expect Exception
hook._build_jdbc_application_arguments(hook._resolve_jdbc_connection())
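        # A stricter form of this check would pin the failure down explicitly.
        # Assuming the hook raises on the partial partition options (the exact
        # exception type is not verified here), it could read:
        #
        #     with self.assertRaises(Exception):
        #         hook._build_jdbc_application_arguments(
        #             hook._resolve_jdbc_connection())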
|
{
"content_hash": "b68b8b628de81dfda23fa3adbd020adc",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 88,
"avg_line_length": 30.896296296296295,
"alnum_prop": 0.5202589307120594,
"repo_name": "DinoCow/airflow",
"id": "bd80bccdd8072ae4f945a648b7cab74d61644831",
"size": "4960",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/apache/spark/hooks/test_spark_jdbc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
"""quick and dirty test"""
import unittest
import os
class MyTest(unittest.TestCase):
"""RNA Alignment test"""
def test_rna_align_find_seq_in_alignment(self):
print('----------------------------------------------------------------------')
cmd = "./rna_align_find_seq_in_alignment.py -a test_data/RF00167.stockholm.sto -f test_data/xx.fa"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_rna_align_find_core(self):
print('----------------------------------------------------------------------')
cmd = "./rna_align_find_core.py test_data/RF00167.stockholm.sto"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_rna_align_seq_to_alignment(self):
print('----------------------------------------------------------------------')
cmd = "./rna_align_seq_to_alignment.py -f test_data/4lvv_cmalign.txt -a test_data/RF01831.stockholm.sto"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_rna_align_seq_to_alignment_2(self):
print('----------------------------------------------------------------------')
cmd = "./rna_align_seq_to_alignment.py -s test_data/4lvv.seq -a test_data/RF01831.stockholm.sto -m test_data/RF01831.cm"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_rna_align_get_ss_from_alignment(self):
print('----------------------------------------------------------------------')
cmd = "./rna_align_get_ss_from_alignment.py test_data/ade.fa"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_rna_alignment(self):
print('----------------------------------------------------------------------')
cmd = "./rna_alignment.py"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
def test_random_assignment_of_nucleotides(self):
print('----------------------------------------------------------------------')
cmd = "python random_assignment_of_nucleotides.py --alignfn test_data/aln1.fasta"
print(cmd)
code = os.system(cmd)
self.assertEqual(code, 0)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "077a807facf68517f5ebc942e4c3679c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 128,
"avg_line_length": 39.6271186440678,
"alnum_prop": 0.47005988023952094,
"repo_name": "m4rx9/rna-pdb-tools",
"id": "69a2660db0cd799535ce47a311053fcbf675a571",
"size": "2361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rna_tools/tools/rna_alignment/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34107"
},
{
"name": "Shell",
"bytes": "1130"
}
],
"symlink_target": ""
}
|
'''tzinfo timezone information for Etc/GMT0.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class GMT0(StaticTzInfo):
'''Etc/GMT0 timezone definition. See datetime.tzinfo for details'''
zone = 'Etc/GMT0'
_utcoffset = timedelta(seconds=0)
_tzname = 'GMT'
GMT0 = GMT0()
|
{
"content_hash": "0f302215c9bd6619ea509f2310d2aa0a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.7210682492581603,
"repo_name": "gauribhoite/personfinder",
"id": "ce857ca06d75d61b938df871d594ff243eebc962",
"size": "337",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "app/pytz/zoneinfo/Etc/GMT0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "413819"
},
{
"name": "CSS",
"bytes": "330448"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "720955"
},
{
"name": "JavaScript",
"bytes": "1072023"
},
{
"name": "Makefile",
"bytes": "16086"
},
{
"name": "PHP",
"bytes": "2582470"
},
{
"name": "Python",
"bytes": "60243792"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "TeX",
"bytes": "60219"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""
Functions for provisioning environments with fabtools (eat shit puppet!)
"""
# standard library
import sys
import copy
import os
from distutils.util import strtobool
# 3rd party
import fabric
from fabric.api import env, task, local, run, settings, cd, sudo, lcd
import fabtools
from fabtools.vagrant import vagrant_settings
# local
import decorators
import utils
@task
@decorators.needs_environment
def apt_get_update(max_age=86400*7):
"""refresh apt-get index if its more than max_age out of date
"""
with vagrant_settings(env.host_string):
try:
fabtools.require.deb.uptodate_index(max_age=max_age)
except AttributeError:
msg = (
"Looks like your fabtools is out of date. "
"Try updating fabtools first:\n"
" sudo pip install fabtools==0.17.0"
)
raise Exception(msg)
@task
@decorators.needs_environment
def python_packages():
"""install python packages"""
filename = os.path.join(utils.remote_project_root(), "REQUIREMENTS")
with vagrant_settings(env.host_string):
fabtools.require.python.requirements(filename, use_sudo=True)
@task
@decorators.needs_environment
def debian_packages():
"""install debian packages"""
# get the list of packages
filename = os.path.join(utils.project_root(), "REQUIREMENTS-DEB")
with open(filename, 'r') as stream:
packages = stream.read().strip().splitlines()
# install them all with fabtools.
with vagrant_settings(env.host_string):
fabtools.require.deb.packages(packages)
@task
@decorators.needs_environment
def packages():
"""install all packages"""
debian_packages()
python_packages()
@task
@decorators.needs_environment
def setup_shell_environment():
"""setup the shell environment on the remote machine"""
with vagrant_settings(env.host_string):
# change into the /vagrant directory by default
fabtools.require.files.file(
path="/home/vagrant/.bash_profile",
contents="cd /vagrant",
)
@task
@decorators.needs_environment
def setup_analysis():
"""prepare analysis environment"""
with vagrant_settings(env.host_string):
# write a analysis.ini file that has the provider so we can
# easily distinguish between development and production
# environments when we run our analysis
template = os.path.join(
utils.fabfile_templates_root(),
"server_config.ini",
)
fabtools.require.files.template_file(
path="/vagrant/server_config.ini",
template_source=template,
context=env,
)
# create a data directory where all of the analysis and raw
# data is stored.
data_dir = "/vagrant/data"
fabtools.require.files.directory(data_dir)
@task(default=True)
@decorators.needs_environment
def default(do_rsync=True):
"""run all provisioning tasks"""
# http://stackoverflow.com/a/19536667/564709
if isinstance(do_rsync, (str, unicode,)):
do_rsync = bool(strtobool(do_rsync))
# rsync files (Vagrant isn't doing any provisioning now)
if do_rsync:
local("vagrant provision %(host_string)s" % env)
# run all of these provisioning tasks in the order specified here
apt_get_update()
# install debian packages first to make sure any compiling python
# packages have necessary dependencies
packages()
# set up anything else that should be done on the virtual machine
# to get it into the same state for everyone
setup_shell_environment()
setup_analysis()
|
{
"content_hash": "09322f7f837832d7218d0c2366cf8b17",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 72,
"avg_line_length": 28.795454545454547,
"alnum_prop": 0.6566692975532754,
"repo_name": "bjlange/revenge",
"id": "0bc4dcb4fe2bb8069846bfbe293a614fb4dc3a7a",
"size": "3801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/provision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6329"
},
{
"name": "Python",
"bytes": "13784"
}
],
"symlink_target": ""
}
|
import logging
import sys
import xml.dom.minidom
from pysnmp.entity.rfc3413.oneliner import cmdgen
from snmputils import print_validation_error, splunk_escape
__author__ = 'John Oxley'
class SnmpStanza:
"""
A class to represent a SNMP stanza in inputs.conf
"""
def __init__(self):
self.conf = {}
def scheme(self):
return "XML Scheme here. Some way of extending it..."
def read_config(self):
# read everything from stdin
config_str = sys.stdin.read()
# parse the config XML
doc = xml.dom.minidom.parseString(config_str)
root = doc.documentElement
conf_node = root.getElementsByTagName("configuration")[0]
if conf_node:
logging.debug("XML: found configuration")
stanza = conf_node.getElementsByTagName("stanza")[0]
if stanza:
stanza_name = stanza.getAttribute("name")
if stanza_name:
logging.debug("XML: found stanza " + stanza_name)
self.conf["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
param_name = param.getAttribute("name")
logging.debug("XML: found param '%s'" % param_name)
if param_name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
data = param.firstChild.data
self.conf[param_name] = data
logging.debug("XML: '%s' -> '%s'" % (param_name, data))
conf_dict = [(param, splunk_escape(self.conf[param]))
for param in self.conf
if param in ['destination', 'interfaces', 'operations']]
conf_str = ' '.join(['%s=%s' % nvp for nvp in conf_dict])
logging.info('action=configured stanza="%s" %s', self.conf['name'], conf_str)
checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
if (checkpnt_node and checkpnt_node.firstChild and
checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE):
self.conf["checkpoint_dir"] = checkpnt_node.firstChild.data
if not self.conf:
raise Exception("Invalid configuration received from Splunk.")
def port(self):
return int(self.conf.get("port", 161))
def destination(self):
return self.conf.get("destination")
def snmpinterval(self):
return self.conf.get("snmpinterval", 60)
def name(self):
return self.conf.get("name")
def ipv6(self):
return int(self.conf.get("ipv6", 0))
def transport(self):
"""
Get the SNMP transport taking into consideration ipv4/ipv6
:return: SNMP transport
"""
if self.ipv6():
transport = cmdgen.Udp6TransportTarget((self.destination(), self.port()))
else:
transport = cmdgen.UdpTransportTarget((self.destination(), self.port()), timeout=5)
return transport
def security_object(self):
"""
Get the SNMP security object from the configuration, taking into consideration the SNMP version
:return: security object
"""
# snmp 1 and 2C params
snmp_version = self.conf.get("snmp_version", "2C")
if snmp_version == "3":
v3_security_name = self.conf.get("v3_securityName", "")
v3_auth_key = self.conf.get("v3_authKey", None)
v3_priv_key = self.conf.get("v3_privKey", None)
v3_auth_protocol_str = self.conf.get("v3_authProtocol", "usmHMACMD5AuthProtocol")
v3_priv_protocol_str = self.conf.get("v3_privProtocol", "usmDESPrivProtocol")
v3_auth_protocol = {
'usmHMACMD5AuthProtocol': cmdgen.usmHMACMD5AuthProtocol,
'usmHMACSHAAuthProtocol': cmdgen.usmHMACSHAAuthProtocol,
'usmNoAuthProtocol': cmdgen.usmNoAuthProtocol
}.get(v3_auth_protocol_str)
v3_priv_protocol = {
'usmDESPrivProtocol': cmdgen.usmDESPrivProtocol,
'usm3DESEDEPrivProtocol': cmdgen.usm3DESEDEPrivProtocol,
'usmAesCfb128Protocol': cmdgen.usmAesCfb128Protocol,
'usmAesCfb192Protocol': cmdgen.usmAesCfb192Protocol,
'usmAesCfb256Protocol': cmdgen.usmAesCfb256Protocol,
'usmNoPrivProtocol': cmdgen.usmNoPrivProtocol,
}.get(v3_priv_protocol_str)
security_object = cmdgen.UsmUserData(v3_security_name, authKey=v3_auth_key, privKey=v3_priv_key,
authProtocol=v3_auth_protocol, privProtocol=v3_priv_protocol)
else:
communitystring = self.conf.get("communitystring", "public")
mp_model_val = 1
if snmp_version == "1":
mp_model_val = 0
security_object = cmdgen.CommunityData(communitystring, mpModel=mp_model_val)
return security_object
def is_valid(self):
valid = True
if self.port() is None or int(self.port()) < 1:
print_validation_error("Port value must be a positive integer")
valid = False
if self.snmpinterval() is None or int(self.snmpinterval()) < 1:
print_validation_error("SNMP Polling interval must be a positive integer")
valid = False
if self.destination() is None:
print_validation_error("Destination must be present")
valid = False
# TODO Validate security options??
return valid
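# A minimal usage sketch (not part of the original module; the OID below is
# illustrative). It assumes Splunk feeds stanza XML on stdin, as read_config()
# expects, and uses the standard pysnmp oneliner API:
#
#     snmp = SnmpStanza()
#     snmp.read_config()
#     if snmp.is_valid():
#         error_indication, error_status, error_index, var_binds = \
#             cmdgen.CommandGenerator().getCmd(
#                 snmp.security_object(),
#                 snmp.transport(),
#                 '1.3.6.1.2.1.1.1.0')  # sysDescr.0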
|
{
"content_hash": "c9edc0c9ad6348dd1917b33b21465e28",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 110,
"avg_line_length": 39.36054421768708,
"alnum_prop": 0.5782924300034566,
"repo_name": "liquidtelecom/splunk-snmpmod",
"id": "fe5cd7af53d96ab428b7a527cb2bcdb6a37eff4f",
"size": "5786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "snmpmod/bin/SnmpStanza.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "392"
},
{
"name": "Python",
"bytes": "77395"
},
{
"name": "Ruby",
"bytes": "534"
},
{
"name": "Shell",
"bytes": "315"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from collections import OrderedDict
from django.utils import timezone
from snisi_web.views.upload import (
handle_report_upload as original_handle_report_upload)
from snisi_nutrition.xls_import import NutritionExcelForm
from snisi_core.models.Reporting import ExpectedReporting, ReportClass
from snisi_nutrition.integrity import (
create_nut_report,
URENAMNutritionRIntegrityChecker,
URENASNutritionRIntegrityChecker,
URENINutritionRIntegrityChecker,
StocksNutritionRIntegrityChecker)
from snisi_nutrition.models.URENAM import AbstractURENAMNutritionR
from snisi_nutrition.models.URENAS import AbstractURENASNutritionR
from snisi_nutrition.models.URENI import AbstractURENINutritionR
from snisi_nutrition.models.Stocks import AbstractNutritionStocksR
reportcls_nut = ReportClass.get_or_none(slug='nutrition_monthly_routine')
logger = logging.getLogger(__name__)
def handle_report_upload(excel_form, form, provider):
if not isinstance(excel_form, NutritionExcelForm):
return original_handle_report_upload(excel_form, form, provider)
excel_form.set('submit_time', timezone.now())
excel_form.set('submitter', provider)
    # ensure we have the expecteds and all other prerequisites
excel_form.check()
if not excel_form.is_valid():
return None, excel_form.errors.pop().render(short=True)
# build requirements for report
entity = excel_form.get('entity')
period = excel_form.get('period')
    # expected reporting defines whether the report is expected or not
expected_reporting = ExpectedReporting.get_or_none(
report_class=reportcls_nut,
period=period,
within_period=False,
entity=entity,
within_entity=False,
amount_expected=ExpectedReporting.EXPECTED_SINGLE)
# should have already been checked in excel_form.
if expected_reporting is None:
logger.error("Expected reporting not found: "
"cls:{cls} - period:{period} - entity:{entity}"
.format(cls=reportcls_nut, period=period, entity=entity))
return None, ("Aucun rapport de routine attendu à "
"{entity} pour {period}"
.format(entity=entity, period=period))
# check data individually for sub reports
integrity_map = OrderedDict([
('urenam', (AbstractURENAMNutritionR,
URENAMNutritionRIntegrityChecker)),
('urenas', (AbstractURENASNutritionR,
URENASNutritionRIntegrityChecker)),
('ureni', (AbstractURENINutritionR,
URENINutritionRIntegrityChecker)),
('stocks', (AbstractNutritionStocksR,
StocksNutritionRIntegrityChecker)),
])
sr_checkers = {}
master_fields = ['entity', 'period', 'submitter', 'submit_time']
for sr, sr_data in integrity_map.items():
sr_rcls, sr_cls = sr_data
if sr == 'stocks' or getattr(entity, 'has_{}'.format(sr), False):
logger.debug("checking {}".format(sr))
sri = sr_cls()
# feed checker with meta-data
for field in master_fields:
sri.set(field, excel_form.get(field))
# feed checker with UREN data
for field in sr_rcls.data_fields():
sri.set(field,
excel_form.get('{}_{}'.format(sr, field)))
sri.check()
if not sri.is_valid():
for feedback in sri.feedbacks:
# should_raise = sri.raised == feedback
excel_form.add_feedback(feedback, False)
else:
sr_checkers[sr] = sri
# checker now includes sub-reports errors
if not excel_form.is_valid():
return None, None
# all sub reports have been checked. we can safely create reports
logger.debug("[UPLOAD] ALL UREN+STOCKS CHECKS PERFORMED. CREATING REPORT")
return create_nut_report(
provider=provider,
expected_reporting=expected_reporting,
completed_on=timezone.now(),
integrity_checker=excel_form,
data_source=excel_form,
subreport_checkers=sr_checkers)
|
{
"content_hash": "1fb02e8d34363ea096a39bfafb4473d0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 36.91379310344828,
"alnum_prop": 0.6510976179355441,
"repo_name": "yeleman/snisi",
"id": "04cc80fcf0b9843a3bd1479afaecfbe8b7d1e61e",
"size": "4362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_nutrition/upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
from bisect import bisect_left
from whoosh.compat import iteritems, xrange
from whoosh.filedb.compound import CompoundStorage
from whoosh.filedb.fieldcache import FieldCache, DefaultFieldCachingPolicy
from whoosh.matching import FilterMatcher
from whoosh.reading import IndexReader, TermNotFound
from whoosh.store import OverlayStorage
from whoosh.support import dawg
SAVE_BY_DEFAULT = True
# Reader class
class SegmentReader(IndexReader):
GZIP_CACHES = False
def __init__(self, storage, schema, segment, generation=None, codec=None):
self.storage = storage
self.schema = schema
self.segment = segment
self._gen = generation
self.is_closed = False
# Copy info from underlying segment
self._has_deletions = segment.has_deletions()
self._dc = segment.doc_count()
self._dc_all = segment.doc_count_all()
if hasattr(self.segment, "segment_id"):
self.segid = self.segment.segment_id()
else:
from whoosh.codec.base import Segment
self.segid = Segment._random_id()
# self.files is a storage object from which to load the segment files.
# This is different from the general storage (which will be used for
        # caches) if the segment is in a compound file.
if segment.is_compound():
# Use an overlay here instead of just the compound storage because
# in rare circumstances a segment file may be added after the
# segment is written
self.files = OverlayStorage(segment.open_compound_file(storage),
self.storage)
else:
self.files = storage
# Get microreaders from codec
if codec is None:
from whoosh.codec import default_codec
codec = default_codec()
self._codec = codec
self._terms = codec.terms_reader(self.files, self.segment)
self._lengths = codec.lengths_reader(self.files, self.segment)
self._stored = codec.stored_fields_reader(self.files, self.segment)
self._vectors = None # Lazy open with self._open_vectors()
self._graph = None # Lazy open with self._open_dawg()
self.set_caching_policy()
def _open_vectors(self):
if self._vectors:
return
self._vectors = self._codec.vector_reader(self.files, self.segment)
def _open_dawg(self):
if self._graph:
return
self._graph = self._codec.graph_reader(self.files, self.segment)
def has_deletions(self):
return self._has_deletions
def doc_count(self):
return self._dc
def doc_count_all(self):
return self._dc_all
def is_deleted(self, docnum):
return self.segment.is_deleted(docnum)
def generation(self):
return self._gen
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.segment)
def __contains__(self, term):
return term in self._terms
def close(self):
self._terms.close()
self._stored.close()
if self._lengths:
self._lengths.close()
if self._vectors:
self._vectors.close()
if self._graph:
self._graph.close()
self.files.close()
self.caching_policy = None
self.is_closed = True
def stored_fields(self, docnum):
assert docnum >= 0
schema = self.schema
return dict(item for item in iteritems(self._stored[docnum])
if item[0] in schema)
def all_stored_fields(self):
is_deleted = self.segment.is_deleted
sf = self.stored_fields
for docnum in xrange(self._dc_all):
if not is_deleted(docnum):
yield sf(docnum)
def field_length(self, fieldname):
return self._lengths.field_length(fieldname)
def min_field_length(self, fieldname):
return self._lengths.min_field_length(fieldname)
def max_field_length(self, fieldname):
return self._lengths.max_field_length(fieldname)
def doc_field_length(self, docnum, fieldname, default=0):
return self._lengths.doc_field_length(docnum, fieldname,
default=default)
def has_vector(self, docnum, fieldname):
if self.schema[fieldname].vector:
try:
self._open_vectors()
except (NameError, IOError):
return False
return (docnum, fieldname) in self._vectors
else:
return False
def _test_field(self, fieldname):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
if self.schema[fieldname].format is None:
raise TermNotFound("Field %r is not indexed" % fieldname)
def all_terms(self):
schema = self.schema
return ((fieldname, text) for fieldname, text in self._terms.keys()
if fieldname in schema)
def terms_from(self, fieldname, prefix):
self._test_field(fieldname)
schema = self.schema
return ((fname, text) for fname, text
in self._terms.keys_from((fieldname, prefix))
if fname in schema)
def term_info(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms[fieldname, text]
except KeyError:
raise TermNotFound("%s:%r" % (fieldname, text))
def _texts_in_fieldcache(self, fieldname, prefix=''):
# The first value in a fieldcache is the default
texts = self.fieldcache(fieldname).texts[1:]
if prefix:
i = bisect_left(texts, prefix)
while i < len(texts) and texts[i].startswith(prefix):
yield texts[i]
i += 1
else:
for text in texts:
yield text
def expand_prefix(self, fieldname, prefix):
self._test_field(fieldname)
# If a fieldcache for the field is already loaded, we already have the
# values for the field in memory, so just yield them from there
if self.fieldcache_loaded(fieldname):
return self._texts_in_fieldcache(fieldname, prefix)
else:
# Call super
return IndexReader.expand_prefix(self, fieldname, prefix)
def lexicon(self, fieldname):
self._test_field(fieldname)
# If a fieldcache for the field is already loaded, we already have the
# values for the field in memory, so just yield them from there
if self.fieldcache_loaded(fieldname):
return self._texts_in_fieldcache(fieldname)
else:
# Call super
return IndexReader.lexicon(self, fieldname)
def __iter__(self):
schema = self.schema
return ((term, terminfo) for term, terminfo in self._terms.items()
if term[0] in schema)
def iter_from(self, fieldname, text):
schema = self.schema
self._test_field(fieldname)
for term, terminfo in self._terms.items_from((fieldname, text)):
if term[0] not in schema:
continue
yield (term, terminfo)
def frequency(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms.frequency((fieldname, text))
except KeyError:
return 0
def doc_frequency(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms.doc_frequency((fieldname, text))
except KeyError:
return 0
def postings(self, fieldname, text, scorer=None):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
format_ = self.schema[fieldname].format
matcher = self._terms.matcher(fieldname, text, format_, scorer=scorer)
deleted = self.segment.deleted
if deleted:
matcher = FilterMatcher(matcher, deleted, exclude=True)
return matcher
def vector(self, docnum, fieldname):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
vformat = self.schema[fieldname].vector
if not vformat:
raise Exception("No vectors are stored for field %r" % fieldname)
self._open_vectors()
return self._vectors.matcher(docnum, fieldname, vformat)
# DAWG methods
def has_word_graph(self, fieldname):
if fieldname not in self.schema:
return False
if not self.schema[fieldname].spelling:
return False
try:
self._open_dawg()
except (NameError, IOError, dawg.FileVersionError):
return False
return self._graph.has_root(fieldname)
def word_graph(self, fieldname):
if not self.has_word_graph(fieldname):
raise KeyError("No word graph for field %r" % fieldname)
return dawg.Node(self._graph, self._graph.root(fieldname))
def terms_within(self, fieldname, text, maxdist, prefix=0):
if not self.has_word_graph(fieldname):
# This reader doesn't have a graph stored, use the slow method
return IndexReader.terms_within(self, fieldname, text, maxdist,
prefix=prefix)
return dawg.within(self._graph, text, k=maxdist, prefix=prefix,
address=self._graph.root(fieldname))
# Field cache methods
def supports_caches(self):
return True
def set_caching_policy(self, cp=None, save=True, storage=None):
"""This method lets you control the caching policy of the reader. You
can either pass a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
as the first argument, *or* use the `save` and `storage` keywords to
alter the default caching policy::
# Use a custom field caching policy object
reader.set_caching_policy(MyPolicy())
# Use the default caching policy but turn off saving caches to disk
reader.set_caching_policy(save=False)
# Use the default caching policy but save caches to a custom
# storage
from whoosh.filedb.filestore import FileStorage
mystorage = FileStorage("path/to/cachedir")
reader.set_caching_policy(storage=mystorage)
:param cp: a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
object. If this argument is not given, the default caching policy
is used.
:param save: save field caches to disk for re-use. If a caching policy
object is specified using `cp`, this argument is ignored.
:param storage: a custom :class:`whoosh.store.Storage` object to use
for saving field caches. If a caching policy object is specified
using `cp` or `save` is `False`, this argument is ignored.
"""
if not cp:
if save and storage is None:
storage = self.storage
elif not save:
storage = None
            # Use self.segid, which already falls back to a random id when the
            # segment has no segment_id() method (see __init__).
            cp = DefaultFieldCachingPolicy(self.segid, storage=storage)
if type(cp) is type:
cp = cp()
self.caching_policy = cp
def _fieldkey(self, fieldname):
return "%s/%s" % (self.segid, fieldname)
def fieldcache(self, fieldname, save=SAVE_BY_DEFAULT):
"""Returns a :class:`whoosh.filedb.fieldcache.FieldCache` object for
the given field.
:param fieldname: the name of the field to get a cache for.
:param save: if True (the default), the cache is saved to disk if it
doesn't already exist.
"""
key = self._fieldkey(fieldname)
fc = self.caching_policy.get(key)
if not fc:
fc = FieldCache.from_field(self, fieldname)
self.caching_policy.put(key, fc, save=save)
return fc
def fieldcache_available(self, fieldname):
"""Returns True if a field cache exists for the given field (either in
memory already or on disk).
"""
return self._fieldkey(fieldname) in self.caching_policy
def fieldcache_loaded(self, fieldname):
"""Returns True if a field cache for the given field is in memory.
"""
return self.caching_policy.is_loaded(self._fieldkey(fieldname))
def unload_fieldcache(self, name):
self.caching_policy.delete(self._fieldkey(name))
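# A minimal usage sketch (not part of the original module). It assumes an
# already-open whoosh index `ix` whose reader is a SegmentReader, and a
# schema with a "title" field:
#
#     r = ix.reader()
#     try:
#         if r.supports_caches():
#             fc = r.fieldcache("title")           # builds and saves the cache
#             assert r.fieldcache_loaded("title")  # cache is now in memory
#     finally:
#         r.close()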
|
{
"content_hash": "639e2e54271f20b2afc64099a977e3c6",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 79,
"avg_line_length": 36.06837606837607,
"alnum_prop": 0.6022116903633491,
"repo_name": "mozilla/popcorn_maker",
"id": "9c63691af2ec247482474d49f715174bdafa25c2",
"size": "14190",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/whoosh/filedb/filereading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3779620"
},
{
"name": "Puppet",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "5113791"
},
{
"name": "Ruby",
"bytes": "1970"
},
{
"name": "Shell",
"bytes": "2419"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__author__ = 'chad nelson'
__project__ = 'blowdrycss'
# TODO: Consider what it would take to handle shorthand property 'font'.
class FontParser(object):
""" **Features:**
- Parses unquoted font families.
Unquoted Font-Family References:
| http://www.cssfontstack.com/
| https://mathiasbynens.be/notes/unquoted-font-family
- Holds a basic ``font_families_dict`` (could be extended as desired):
| Keys: ``font-family`` category names
| Values: ``font-family`` member names
- Can generate web safe fallback fonts.
    Assumes that the property_name is ``font-family``. It does not handle the shorthand property_name ``font``.
**Examples:**
>>> font_parser = FontParser('papyrus')
>>> font_parser.generate_fallback_fonts()
'papyrus, fantasy'
"""
def __init__(self, font_value=''):
self.font_value = font_value
self.font_families_dict = {
'serif': {
'georgia', 'palatino', 'times', 'cambria', 'didot', 'garamond', 'perpetua', 'rockwell', 'baskerville',
},
'sans-serif': {
'arial', 'helvetica', 'gadget', 'cursive', 'impact', 'charcoal', 'tahoma', 'geneva', 'verdana',
'calibri', 'candara', 'futura', 'optima',
},
'monospace': {'courier', 'monaco', 'consolas', },
'fantasy': {'copperplate', 'papyrus', },
}
def generate_fallback_fonts(self):
""" Generates web safe fallback fonts
Reference: http://www.w3schools.com/cssref/css_websafe_fonts.asp
:return: (str) -- Returns a web safe fallback font string.
**Examples:**
>>> font_parser = FontParser('arial')
>>> font_parser.generate_fallback_fonts()
'arial, sans-serif'
        >>> font_parser.font_value = 'monospace'
        >>> font_parser.generate_fallback_fonts()
        'monospace'
        >>> font_parser.font_value = 'invalid'
        >>> font_parser.generate_fallback_fonts()
        ''
"""
fallback = '' # set default font to empty string
if self.font_value in self.font_families_dict:
fallback = self.font_value # font_value 'monospace' returns 'monospace'
else:
for family, fonts in self.font_families_dict.items():
if self.font_value in fonts:
fallback = self.font_value + ", " + family # font_value 'arial' returns 'arial, sans-serif'
return fallback
# TODO: Consider the handling of multi-word double quoted fonts i.e. "Palatino Linotype", "Book Antiqua", etc.
# Seems complicated.
# could use 'qq', 'q--q' or 'dq' to indicate double-quotes e.g.
# 'qqPalatino-Linotypeqq' - confusing 'Linotype' looks like 'Linotypeg'. The letter 'q' looks like a 'g' at the end.
# 'q-Palatino-Linotype-q' - might work
# 'dqPalatino-Linotypedq' - confusing 'Linotype' becomes 'Linotyped'. The letter 'd' commonly ends words.
|
{
"content_hash": "6c6c48f18aa88be54a5ed5a3a431456f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 120,
"avg_line_length": 38.56410256410256,
"alnum_prop": 0.5784574468085106,
"repo_name": "nueverest/BlowDryCSS",
"id": "afc2cd8ea75ebde9a1f7e884a9b77d47cd44a69a",
"size": "3019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blowdrycss/fontparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "3689"
},
{
"name": "CSS",
"bytes": "1378"
},
{
"name": "HTML",
"bytes": "17637"
},
{
"name": "JavaScript",
"bytes": "10569"
},
{
"name": "Python",
"bytes": "399730"
}
],
"symlink_target": ""
}
|
"""
This script is for creating download reports for a given folder, recursively.
Examples:
girder audit-logs-report
--folder=57557fac8d777f68be8f3f49
--start-date=2018-09-10T13:55:34.847Z
--end-date=2018-09-13T13:55:34.847Z
--output report.csv
girder audit-logs-report -f 57557fac8d777f68be8f3f49
"""
import click
import csv
import dateutil.parser
import sys
from bson.objectid import ObjectId
from girder.models.item import Item
from girder.models.folder import Folder
from girder_audit_logs import Record
def index_folder(folderId):
if Folder().load(folderId, force=True) is None:
raise ValueError('folderId={} was not a valid folder'.format(folderId))
items = Item().find({'folderId': ObjectId(folderId)})
subfolders = Folder().find({'parentId': ObjectId(folderId)})
files = []
for item in items:
for file in Item().childFiles(item, fields={'_id': True}):
fileId = file['_id']
files.append(fileId)
for folder in subfolders:
files += index_folder(folder['_id'])
return files
def get_file_download_records(files, start=None, end=None):
query = {
'type': 'file.download',
'details.fileId': {
'$in': files,
},
'details.startByte': 0
}
if (start is not None) or (end is not None):
whenClause = {'when': {}}
if start is not None:
whenClause['when']['$gte'] = dateutil.parser.parse(start)
if end is not None:
whenClause['when']['$lt'] = dateutil.parser.parse(end)
query.update(whenClause)
return Record().find(query)
@click.command(name='audit-logs-report')
@click.option('-f', '--folder', help='folder ID to use as root for all download reports.',
required=True)
@click.option('--start-date', help='ISO 8601 format')
@click.option('--end-date', help='ISO 8601 format')
@click.option('-o', '--output', type=click.File('w'), default=sys.stdout, help='file to write out')
def report(folder, start_date, end_date, output):
files = index_folder(folder)
records = get_file_download_records(files, start=start_date, end=end_date)
fieldnames = ['file_id', 'ip', 'timestamp']
rows = ({
'file_id': r['details']['fileId'],
'ip': r['ip'],
'timestamp': r['when'].isoformat(),
} for r in records)
reportwriter = csv.DictWriter(output, fieldnames=fieldnames)
reportwriter.writeheader()
reportwriter.writerows(rows)
if __name__ == '__main__':
report()
|
{
"content_hash": "99ed5ae64a0054d62cdcdec6a76c2fe9",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 99,
"avg_line_length": 31.20731707317073,
"alnum_prop": 0.6307151230949589,
"repo_name": "girder/girder",
"id": "f00fd82e2c463c1d9a87076037e877605ed7d014",
"size": "2559",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "plugins/audit_logs/girder_audit_logs/report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1176017"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2018697"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem363.py
#
# Bézier Curves
# =============
# Published on Sunday, 18th December 2011, 10:00 am
#
# A cubic Bézier curve is defined by four points: P0, P1, P2 and P3.
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
|
{
"content_hash": "98e49f855818231fdc4a28b6045c0e9e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 19.375,
"alnum_prop": 0.6129032258064516,
"repo_name": "olduvaihand/ProjectEuler",
"id": "3fa3fccf761986dbac3e06927d3aea743bbf1c1e",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem363.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
}
|
"""
An APF auth plugin to provide SSH key info.
"""
import base64
import logging
import os
import traceback
# Does not need to be a thread because it doesn't need to perform asynch actions.
class SSH(object):
"""
Container for SSH account info.
Works with provided filepaths.
    Works with base64-encoded tokens provided only in the config, creating the key files.
Files written to
<authbasedir>/<name>/<ssh.type>
<authbasedir>/<name>/<ssh.type>.pub
"""
def __init__(self, manager, config, section):
self.log = logging.getLogger('autopyfactory.auth')
self.name = section
self.manager = manager
self.factory = manager.factory
self.basedir = os.path.expanduser(config.get(section, 'authbasedir'))
self.sshtype = config.get(section, 'ssh.type' )
self.privkey = config.get(section, 'ssh.privatekey' )
self.pubkey = config.get(section, 'ssh.publickey' )
self.privkeypass = config.get(section, 'ssh.privkeypass')
self.privkeypath = os.path.expanduser(config.get(section, 'ssh.privatekeyfile' ))
self.pubkeypath = os.path.expanduser(config.get(section, 'ssh.publickeyfile' ))
self.privkeypasspath = config.get(section, 'ssh.privkeypassfile')
#self.passwordfile = config.get(section, 'ssh.passwordfile')
# Handle raw empty values
if self.privkey.lower() == 'none':
self.privkey = None
if self.privkeypass.lower() == 'none':
self.privkeypass = None
if self.pubkey.lower() == 'none':
self.pubkey = None
# Handle path empty values
if self.privkeypath.lower() == 'none':
self.privkeypath = None
if self.privkeypasspath.lower() == 'none':
self.privkeypasspath = None
if self.pubkeypath.lower() == 'none':
self.pubkeypath = None
#if self.passwordfile.lower() == 'none':
# self.passwordfile = None
# Create files if needed
if self.privkey is not None:
fdir = "%s/%s" % (self.basedir, self.name)
fpath = "%s/%s" % (fdir, self.sshtype)
try:
self._ensuredir(fdir)
self._decodewrite(fpath, self.privkey)
self.privkeypath = fpath
                os.chmod(fpath, 0o600)
self.log.debug("Wrote decoded private key to %s and set config OK." % self.privkeypath)
except Exception, e:
self.log.error("Exception: %s" % str(e))
self.log.debug("Exception: %s" % traceback.format_exc())
if self.pubkey is not None:
fdir = "%s/%s" % (self.basedir, self.name)
fpath = "%s/%s.pub" % (fdir, self.sshtype)
try:
self._ensuredir(fdir)
self._decodewrite(fpath, self.pubkey)
self.pubkeypath = fpath
self.log.debug("Wrote decoded public key to %s and set config OK." % self.pubkeypath)
except Exception, e:
self.log.error("Exception: %s" % str(e))
self.log.debug("Exception: %s" % traceback.format_exc())
self.log.debug("SSH Handler for profile %s initialized." % self.name)
def _ensuredir(self, dirpath):
self.log.debug("Ensuring directory %s" % dirpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def _decodewrite(self, filepath, b64string ):
self.log.debug("Writing key to %s" % filepath)
decoded = SSH.decode(b64string)
try:
fh = open(filepath, 'w')
fh.write(decoded)
except Exception, e:
self.log.error("Exception: %s" % str(e))
self.log.debug("Exception: %s" % traceback.format_exc())
raise
else:
fh.close()
def _validate(self):
"""
Confirm credentials exist and are valid.
"""
return True
def getSSHPubKey(self):
pass
def getSSHPrivKey(self):
pass
def getSSHPubKeyFilePath(self):
self.log.debug('[%s] Retrieving pubkeypath: %s' % (self.name, self.pubkeypath))
return self.pubkeypath
def getSSHPrivKeyFilePath(self):
self.log.debug('[%s] Retrieving privkeypath: %s' % (self.name, self.privkeypath))
return self.privkeypath
def getSSHPassFilePath(self):
self.log.debug('[%s] Retrieving passpath: %s' % (self.name, self.privkeypasspath))
return self.privkeypasspath
##############################################
# External Utility class methods.
##############################################
@classmethod
def encode(self, string):
return base64.b64encode(string)
@classmethod
def decode(self, string):
return base64.b64decode(string)
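# A minimal round-trip sketch of the classmethod helpers above (the key
# material is a placeholder, not a real key):
#
#     pem = "-----BEGIN RSA PRIVATE KEY-----\n..."
#     token = SSH.encode(pem)       # safe to store in a config file
#     assert SSH.decode(token) == pem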
|
{
"content_hash": "14dd7ca6b62089b05e52dd5d9ee1d166",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 103,
"avg_line_length": 36.25179856115108,
"alnum_prop": 0.5562611629291526,
"repo_name": "btovar/autopyfactory",
"id": "f6d882bee6e7bba6c3adcc2e4c0f4bd3fc34ca36",
"size": "5039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autopyfactory/plugins/authmanager/auth/SSH.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "827948"
},
{
"name": "Shell",
"bytes": "97872"
}
],
"symlink_target": ""
}
|
import re
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.conf import settings
from core import models
EXEMPT_URLS = [re.compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
EXEMPT_URLS += [re.compile(expr) for expr in settings.ROOT_CRT_INTERFACE]
class RootCrtMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if any(m.match(request.path_info) for m in EXEMPT_URLS):
return response
if models.RootCrt.objects.exists():
return response
        messages.info(request, 'Please create a root certificate')
return HttpResponseRedirect(reverse_lazy('root_crt'))
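# A minimal settings sketch of what this middleware expects (the regex
# patterns below are illustrative, not taken from the project's real
# settings):
#
#     # settings.py
#     LOGIN_EXEMPT_URLS = [r'^accounts/login/$']
#     ROOT_CRT_INTERFACE = [r'^root_crt/']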
|
{
"content_hash": "0e8db1ea450c69a40342303ff3d0ea2b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 32.16,
"alnum_prop": 0.7052238805970149,
"repo_name": "telminov/ca",
"id": "d3d38fab10a65df1ce6914234061ddefe775dff1",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2346"
},
{
"name": "Dockerfile",
"bytes": "1352"
},
{
"name": "HTML",
"bytes": "21396"
},
{
"name": "JavaScript",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "90979"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mModelSelection_Criterion = "CRPS"
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.columns);
lColumns = ['Split', 'Transformation', 'Model', 'Category', 'Complexity',
'FitCRPS', 'ForecastCRPS', 'TestCRPS']
print(lEngine.mSignalDecomposition.mTrPerfDetails[lColumns].head(10));
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
|
{
"content_hash": "0d4056358256298dff25c135805a7b26",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 83,
"avg_line_length": 25.280701754385966,
"alnum_prop": 0.7244968771686329,
"repo_name": "antoinecarme/pyaf",
"id": "58642401d7e5d4ecf471bd62f2b3231ebe32a1bb",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/probabilistic_forecasting/test_ozone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
Sliding-window-based job/task queue class (& example of use.)
May use ``multiprocessing.Process`` or ``threading.Thread`` objects as queue
items, though within Fabric itself only ``Process`` objects are used/supported.
"""
from __future__ import with_statement
import time
import Queue
from collections import deque
from progressbar import Bar, ETA, Percentage, ProgressBar, SimpleProgress
from fabric.context_managers import settings
from fabric.network import ssh
class JobQueue(object):
"""
The goal of this class is to make a queue of processes to run, and go
through them running X number at any given time.
So if the bubble is 5 start with 5 running and move the bubble of running
procs along the queue looking something like this:
Start
...........................
[~~~~~]....................
___[~~~~~].................
_________[~~~~~]...........
__________________[~~~~~]..
____________________[~~~~~]
___________________________
End
"""
def __init__(self, max_running, comms_queue, role_limits=None, debug=False):
"""
        Set up the class with reasonable defaults.
"""
self._max = max_running
self._comms_queue = comms_queue
self._debug = debug
if role_limits is None:
role_limits = {}
role_limits.setdefault('default', self._max)
self._pools = {}
for role, limit in role_limits.iteritems():
self._pools[role] = {
'running': [],
'queue': deque(),
'limit': limit,
}
self._completed = []
self._num_of_jobs = 0
self._finished = False
self._closed = False
widgets = ['Running tasks: ', Percentage(), ' ', Bar(), ' ', SimpleProgress(), ' ', ETA()]
self.pbar = ProgressBar(widgets=widgets)
    def _all_alive(self):
        """
        Simply states if all procs are alive or not. Needed to determine when
        to stop looping, and pop dead procs off and add live ones.
        """
        # Running jobs live in the per-role pools rather than a single
        # self._running list, so gather them from there.
        running = [job for pool in self._pools.itervalues()
                   for job in pool['running']]
        if running:
            return all(job.is_alive() for job in running)
        return False
def __len__(self):
"""
Just going to use number of jobs as the JobQueue length.
"""
return self._num_of_jobs
def close(self):
"""
        A sanity check, so that the need to care about new jobs being added
        in the last throes of the job_queue's run is negated.
"""
if self._debug:
print("JOB QUEUE: closed")
self._closed = True
def append(self, process):
"""
Add the Process() to the queue, so that later it can be checked up on.
That is if the JobQueue is still open.
If the queue is closed, this will just silently do nothing.
        To get data back out of this process, give ``process`` access to the
        ``multiprocessing.Queue`` object passed to this JobQueue as
        ``comms_queue``. Then ``JobQueue.run`` will include the queue's
        contents in its return value.
"""
if not self._closed:
r = process.name.split('|')[0]
role = r if r in self._pools else 'default'
self._pools[role]['queue'].appendleft(process)
self._num_of_jobs += 1
self.pbar.maxval = self._num_of_jobs
if self._debug:
print("JOB QUEUE: %s: added %s" % (role, process.name))
def run(self):
"""
        This is the workhorse. It will take the initial jobs from the _queue,
start them, add them to _running, and then go into the main running
loop.
This loop will check for done procs, if found, move them out of
_running into _completed. It also checks for a _running queue with open
spots, which it will then fill as discovered.
To end the loop, there have to be no running procs, and no more procs
to be run in the queue.
This function returns an iterable of all its children's exit codes.
"""
def _advance_the_queue(pool):
"""
            Helper function to do the job of popping a new proc off the
            queue, starting it, then adding it to the running queue. This
            will eventually deplete the _queue, which is a condition of
            stopping the running while loop.
It also sets the env.host_string from the job.name, so that fabric
knows that this is the host to be making connections on.
"""
job = pool['queue'].pop()
if self._debug:
print("Popping '%s' off the queue and starting it" % job.name)
with settings(clean_revert=True, host_string=job.name, host=job.name):
job.start()
pool['running'].append(job)
# Prep return value so we can start filling it during main loop
results = {}
for pool in self._pools.itervalues():
for job in pool['queue']:
# job.name contains role so split that off and discard
job_name = job.name.split('|')[-1]
results[job_name] = dict.fromkeys(('exit_code', 'results'))
if not self._closed:
raise Exception("Need to close() before starting.")
if self._debug:
print("JOB QUEUE: starting")
self.pbar.start()
while len(self._completed) < self._num_of_jobs:
for pool_name, pool in self._pools.iteritems():
while len(pool['queue']) and len(pool['running']) < pool['limit']:
_advance_the_queue(pool)
for i, job in enumerate(pool['running']):
if not job.is_alive():
if self._debug:
print("JOB QUEUE: %s: %s: finish" % (pool_name, job.name))
job.join() # not necessary for Process but is for Thread
self._completed.append(job)
pool['running'].pop(i)
job_name = job.name.split('|')[-1]
results[job_name]['exit_code'] = job.exitcode
# Each loop pass, try pulling results off the queue to keep its
# size down. At this point, we don't actually care if any results
# have arrived yet; they will be picked up after the main loop.
self._fill_results(results)
time.sleep(ssh.io_sleep)
if self._debug:
print("JOB QUEUE: %s: %d running jobs" % (pool_name, len(pool['running'])))
if len(pool['queue']) == 0:
print("JOB QUEUE: %s: depleted" % pool_name)
# Allow some context switching
time.sleep(ssh.io_sleep)
self.pbar.update(len(self._completed))
# Consume anything left in the results queue. Note that there is no
# need to block here, as the main loop ensures that all workers will
# already have finished.
self._fill_results(results)
# Attach exit codes now that we're all done & have joined all jobs
for job in self._completed:
            # job.name may carry a "role|host" prefix; strip it to match the
            # keys used when `results` was built above.
            results[job.name.split('|')[-1]]['exit_code'] = job.exitcode
self.pbar.finish()
return results
def _fill_results(self, results):
"""
Attempt to pull data off self._comms_queue and add to 'results' dict.
If no data is available (i.e. the queue is empty), bail immediately.
"""
while True:
try:
datum = self._comms_queue.get_nowait()
results[datum['name']]['results'] = datum['result']
except Queue.Empty:
break
#### Sample
def try_using(parallel_type):
"""
    This will run the queue through its paces, and show a simple way of using
the job queue.
"""
def print_number(number):
"""
Simple function to give a simple task to execute.
"""
print(number)
if parallel_type == "multiprocessing":
from multiprocessing import Process as Bucket # noqa
elif parallel_type == "threading":
from threading import Thread as Bucket # noqa
    # Make a job_queue with a bubble of len 5 (plus the comms queue its
    # constructor requires), and have it print verbosely
    from multiprocessing import Queue
    jobs = JobQueue(5, Queue())
jobs._debug = True
# Add 20 procs onto the stack
for x in range(20):
jobs.append(Bucket(
target=print_number,
args=[x],
kwargs={},
))
# Close up the queue and then start it's execution
jobs.close()
jobs.run()
if __name__ == '__main__':
try_using("multiprocessing")
try_using("threading")
|
{
"content_hash": "818dfb9aad71601cb0aced07b0eaea0b",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 98,
"avg_line_length": 33.755639097744364,
"alnum_prop": 0.5425993985967257,
"repo_name": "getsentry/fabric",
"id": "41476709da7627abb03e9e3256af917b82ac4bab",
"size": "8979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabric/job_queue.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "438132"
}
],
"symlink_target": ""
}
|