"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class GrpcServerTest(tf.test.TestCase):
def testRunStep(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testMultipleSessions(self):
server = tf.train.Server.create_local_server()
c = tf.constant([[2, 1]])
d = tf.constant([[1], [2]])
e = tf.matmul(c, d)
sess_1 = tf.Session(server.target)
sess_2 = tf.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testLargeConstant(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = tf.constant(const_val)
shape_t = tf.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
c = tf.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = tf.train.Server.create_local_server()
with tf.Session(server.target) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = tf.placeholder(tf.float32, shape=[10000, 3000])
min_t = tf.reduce_min(p)
max_t = tf.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testInvalidHostname(self):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "port"):
_ = tf.train.Server({"local": ["localhost"]},
job_name="local",
task_index=0)
class ServerDefTest(tf.test.TestCase):
def testLocalServer(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = tf.train.ClusterSpec(
{"local": ["localhost:2222", "localhost:2223"]}).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
).as_cluster_def()
server_def = tf.train.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = tf.train.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testClusterSpec(self):
cluster_spec = tf.train.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
if __name__ == "__main__":
tf.test.main()
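# --- Hedged sketch (editor's addition, not part of the original tests) ---
# The tests above use a single-task local server; a two-task in-process
# cluster follows the same tf.train API, with arbitrary placeholder ports:
#
#   cluster = tf.train.ClusterSpec(
#       {"local": ["localhost:2222", "localhost:2223"]})
#   server0 = tf.train.Server(cluster, job_name="local", task_index=0)
#   server1 = tf.train.Server(cluster, job_name="local", task_index=1)
#   with tf.Session(server0.target) as sess:
#       pass  # ops may be placed on /job:local/task:1 and run over gRPC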
|
{
"content_hash": "8804b56b79def077461d9ad241d58d4b",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 78,
"avg_line_length": 35.65,
"alnum_prop": 0.6104488078541375,
"repo_name": "wchan/tensorflow",
"id": "c6d2f1e1f86c3618c962925d63721493ab8cfff4",
"size": "6381",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/server_lib_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154155"
},
{
"name": "C++",
"bytes": "8545023"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "732030"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "11651"
},
{
"name": "Jupyter Notebook",
"bytes": "1771939"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "112690"
},
{
"name": "Python",
"bytes": "5637204"
},
{
"name": "Shell",
"bytes": "163955"
},
{
"name": "TypeScript",
"bytes": "388189"
}
],
"symlink_target": ""
}
|
import json
import re
import pprint
import urllib
from collections import OrderedDict
try:
file=open("../api_data.json")
data=json.load(file, object_pairs_hook=OrderedDict)
file.close()
except UnicodeDecodeError:
    # Fall back to an explicit UTF-8 open if the platform-default encoding
    # cannot decode the file.
file=open("../api_data.json", encoding='utf-8')
data=json.load(file, object_pairs_hook=OrderedDict)
file.close()
def generate(function):
# get method metadata
name = convert(function['name'])
method = function['type'].upper()
url = function['url']
match = re.match(r'\s*\d+\.\s*(.*)', function['title'])
if match:
title = match.group(1)
else:
raise "No title for " + name
url_params = []
mandatory_params = []
optional_params = []
# get parameters metadata
if 'parameter' in function:
parameter_list = function['parameter']['fields']
for key in parameter_list.keys():
plist = parameter_list[key]
for parameter in plist:
                if parameter['group'] == "URL parameters":
                    url_params.append(parameter['field'])
                elif parameter['optional']:
                    optional_params.append(parameter['field'])
                else:
                    mandatory_params.append(parameter['field'])
parameters = url_params + mandatory_params
if method != 'GET':
parameters.append("change_info=None")
parameters.extend([x+'=None' for x in optional_params])
# write method code
print(' def ' + name + '(' + ', '.join(['self'] + parameters + ['return_raw=False']) + '):')
print(' """ ' + title + ' (' + method.lower() + ') """')
if method == 'PUT' or method == 'POST':
print(' data = { ')
for param in mandatory_params:
print(' "' + param + '": ' + param + ",")
for param in optional_params:
print(' "' + param + '": ' + param + ",")
print(' }')
print(' clean_params(data)')
final_url = '"' + url.replace('{', '" + ').replace('}', ' + "') + '"'
if method == 'GET':
print(' extra=""')
for param in optional_params:
print(" if %s is not None:"%(param))
print(' extra += "?%s=" + urllib.parse.quote(str(%s))'%(param, param))
print(' return self.request("' + method + '", ' + final_url + ' + extra, return_raw=return_raw)')
elif method == 'DELETE':
print(' return self.request("' + method + '", ' + final_url + ', change_info, return_raw=return_raw)')
else:
print(' return self.request("' + method + '", ' + final_url + ', change_info, data, return_raw=return_raw)')
print('')
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
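# Illustrative examples (editor's addition) of the camelCase -> snake_case
# conversion performed by convert(); the API names are hypothetical:
#   convert("listAcceptedNodes") -> "list_accepted_nodes"
#   convert("nodeDetailsV2")     -> "node_details_v2"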
if __name__ == "__main__":
print('')
for fct in data:
# this is a doc and not a real function
if fct['name'] == "Response_format_documentationApiNodesPendingApiNodesApiNodesNodeid":
continue
# this function has composite parameters that we cannot handle yet
if fct['name'] == "createDataSource":
continue
generate(fct)
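# Hedged illustration (editor's addition): for a hypothetical GET endpoint
# titled "List rules" at url "/rules/{id}" with one optional parameter
# "prettify", generate() prints a method shaped like:
#
#   def list_rules(self, id, prettify=None, return_raw=False):
#       """ List rules (get) """
#       extra=""
#       if prettify is not None:
#           extra += "?prettify=" + urllib.parse.quote(str(prettify))
#       return self.request("GET", "/rules/" + id + "" + extra,
#                           return_raw=return_raw)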
|
{
"content_hash": "d447180e214860768f298bb93347da02",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 115,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.5786099865047234,
"repo_name": "Normation/rudder-api-client",
"id": "da0e845653e67eea0f7f33f422817b5e0a59dfcd",
"size": "3202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib.python/generate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1567"
},
{
"name": "Python",
"bytes": "22753"
},
{
"name": "Shell",
"bytes": "437"
}
],
"symlink_target": ""
}
|
from passlib.context import CryptContext
from .tokens import TokenManager
class UserManager:
def __init__(self, app):
self.enable_email = True
self.send_password_changed_email = True
self.send_registered_email = True
self.enable_confirm_email = True
self.show_email_does_not_exist = True
self.enable_retype_password = True
self.enable_invitation = True
self.enable_forgot_password = True
self.password_hash_mode = 'passlib' # or Flask-Security or plaintext
self.password_hash = 'bcrypt'
self.password_salt = app.config['SECRET_KEY'] # for Flask-Security
self.password_crypt_context = CryptContext(schemes=[self.password_hash])
self.confirm_email_email_template = 'emails/confirm_email'
self.forgot_password_email_template = 'emails/forgot_password'
self.password_changed_email_template = 'emails/password_changed'
self.registered_email_template = 'emails/registered'
self.username_changed_email_template = 'emails/username_changed'
self.invite_email_template = 'emails/invite'
self.app_name = 'LoRaWAN'
self.token_manager = TokenManager()
self.token_manager.setup(app.config.get('SECRET_KEY'))
self.reset_password_expiration = 2*24*3600
self.invite_expiration = 30*24*3600
self.confirm_email_expiration = 2*24*3600
self.confirm_email_url = '/user/confirm-email'
self.resend_confirm_email_url = '/user/resend-confirm-email'
self.reset_password_url = '/user/reset-password'
self.register_url = '/register'
self.forgot_password_url = '/user/forgot-password'
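# Hedged usage sketch (editor's addition): the CryptContext configured above
# hashes and verifies passwords roughly like this; `app` stands for any
# object whose app.config dict carries a SECRET_KEY:
#
#   manager = UserManager(app)
#   digest = manager.password_crypt_context.hash("s3cret")
#   assert manager.password_crypt_context.verify("s3cret", digest)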
|
{
"content_hash": "9c01b59fbdd940ae459b5a6b11df39ae",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 40.595238095238095,
"alnum_prop": 0.6668621700879765,
"repo_name": "soybean217/lora-python",
"id": "07858e4f21565fb0189d0dd8fea32a436f68b3d8",
"size": "1705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UServer/userver/user/user_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "27647"
},
{
"name": "Python",
"bytes": "808327"
}
],
"symlink_target": ""
}
|
"""big array mult"""
from time import time
from random import random
# NOTE: `gpu` and `numpy` appear to be globals injected by the
# PythonJS/WebCLGL test runtime; this benchmark is not runnable under
# plain CPython as-is.
@gpu.vectorize
def array_mult(a,b,c,d):
a = numpy.array(a, dtype=numpy.float32 )
b = numpy.array(b, dtype=numpy.float32 )
c = numpy.array(c, dtype=numpy.float32 )
d = numpy.array(d, dtype=numpy.float32 )
return a * b * c * d
def main():
ARRAY_SIZE = 1024*1024*2
a = [ random() for i in range(ARRAY_SIZE)]
b = [ random() for i in range(ARRAY_SIZE)]
c = [ random() for i in range(ARRAY_SIZE)]
d = [ random() for i in range(ARRAY_SIZE)]
start = time()
res = array_mult( a,b,c,d )
print( time()-start )
#print(res)
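# Hedged CPU reference (editor's addition): the same element-wise product
# without the PythonJS/WebCLGL runtime, as a plain-NumPy sketch for
# sanity-checking results on the host:
#
#   import numpy as np
#   def array_mult_cpu(a, b, c, d):
#       a, b, c, d = (np.asarray(x, dtype=np.float32) for x in (a, b, c, d))
#       return a * b * c * d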
|
{
"content_hash": "2f29f4a46dcca8bde6c0689538777aa3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 43,
"avg_line_length": 23.96,
"alnum_prop": 0.6444073455759599,
"repo_name": "jinankjain/PythonJS",
"id": "5b50e9b1d26f64615d71cfbb7e587e6f1dff85eb",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regtests/bench/webclgl_array_mult_vectorize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
from webtest import TestApp
import test_helper
class MoviesControllerTests(unittest.TestCase):
def test_movies(self):
app = TestApp(test_helper.get_app())
assert app.get('/movies').status == '200 OK'
|
{
"content_hash": "7ee73f244b9846d89258008c1dc74676",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7,
"repo_name": "Rolinh/pydeo",
"id": "39567b0c2a5f49e20b50c1a0674a3c6e1ff4c88d",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydeo/test/functional/movies_controller_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "JavaScript",
"bytes": "5506"
},
{
"name": "Python",
"bytes": "37158"
}
],
"symlink_target": ""
}
|
"""
URL patterns for testing Horizon views.
"""
from django.conf.urls.defaults import include # noqa
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
from django.views.generic import TemplateView # noqa
import horizon
urlpatterns = patterns('',
url(r'', include(horizon.urls)),
url(r"auth/login/", "django.contrib.auth.views.login",
{'template_name': "auth/login.html"},
name='login'),
url(r'auth/', include('django.contrib.auth.urls')),
url(r'^qunit/$',
TemplateView.as_view(template_name="horizon/qunit.html"),
name='qunit_tests')
)
urlpatterns += staticfiles_urlpatterns()
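# Hedged usage note (editor's addition): the named patterns above can be
# reversed in tests with the Django-1.5-era API, e.g.:
#   from django.core.urlresolvers import reverse
#   reverse('qunit_tests')  # -> '/qunit/'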
|
{
"content_hash": "fede907dbadc43cf42d742d368317871",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 30.52,
"alnum_prop": 0.6985583224115334,
"repo_name": "r-icarus/openstack_microserver",
"id": "b6bce7dca655ef15a6d897b09ab360dd2388e606",
"size": "1572",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "horizon/test/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160741"
},
{
"name": "JavaScript",
"bytes": "359957"
},
{
"name": "Python",
"bytes": "2753685"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
}
|
"""
Script that imports locally stored data as a new dataset for the user
Usage: import id outputfile
"""
import sys, os
from shutil import copyfile
#tempfile, shutil
BUFFER = 1048576
uids = sys.argv[1].split(",")
out_file1 = sys.argv[2]
#remove NONE from uids
have_none = True
while have_none:
try:
uids.remove('None')
except:
have_none = False
#create dictionary keyed by uid of tuples of (displayName,filePath,build) for all files
available_files = {}
try:
for line in open( "/depot/data2/galaxy/microbes/microbial_data.loc" ):
if not line or line[0:1] == "#" : continue
fields = line.split('\t')
try:
info_type = fields.pop(0)
if info_type.upper()=="DATA":
uid = fields.pop(0)
org_num = fields.pop(0)
chr_acc = fields.pop(0)
feature = fields.pop(0)
filetype = fields.pop(0)
path = fields.pop(0).replace("\r","").replace("\n","")
file_type = filetype
build = org_num
description = uid
else:
continue
except:
continue
available_files[uid]=(description,path,build,file_type,chr_acc)
except:
print >>sys.stderr, "It appears that the configuration file for this tool is missing."
#create list of tuples of (displayName,FileName,build) for desired files
desired_files = []
for uid in uids:
try:
desired_files.append(available_files[uid])
except:
continue
#copy first file to contents of given output file
file1_copied = False
while not file1_copied:
try:
first_file = desired_files.pop(0)
except:
print >>sys.stderr, "There were no valid files requested."
sys.exit()
file1_desc, file1_path, file1_build, file1_type,file1_chr_acc = first_file
try:
copyfile(file1_path,out_file1)
print "#File1\t"+file1_desc+"\t"+file1_chr_acc+"\t"+file1_build+"\t"+file1_type
file1_copied = True
except:
print >>sys.stderr, "The file specified is missing."
continue
#print >>sys.stderr, "The file specified is missing."
#Tell post-process filter where remaining files reside
for extra_output in desired_files:
file_desc, file_path, file_build, file_type,file_chr_acc = extra_output
print "#NewFile\t"+file_desc+"\t"+file_chr_acc+"\t"+file_build+"\t"+file_path+"\t"+file_type
|
{
"content_hash": "0b9b5cdbf1ef821baafdc7bef695e40f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 96,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.6015187849720224,
"repo_name": "jmchilton/galaxy-central",
"id": "735eed94929a232d61d982af8c76ff49d9b60628",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/data_source/microbial_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32128"
},
{
"name": "HTML",
"bytes": "96150"
},
{
"name": "JavaScript",
"bytes": "37270"
},
{
"name": "Makefile",
"bytes": "1035"
},
{
"name": "Python",
"bytes": "1341118"
},
{
"name": "Shell",
"bytes": "3787"
}
],
"symlink_target": ""
}
|
from AppKit import NSLevelIndicator, NSLevelIndicatorCell, NSTickMarkAbove, NSTickMarkBelow, \
NSDiscreteCapacityLevelIndicatorStyle, NSContinuousCapacityLevelIndicatorStyle, \
NSRatingLevelIndicatorStyle, NSRelevancyLevelIndicatorStyle, NSImage
from vanilla.vanillaBase import VanillaBaseControl
# This control is available in OS 10.4+.
# Cause a NameError if in an earlier OS.
NSLevelIndicator
_tickPositionMap = {
"above": NSTickMarkAbove,
"below": NSTickMarkBelow,
}
_levelIndicatorStyleMap = {
"discrete": NSDiscreteCapacityLevelIndicatorStyle,
"continuous": NSContinuousCapacityLevelIndicatorStyle,
"rating": NSRatingLevelIndicatorStyle,
"relevancy": NSRelevancyLevelIndicatorStyle,
}
class LevelIndicator(VanillaBaseControl):
"""
A control which shows a value on a linear scale.::
from vanilla import *
class LevelIndicatorDemo(object):
def __init__(self):
self.w = Window((200, 68))
self.w.discreteIndicator = LevelIndicator(
(10, 10, -10, 18), callback=self.levelIndicatorCallback)
self.w.continuousIndicator = LevelIndicator(
(10, 40, -10, 18), style="continuous",
callback=self.levelIndicatorCallback)
self.w.open()
def levelIndicatorCallback(self, sender):
print("level indicator edit!", sender.get())
LevelIndicatorDemo()
**posSize** Tuple of form *(left, top, width, height)* representing the position
and size of the level indicator.
+-------------------------------+
| **Standard Dimensions** |
+-------------------------------+
| *discrete without ticks* |
+-------------------------------+
| H | 18 |
+-------------------------------+
| *discrete with minor ticks* |
+-------------------------------+
| H | 22 |
+-------------------------------+
| *discrete with major ticks* |
+-------------------------------+
| H | 25 |
+-------------------------------+
| *continuous without ticks* |
+-------------------------------+
| H | 16 |
+-------------------------------+
| *continuous with minor ticks* |
+-------------------------------+
| H | 20 |
+-------------------------------+
| *continuous with major ticks* |
+-------------------------------+
| H | 23 |
+-------------------------------+
**style** The style of the level indicator. The options are:
+--------------+-------------------+
| "continuous" | A continuous bar. |
+--------------+-------------------+
| "discrete" | A segmented bar. |
+--------------+-------------------+
**value** The initial value of the level indicator.
**minValue** The minimum value allowed by the level indicator.
**maxValue** The maximum value allowed by the level indicator.
**warningValue** The value at which the filled portions of the
level indicator should display the warning color.
**criticalValue** The value at which the filled portions of the
level indicator should display the critical color.
**tickMarkPosition** The position of the tick marks in relation
to the level indicator. The options are:
+---------+
| "above" |
+---------+
| "below" |
+---------+
    **minorTickMarkCount** The number of minor tick marks to be displayed
on the level indicator. If *None* is given, no minor tick marks will be displayed.
    **majorTickMarkCount** The number of major tick marks to be displayed on the level
indicator. If *None* is given, no major tick marks will be displayed.
**callback** The method to be called when the level indicator has been edited.
If no callback is given, the level indicator will not be editable.
"""
nsLevelIndicatorClass = NSLevelIndicator
def __init__(self, posSize, style="discrete",
value=5, minValue=0, maxValue=10, warningValue=None, criticalValue=None,
tickMarkPosition=None, minorTickMarkCount=None, majorTickMarkCount=None,
callback=None):
self._setupView(self.nsLevelIndicatorClass, posSize, callback=callback)
self._nsObject.cell().setLevelIndicatorStyle_(_levelIndicatorStyleMap[style])
self._nsObject.setMinValue_(minValue)
self._nsObject.setMaxValue_(maxValue)
self._nsObject.setFloatValue_(value)
if warningValue is not None:
self._nsObject.setWarningValue_(warningValue)
if criticalValue is not None:
self._nsObject.setCriticalValue_(criticalValue)
if tickMarkPosition is not None:
self._nsObject.setTickMarkPosition_(_tickPositionMap[tickMarkPosition])
if minorTickMarkCount is not None:
self._nsObject.setNumberOfTickMarks_(minorTickMarkCount)
if majorTickMarkCount is not None:
self._nsObject.setNumberOfMajorTickMarks_(majorTickMarkCount)
if callback is None:
self._nsObject.setEnabled_(False)
def getNSLevelIndicator(self):
"""
Return the *NSLevelIndicator* that this object wraps.
"""
return self._nsObject
def set(self, value):
"""
Set the value of the level indicator.
"""
self._nsObject.setFloatValue_(value)
def get(self):
"""
Get the value of the level indicator.
"""
return self._nsObject.floatValue()
def setMinValue(self, value):
"""
Set the minimum value of the level indicator.
"""
self._nsObject.setMinValue_(value)
def getMinValue(self):
"""
Get the minimum value of the level indicator.
"""
return self._nsObject.minValue()
def setMaxValue(self, value):
"""
Set the maximum of the level indicator.
"""
self._nsObject.setMaxValue_(value)
def getMaxValue(self):
"""
Get the maximum of the level indicator.
"""
return self._nsObject.maxValue()
def setWarningValue(self, value):
"""
Set the warning value of the level indicator.
"""
self._nsObject.setWarningValue_(value)
    def getWarningValue(self):
"""
Get the warning value of the level indicator.
"""
return self._nsObject.warningValue()
def setCriticalValue(self, value):
"""
Set the critical value of the level indicator.
"""
self._nsObject.setCriticalValue_(value)
    def getCriticalValue(self):
"""
Get the critical value of the level indicator.
"""
return self._nsObject.criticalValue()
def LevelIndicatorListCell(style="discrete",
minValue=0, maxValue=10, warningValue=None, criticalValue=None,
imagePath=None, imageNamed=None, imageObject=None):
"""
An object that displays a level indicator in a List column.
**This object should only be used in the *columnDescriptions* argument
during the construction of a List.**::
from vanilla import *
class LevelIndicatorListCellDemo(object):
def __init__(self):
self.w = Window((340, 140))
items = [
{"discrete": 3, "continuous": 4, "rating": 1, "relevancy": 9},
{"discrete": 8, "continuous": 3, "rating": 5, "relevancy": 5},
{"discrete": 3, "continuous": 7, "rating": 3, "relevancy": 4},
{"discrete": 2, "continuous": 5, "rating": 4, "relevancy": 7},
{"discrete": 6, "continuous": 9, "rating": 3, "relevancy": 2},
{"discrete": 4, "continuous": 0, "rating": 6, "relevancy": 8},
]
columnDescriptions = [
{"title": "discrete",
"cell": LevelIndicatorListCell(style="discrete", warningValue=7, criticalValue=9)},
{"title": "continuous",
"cell": LevelIndicatorListCell(style="continuous", warningValue=7, criticalValue=9)},
{"title": "rating",
"cell": LevelIndicatorListCell(style="rating", maxValue=6)},
{"title": "relevancy",
"cell": LevelIndicatorListCell(style="relevancy")},
]
self.w.list = List((0, 0, -0, -0), items=items,
columnDescriptions=columnDescriptions)
self.w.open()
LevelIndicatorListCellDemo()
**style** The style of the level indicator. The options are:
+--------------+-----------------------------------------+
| "continuous" | A continuous bar. |
+--------------+-----------------------------------------+
| "discrete" | A segmented bar. |
+--------------+-----------------------------------------+
| "rating" | A row of stars. Similar to the rating |
| | indicator in iTunes. |
+--------------+-----------------------------------------+
| "relevancy" | A row of lines. Similar to the search |
| | result relevancy indicator in Mail. |
+--------------+-----------------------------------------+
**minValue** The minimum value allowed by the level indicator.
**maxValue** The maximum value allowed by the level indicator.
**warningValue** The value at which the filled portions of the
level indicator should display the warning color. Applies only to
discrete and continuous level indicators.
**criticalValue** The value at which the filled portions of the
level indicator should display the critical color. Applies only to
discrete and continuous level indicators.
"""
cell = NSLevelIndicatorCell.alloc().init()
cell.setLevelIndicatorStyle_(_levelIndicatorStyleMap[style])
cell.setMinValue_(minValue)
cell.setMaxValue_(maxValue)
if warningValue is not None:
cell.setWarningValue_(warningValue)
if criticalValue is not None:
cell.setCriticalValue_(criticalValue)
    image = None
    if imagePath is not None:
        image = NSImage.alloc().initWithContentsOfFile_(imagePath)
    elif imageNamed is not None:
        image = NSImage.imageNamed_(imageNamed)
    elif imageObject is not None:
        image = imageObject
    # Set the image whenever any of the three image sources was supplied
    # (the original guard only checked imageObject, so imagePath / imageNamed
    # images were never applied, and `image` could be unbound).
    if image is not None:
        cell.setImage_(image)
return cell
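# Hedged usage sketch (editor's addition): the three image arguments above
# are alternative ways to hand the cell an NSImage; the named image here is
# a hypothetical placeholder:
#
#   cell = LevelIndicatorListCell(style="rating", maxValue=5,
#                                 imageNamed="starTemplate")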
|
{
"content_hash": "cff7a3f96e7ea4488e0ed37a1a7fef9d",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 106,
"avg_line_length": 37.241379310344826,
"alnum_prop": 0.5471296296296296,
"repo_name": "moyogo/vanilla",
"id": "6f7e274d72ceb5e4b0f26016430694ca8a56dde7",
"size": "10800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/vanilla/vanillaLevelIndicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "331750"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from conf_common import *
# General information about the project.
project = u'ESP-IDF 编程指南'  # "ESP-IDF Programming Guide"
copyright = u'2016 - 2018 乐鑫信息科技(上海)有限公司'  # "2016 - 2018 Espressif Systems (Shanghai) Co., Ltd."
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'zh_CN'
|
{
"content_hash": "d56c79a7e05911ded2827f742115db89",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 29.545454545454547,
"alnum_prop": 0.7415384615384616,
"repo_name": "mashaoze/esp-idf",
"id": "a4017c8c0b0636b098f47e6d8788ca759ffdea06",
"size": "587",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/zh_CN/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "142861"
},
{
"name": "C",
"bytes": "22799712"
},
{
"name": "C++",
"bytes": "1390945"
},
{
"name": "CMake",
"bytes": "136271"
},
{
"name": "Inno Setup",
"bytes": "8670"
},
{
"name": "Lex",
"bytes": "7270"
},
{
"name": "Makefile",
"bytes": "123250"
},
{
"name": "Objective-C",
"bytes": "41763"
},
{
"name": "Perl",
"bytes": "15204"
},
{
"name": "Python",
"bytes": "701810"
},
{
"name": "Shell",
"bytes": "66203"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
}
|
"""Logger initialization."""
import os
import logging
import logging.config
import pkgutil
import yaml
# Search path for config file. Will default to packaged file.
CONFIG_PATH = ['./picdb_log.yaml', '~/.picdb/picdb_log.yaml']
def _lookup_configuration():
"""Lookup the configuration file.
:return: path to config file, opened configuration file
:rtype: (str, stream)
"""
for pth in CONFIG_PATH:
path = os.path.abspath(os.path.expanduser(pth))
if os.path.isfile(path):
return path, open(path)
pkg_path = 'resources/config_log.yaml'
return pkg_path, pkgutil.get_data('picdb', pkg_path)
def initialize_logger():
"""Initialize logger based on configuration.
:return: logger
:rtype: logging.logger
"""
path_cfg_file, config = __read_configuration()
logging.config.dictConfig(config)
logger = logging.getLogger('picdb.logging')
    logger.info('Logger configuration file: %s', path_cfg_file)
    return logger
def __read_configuration():
"""Read the logging configuration from file.
:return: path to config file, configuration dictionary
:rtype: (str, dict)
"""
path, cfg = _lookup_configuration()
conf_dict = yaml.safe_load(cfg)
return path, conf_dict
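# Hedged usage sketch (editor's addition):
#
#   from picdb.log import initialize_logger
#   logger = initialize_logger()
#   logger.info('logging configured')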
|
{
"content_hash": "a6e16491a23fabab1df377b67edc53c7",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 63,
"avg_line_length": 26.659574468085108,
"alnum_prop": 0.6711891460494812,
"repo_name": "stbraun/picdb",
"id": "b4536b7793c04ccff4cd2997e24748e067a0e047",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picdb/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188564"
},
{
"name": "Shell",
"bytes": "2153"
}
],
"symlink_target": ""
}
|
from twilio.twiml.voice_response import Connect, VoiceResponse, Say, VirtualAgent
response = VoiceResponse()
response.say('Hello! You will now be connected to a virtual agent.')
connect = Connect(action='https://myactionurl.com/virtualagent_ended')
connect.virtual_agent(
connector_name='project', status_callback='https://mycallbackurl.com'
)
response.append(connect)
print(response)
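# Hedged note (editor's addition): the print above emits TwiML along these
# lines (exact attribute order and formatting may differ):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Response>
#     <Say>Hello! You will now be connected to a virtual agent.</Say>
#     <Connect action="https://myactionurl.com/virtualagent_ended">
#       <VirtualAgent connectorName="project"
#                     statusCallback="https://mycallbackurl.com" />
#     </Connect>
#   </Response>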
|
{
"content_hash": "c70e5f3857e9d64db9c5c86e36877d30",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.7817258883248731,
"repo_name": "TwilioDevEd/api-snippets",
"id": "59e4f091901b36333864040042c0e4b4997d5823",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twiml/voice/connect/virtualagent-2/virtualagent-2.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Podcast.author'
db.add_column('podcast_podcast', 'author',
self.gf('django.db.models.fields.CharField')(default='', max_length=300, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Podcast.author'
db.delete_column('podcast_podcast', 'author')
models = {
'podcast.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'podcast.contributor': {
'Meta': {'object_name': 'Contributor'},
'bio': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'home_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'podcast.episode': {
'Meta': {'object_name': 'Episode'},
'artwork': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'episodes'", 'blank': 'True', 'to': "orm['podcast.Contributor']"}),
'episode_number': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('durationfield.db.models.fields.duration.DurationField', [], {'blank': 'True'}),
'mp3': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'show_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
'podcast.podcast': {
'Meta': {'object_name': 'Podcast'},
'artwork': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'podcasts'", 'to': "orm['sites.Site']"}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['podcast']
|
{
"content_hash": "cf37b6b7e5da87f945ed821b9565b277",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 193,
"avg_line_length": 59.90277777777778,
"alnum_prop": 0.54022722003246,
"repo_name": "stickwithjosh/hypodrical",
"id": "aa58264aa5278abd4e96eb3311e5a9ec345f8ab7",
"size": "4337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/podcast/migrations/0004_auto__add_field_podcast_author.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55431"
}
],
"symlink_target": ""
}
|
'''
This submodule collects useful functionality required across the task
submodules, such as preprocessing, validation, and common computations.
'''
import os
import inspect
import six
import numpy as np
def index_labels(labels, case_sensitive=False):
"""Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]``
"""
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label
def generate_labels(items, prefix='__'):
"""Given an array of items (e.g. events, intervals), create a synthetic label
for each event of the form '(label prefix)(item number)'
Parameters
----------
items : list-like
A list or array of events or intervals
prefix : str
This prefix will be prepended to all synthetically generated labels
(Default value = '__')
Returns
-------
labels : list of str
Synthetically generated labels
"""
return ['{}{}'.format(prefix, n) for n in range(len(items))]
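# Worked example (editor's addition):
# generate_labels([[0, 1], [1, 5]]) returns ['__0', '__1'].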
def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
fill_value=None):
"""Convert an array of labeled time intervals to annotated samples.
Parameters
----------
intervals : np.ndarray, shape=(n, d)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()` or
:func:`mir_eval.io.load_labeled_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
labels : list, shape=(n,)
The annotation for each interval
offset : float > 0
Phase offset of the sampled time grid (in seconds)
(Default value = 0)
sample_size : float > 0
duration of each sample to be generated (in seconds)
(Default value = 0.1)
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
sample_times : list
list of sample times
sample_labels : list
array of labels for each generated sample
Notes
-----
Intervals will be rounded down to the nearest multiple
of ``sample_size``.
"""
# Round intervals to the sample size
num_samples = int(np.floor(intervals.max() / sample_size))
sample_indices = np.arange(num_samples, dtype=np.float32)
sample_times = (sample_indices*sample_size + offset).tolist()
sampled_labels = interpolate_intervals(
intervals, labels, sample_times, fill_value)
return sample_times, sampled_labels
def interpolate_intervals(intervals, labels, time_points, fill_value=None):
"""Assign labels to a set of points in time given a set of intervals.
Time points that do not lie within an interval are mapped to `fill_value`.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Intervals are assumed to be disjoint.
labels : list, shape=(n,)
The annotation for each interval
time_points : array_like, shape=(m,)
Points in time to assign labels. These must be in
non-decreasing order.
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
aligned_labels : list
Labels corresponding to the given time points.
Raises
------
ValueError
If `time_points` is not in non-decreasing order.
"""
# Verify that time_points is sorted
time_points = np.asarray(time_points)
if np.any(time_points[1:] < time_points[:-1]):
raise ValueError('time_points must be in non-decreasing order')
aligned_labels = [fill_value] * len(time_points)
starts = np.searchsorted(time_points, intervals[:, 0], side='left')
ends = np.searchsorted(time_points, intervals[:, 1], side='right')
for (start, end, lab) in zip(starts, ends, labels):
aligned_labels[start:end] = [lab] * (end - start)
return aligned_labels
def sort_labeled_intervals(intervals, labels=None):
'''Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
'''
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted
else:
return intervals_sorted, [labels[_] for _ in idx]
def f_measure(precision, recall, beta=1.0):
"""Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure
"""
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall)
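# Worked example (editor's addition): with beta=1 this is the harmonic mean,
# e.g. f_measure(0.8, 0.4) = 2 * 0.8 * 0.4 / (0.8 + 0.4) ~= 0.533.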
def intervals_to_boundaries(intervals, q=5):
"""Convert interval times into boundaries.
Parameters
----------
intervals : np.ndarray, shape=(n_events, 2)
Array of interval start and end-times
q : int
Number of decimals to round to. (Default value = 5)
Returns
-------
boundaries : np.ndarray
Interval boundary times, including the end of the final interval
"""
return np.unique(np.ravel(np.round(intervals, decimals=q)))
def boundaries_to_intervals(boundaries):
"""Convert an array of event times into intervals
Parameters
----------
boundaries : list-like
List-like of event times. These are assumed to be unique
timestamps in ascending order.
Returns
-------
intervals : np.ndarray, shape=(n_intervals, 2)
Start and end time for each interval
"""
if not np.allclose(boundaries, np.unique(boundaries)):
raise ValueError('Boundary times are not unique or not ascending.')
intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:])))
return intervals
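# Worked example (editor's addition): boundaries [0.0, 1.0, 2.5] become
# intervals [[0.0, 1.0], [1.0, 2.5]].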
def adjust_intervals(intervals,
labels=None,
t_min=0.0,
t_max=None,
start_label='__T_MIN',
end_label='__T_MAX'):
"""Adjust a list of time intervals to span the range ``[t_min, t_max]``.
Any intervals lying completely outside the specified range will be removed.
Any intervals lying partially outside the specified range will be cropped.
If the specified range exceeds the span of the provided data in either
direction, additional intervals will be appended. If an interval is
appended at the beginning, it will be given the label ``start_label``; if
an interval is appended at the end, it will be given the label
``end_label``.
Parameters
----------
intervals : np.ndarray, shape=(n_events, 2)
Array of interval start and end-times
labels : list, len=n_events or None
List of labels
(Default value = None)
t_min : float or None
Minimum interval start time.
(Default value = 0.0)
t_max : float or None
Maximum interval end time.
(Default value = None)
start_label : str or float or int
Label to give any intervals appended at the beginning
(Default value = '__T_MIN')
end_label : str or float or int
Label to give any intervals appended at the end
(Default value = '__T_MAX')
Returns
-------
new_intervals : np.ndarray
Intervals spanning ``[t_min, t_max]``
new_labels : list
List of labels for ``new_labels``
"""
# When supplied intervals are empty and t_max and t_min are supplied,
# create one interval from t_min to t_max with the label start_label
if t_min is not None and t_max is not None and intervals.size == 0:
return np.array([[t_min, t_max]]), [start_label]
# When intervals are empty and either t_min or t_max are not supplied,
# we can't append new intervals
elif (t_min is None or t_max is None) and intervals.size == 0:
raise ValueError("Supplied intervals are empty, can't append new"
" intervals")
if t_min is not None:
# Find the intervals that end at or after t_min
first_idx = np.argwhere(intervals[:, 1] >= t_min)
if len(first_idx) > 0:
# If we have events below t_min, crop them out
if labels is not None:
labels = labels[int(first_idx[0]):]
# Clip to the range (t_min, +inf)
intervals = intervals[int(first_idx[0]):]
intervals = np.maximum(t_min, intervals)
if intervals.min() > t_min:
# Lowest boundary is higher than t_min:
# add a new boundary and label
intervals = np.vstack(([t_min, intervals.min()], intervals))
if labels is not None:
labels.insert(0, start_label)
if t_max is not None:
# Find the intervals that begin after t_max
last_idx = np.argwhere(intervals[:, 0] > t_max)
if len(last_idx) > 0:
# We have boundaries above t_max.
# Trim to only boundaries <= t_max
if labels is not None:
labels = labels[:int(last_idx[0])]
# Clip to the range (-inf, t_max)
intervals = intervals[:int(last_idx[0])]
intervals = np.minimum(t_max, intervals)
if intervals.max() < t_max:
# Last boundary is below t_max: add a new boundary and label
intervals = np.vstack((intervals, [intervals.max(), t_max]))
if labels is not None:
labels.append(end_label)
return intervals, labels
def adjust_events(events, labels=None, t_min=0.0,
t_max=None, label_prefix='__'):
"""Adjust the given list of event times to span the range
``[t_min, t_max]``.
Any event times outside of the specified range will be removed.
If the times do not span ``[t_min, t_max]``, additional events will be
added with the prefix ``label_prefix``.
Parameters
----------
events : np.ndarray
Array of event times (seconds)
labels : list or None
List of labels
(Default value = None)
t_min : float or None
Minimum valid event time.
(Default value = 0.0)
t_max : float or None
Maximum valid event time.
(Default value = None)
label_prefix : str
Prefix string to use for synthetic labels
(Default value = '__')
Returns
-------
new_times : np.ndarray
Event times corrected to the given range.
"""
if t_min is not None:
first_idx = np.argwhere(events >= t_min)
if len(first_idx) > 0:
# We have events below t_min
# Crop them out
if labels is not None:
labels = labels[int(first_idx[0]):]
events = events[int(first_idx[0]):]
if events[0] > t_min:
# Lowest boundary is higher than t_min:
# add a new boundary and label
events = np.concatenate(([t_min], events))
if labels is not None:
labels.insert(0, '%sT_MIN' % label_prefix)
if t_max is not None:
last_idx = np.argwhere(events > t_max)
if len(last_idx) > 0:
# We have boundaries above t_max.
# Trim to only boundaries <= t_max
if labels is not None:
labels = labels[:int(last_idx[0])]
events = events[:int(last_idx[0])]
if events[-1] < t_max:
# Last boundary is below t_max: add a new boundary and label
events = np.concatenate((events, [t_max]))
if labels is not None:
labels.append('%sT_MAX' % label_prefix)
return events, labels
def intersect_files(flist1, flist2):
"""Return the intersection of two sets of filepaths, based on the file name
(after the final '/') and ignoring the file extension.
Examples
--------
>>> flist1 = ['/a/b/abc.lab', '/c/d/123.lab', '/e/f/xyz.lab']
>>> flist2 = ['/g/h/xyz.npy', '/i/j/123.txt', '/k/l/456.lab']
>>> sublist1, sublist2 = mir_eval.util.intersect_files(flist1, flist2)
    >>> print(sublist1)
    ['/e/f/xyz.lab', '/c/d/123.lab']
    >>> print(sublist2)
    ['/g/h/xyz.npy', '/i/j/123.txt']
Parameters
----------
flist1 : list
first list of filepaths
flist2 : list
second list of filepaths
Returns
-------
sublist1 : list
subset of filepaths with matching stems from ``flist1``
sublist2 : list
corresponding filepaths from ``flist2``
"""
def fname(abs_path):
"""Returns the filename given an absolute path.
Parameters
----------
abs_path :
Returns
-------
"""
return os.path.splitext(os.path.split(abs_path)[-1])[0]
fmap = dict([(fname(f), f) for f in flist1])
pairs = [list(), list()]
for f in flist2:
if fname(f) in fmap:
pairs[0].append(fmap[fname(f)])
pairs[1].append(f)
return pairs
def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
r"""Merge the time intervals of two sequences.
Parameters
----------
x_intervals : np.ndarray
Array of interval times (seconds)
x_labels : list or None
List of labels
y_intervals : np.ndarray
Array of interval times (seconds)
y_labels : list or None
List of labels
Returns
-------
new_intervals : np.ndarray
New interval times of the merged sequences.
new_x_labels : list
New labels for the sequence ``x``
new_y_labels : list
New labels for the sequence ``y``
"""
align_check = [x_intervals[0, 0] == y_intervals[0, 0],
x_intervals[-1, 1] == y_intervals[-1, 1]]
if False in align_check:
raise ValueError(
"Time intervals do not align; did you mean to call "
"'adjust_intervals()' first?")
time_boundaries = np.unique(
np.concatenate([x_intervals, y_intervals], axis=0))
output_intervals = np.array(
[time_boundaries[:-1], time_boundaries[1:]]).T
x_labels_out, y_labels_out = [], []
x_label_range = np.arange(len(x_labels))
y_label_range = np.arange(len(y_labels))
for t0, _ in output_intervals:
x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
x_labels_out.append(x_labels[x_idx[-1]])
y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
y_labels_out.append(y_labels[y_idx[-1]])
return output_intervals, x_labels_out, y_labels_out
def _bipartite_match(graph):
"""Find maximum cardinality matching of a bipartite graph (U,V,E).
The input format is a dictionary mapping members of U to a list
of their neighbors in V.
The output is a dict M mapping members of V to their matches in U.
Parameters
----------
graph : dictionary : left-vertex -> list of right vertices
The input bipartite graph. Each edge need only be specified once.
Returns
-------
matching : dictionary : right-vertex -> left vertex
A maximal bipartite matching.
"""
# Adapted from:
#
# Hopcroft-Karp bipartite max-cardinality matching and max independent set
# David Eppstein, UC Irvine, 27 Apr 2002
# initialize greedy matching (redundant, but faster than full search)
matching = {}
for u in graph:
for v in graph[u]:
if v not in matching:
matching[v] = u
break
while True:
# structure residual graph into layers
# pred[u] gives the neighbor in the previous layer for u in U
# preds[v] gives a list of neighbors in the previous layer for v in V
# unmatched gives a list of unmatched vertices in final layer of V,
# and is also used as a flag value for pred[u] when u is in the first
# layer
preds = {}
unmatched = []
pred = dict([(u, unmatched) for u in graph])
for v in matching:
del pred[matching[v]]
layer = list(pred)
# repeatedly extend layering structure by another pair of layers
while layer and not unmatched:
new_layer = {}
for u in layer:
for v in graph[u]:
if v not in preds:
new_layer.setdefault(v, []).append(u)
layer = []
for v in new_layer:
preds[v] = new_layer[v]
if v in matching:
layer.append(matching[v])
pred[matching[v]] = v
else:
unmatched.append(v)
# did we finish layering without finding any alternating paths?
if not unmatched:
unlayered = {}
for u in graph:
for v in graph[u]:
if v not in preds:
unlayered[v] = None
return matching
def recurse(v):
"""Recursively search backward through layers to find alternating
paths. recursion returns true if found path, false otherwise
"""
if v in preds:
L = preds[v]
del preds[v]
for u in L:
if u in pred:
pu = pred[u]
del pred[u]
if pu is unmatched or recurse(pu):
matching[v] = u
return True
return False
for v in unmatched:
recurse(v)
def _outer_distance_mod_n(ref, est, modulus=12):
"""Compute the absolute outer distance modulo n.
Using this distance, d(11, 0) = 1 (modulo 12)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values.
est : np.ndarray, shape=(m,)
Array of estimated values.
modulus : int
The modulus.
12 by default for octave equivalence.
Returns
-------
outer_distance : np.ndarray, shape=(n, m)
The outer circular distance modulo n.
"""
ref_mod_n = np.mod(ref, modulus)
est_mod_n = np.mod(est, modulus)
abs_diff = np.abs(np.subtract.outer(ref_mod_n, est_mod_n))
return np.minimum(abs_diff, modulus - abs_diff)
def match_events(ref, est, window, distance=None):
"""Compute a maximum matching between reference and estimated event times,
subject to a window constraint.
Given two lists of event times ``ref`` and ``est``, we seek the largest set
of correspondences ``(ref[i], est[j])`` such that
``distance(ref[i], est[j]) <= window``, and each
``ref[i]`` and ``est[j]`` is matched at most once.
This is useful for computing precision/recall metrics in beat tracking,
onset detection, and segmentation.
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float > 0
Size of the window.
distance : function
function that computes the outer distance of ref and est.
By default uses ``|ref[i] - est[j]|``
Returns
-------
matching : list of tuples
A list of matched reference and event numbers.
``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``.
"""
if distance is not None:
# Compute the indices of feasible pairings
hits = np.where(distance(ref, est) <= window)
else:
hits = _fast_hit_windows(ref, est, window)
# Construct the graph input
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(_bipartite_match(G).items())
return matching
def _fast_hit_windows(ref, est, window):
'''Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window``
'''
ref = np.asarray(ref)
est = np.asarray(est)
ref_idx = np.argsort(ref)
ref_sorted = ref[ref_idx]
left_idx = np.searchsorted(ref_sorted, est - window, side='left')
right_idx = np.searchsorted(ref_sorted, est + window, side='right')
hit_ref, hit_est = [], []
for j, (start, end) in enumerate(zip(left_idx, right_idx)):
hit_ref.extend(ref_idx[start:end])
hit_est.extend([j] * (end - start))
return hit_ref, hit_est
def validate_intervals(intervals):
"""Checks that an (n, 2) interval ndarray is well-formed, and raises errors
if not.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
Array of interval start/end locations.
"""
# Validate interval shape
if intervals.ndim != 2 or intervals.shape[1] != 2:
raise ValueError('Intervals should be n-by-2 numpy ndarray, '
'but shape={}'.format(intervals.shape))
# Make sure no times are negative
if (intervals < 0).any():
raise ValueError('Negative interval times found')
# Make sure all intervals have strictly positive duration
if (intervals[:, 1] <= intervals[:, 0]).any():
raise ValueError('All interval durations must be strictly positive')
def validate_events(events, max_time=30000.):
"""Checks that a 1-d event location ndarray is well-formed, and raises
errors if not.
Parameters
----------
events : np.ndarray, shape=(n,)
Array of event times
max_time : float
If an event is found above this time, a ValueError will be raised.
(Default value = 30000.)
"""
# Make sure no event times are huge
if (events > max_time).any():
raise ValueError('An event at time {} was found which is greater than '
'the maximum allowable time of max_time = {} (did you'
' supply event times in '
'seconds?)'.format(events.max(), max_time))
# Make sure event locations are 1-d np ndarrays
if events.ndim != 1:
raise ValueError('Event times should be 1-d numpy ndarray, '
'but shape={}'.format(events.shape))
# Make sure event times are increasing
if (np.diff(events) < 0).any():
raise ValueError('Events should be in increasing order.')
def validate_frequencies(frequencies, max_freq, min_freq,
allow_negatives=False):
"""Checks that a 1-d frequency ndarray is well-formed, and raises
errors if not.
Parameters
----------
frequencies : np.ndarray, shape=(n,)
Array of frequency values
    max_freq : float
        If a frequency is found above this pitch, a ValueError will be raised.
    min_freq : float
        If a frequency is found below this pitch, a ValueError will be raised.
allow_negatives : bool
Whether or not to allow negative frequency values.
"""
# If flag is true, map frequencies to their absolute value.
if allow_negatives:
frequencies = np.abs(frequencies)
# Make sure no frequency values are huge
if (np.abs(frequencies) > max_freq).any():
raise ValueError('A frequency of {} was found which is greater than '
'the maximum allowable value of max_freq = {} (did '
'you supply frequency values in '
'Hz?)'.format(frequencies.max(), max_freq))
# Make sure no frequency values are tiny
if (np.abs(frequencies) < min_freq).any():
raise ValueError('A frequency of {} was found which is less than the '
'minimum allowable value of min_freq = {} (did you '
'supply frequency values in '
'Hz?)'.format(frequencies.min(), min_freq))
# Make sure frequency values are 1-d np ndarrays
if frequencies.ndim != 1:
raise ValueError('Frequencies should be 1-d numpy ndarray, '
'but shape={}'.format(frequencies.shape))
def has_kwargs(function):
r'''Determine whether a function has \*\*kwargs.
Parameters
----------
function : callable
The function to test
Returns
-------
True if function accepts arbitrary keyword arguments.
False otherwise.
'''
if six.PY2:
return inspect.getargspec(function).keywords is not None
else:
sig = inspect.signature(function)
for param in sig.parameters.values():
if param.kind == param.VAR_KEYWORD:
return True
return False
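# Example (a sketch):
#
#   >>> has_kwargs(lambda a, b=0: None)
#   False
#   >>> has_kwargs(lambda a, **kw: None)
#   True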
def filter_kwargs(_function, *args, **kwargs):
"""Given a function and args and keyword args to pass to it, call the function
but using only the keyword arguments which it accepts. This is equivalent
to redefining the function with an additional \*\*kwargs to accept slop
keyword args.
If the target function already accepts \*\*kwargs parameters, no filtering
is performed.
Parameters
----------
_function : callable
Function to call. Can take in any number of args or kwargs
"""
if has_kwargs(_function):
return _function(*args, **kwargs)
# Get the list of function arguments
func_code = six.get_function_code(_function)
function_args = func_code.co_varnames[:func_code.co_argcount]
# Construct a dict of those kwargs which appear in the function
filtered_kwargs = {}
for kwarg, value in list(kwargs.items()):
if kwarg in function_args:
filtered_kwargs[kwarg] = value
# Call the function with the supplied args and the filtered kwarg dict
return _function(*args, **filtered_kwargs)
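# Example (a sketch): keyword arguments the target does not accept are
# silently discarded.
#
#   >>> def add(x, y=0):
#   ...     return x + y
#   >>> filter_kwargs(add, 1, y=2, bogus=3)
#   3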
def intervals_to_durations(intervals):
"""Converts an array of n intervals to their n durations.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Returns
-------
durations : np.ndarray, shape=(n,)
Array of the duration of each interval.
"""
validate_intervals(intervals)
return np.abs(np.diff(intervals, axis=-1)).flatten()
def hz_to_midi(freqs):
'''Convert Hz to MIDI numbers
Parameters
----------
freqs : number or ndarray
Frequency/frequencies in Hz
Returns
-------
midi : number or ndarray
MIDI note numbers corresponding to input frequencies.
Note that these may be fractional.
'''
return 12.0 * (np.log2(freqs) - np.log2(440.0)) + 69.0
def midi_to_hz(midi):
'''Convert MIDI numbers to Hz
Parameters
----------
midi : number or ndarray
MIDI notes
Returns
-------
freqs : number or ndarray
Frequency/frequencies in Hz corresponding to `midi`
'''
return 440.0 * (2.0 ** ((midi - 69.0)/12.0))
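# Sanity check (a sketch): A4 = 440 Hz is MIDI note 69, and the two
# conversions invert one another (up to floating-point error).
#
#   >>> hz_to_midi(440.0)
#   69.0
#   >>> midi_to_hz(69.0)
#   440.0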
|
{
"content_hash": "2ffee8705236f582fc357af16e75b308",
"timestamp": "",
"source": "github",
"line_count": 958,
"max_line_length": 82,
"avg_line_length": 30.647181628392484,
"alnum_prop": 0.5880790190735695,
"repo_name": "bmcfee/mir_eval",
"id": "600677ace11510134b984aa51ce10aefa4ebbe9d",
"size": "29360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mir_eval/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10603"
},
{
"name": "Python",
"bytes": "585055"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
def mse(sname, true, model):
with tf.name_scope(sname):
waveform_loss = tf.reduce_mean(tf.square(tf.subtract(true, model)))
tf.summary.scalar(sname, waveform_loss)
return waveform_loss
def l2(sname, true, model):
with tf.name_scope(sname):
waveform_loss = tf.nn.l2_loss(tf.subtract(true, model))
tf.summary.scalar(sname, waveform_loss)
return waveform_loss
def linf(sname, true, model):
with tf.name_scope(sname):
waveform_loss = tf.reduce_max(tf.abs(tf.subtract(true, model)))
tf.summary.scalar(sname, waveform_loss)
return waveform_loss
def geo_mean(sname, true, model):
with tf.name_scope(sname):
waveform_loss = tf.exp(tf.reduce_mean(tf.log1p(
tf.abs(tf.subtract(true, model)))))
tf.summary.scalar(sname, waveform_loss)
return waveform_loss
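# Minimal usage sketch (assumes the TF1-style graph API these helpers target;
# the placeholder names and shape are illustrative only):
#
#   true = tf.placeholder(tf.float32, shape=[None, 1024], name='true_wave')
#   model = tf.placeholder(tf.float32, shape=[None, 1024], name='model_wave')
#   loss = mse('train_mse', true, model)   # scalar tensor, also logged
#                                          # to TensorBoard via tf.summary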
|
{
"content_hash": "53a436e200a69cdeb4a164af239ea735",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 29.633333333333333,
"alnum_prop": 0.655793025871766,
"repo_name": "jhetherly/EnglishSpeechUpsampler",
"id": "aec0908e8e2c61f51ebc0b78c0a2ec142d9dcad5",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "losses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8261"
},
{
"name": "Python",
"bytes": "67237"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
}
|
import unittest
from pymmrouting.routeplanner import MultimodalRoutePlanner
from pymmrouting.inferenceengine import RoutingPlanInferer
from pymmrouting.orm_graphmodel import SwitchType, Mode, Session
class RoutePlannerTestCase(unittest.TestCase):
def setUp(self):
routing_options_file = \
"test/routing_options_driving_parking_and_go.json"
self.inferer = RoutingPlanInferer()
self.inferer.load_routing_options_from_file(routing_options_file)
self.plans = self.inferer.generate_routing_plan()
self.modes = {
str(m_name): m_id
for m_name, m_id in
Session.query(Mode.mode_name, Mode.mode_id)
}
self.switch_types = {
str(t_name): t_id
for t_name, t_id in
Session.query(SwitchType.type_name, SwitchType.type_id)
}
def test_find_path_for_walking_plan(self):
self.assertIn([self.modes["foot"]], [i.mode_list for i in self.plans])
for p in self.plans:
if p.mode_list == [self.modes["foot"]]:
plan = p
planner = MultimodalRoutePlanner()
result = planner.find_path(plan)
rd = result["routes"][0]
self.assertTrue(rd["existence"])
self.assertTrue("Walking", rd["summary"])
self.assertAlmostEqual(5567.744, rd["distance"], places=3)
self.assertAlmostEqual(74.237, rd["duration"], places=3)
self.assertEqual(rd["distance"], rd["walking_distance"])
self.assertEqual(rd["duration"], rd["walking_duration"])
self.assertTrue(not rd["switch_points"])
self.assertEqual(1, len(rd["geojson"]["features"]))
self.assertEqual("foot", rd["geojson"]["features"][0]["properties"]["mode"])
self.assertEqual("LineString", rd["geojson"]["features"][0]['geometry']["type"])
self.assertGreaterEqual(len(rd["geojson"]["features"][0]["geometry"]["coordinates"]), 2)
self.assertListEqual([11.5682317, 48.1500053],
rd["geojson"]["features"][0]["geometry"]["coordinates"][0])
self.assertListEqual([11.5036395, 48.1583208],
rd["geojson"]["features"][0]["geometry"]["coordinates"][-1])
self.assertListEqual([11.5682317, 48.1500053],
result["source"]["geometry"]["coordinates"])
self.assertListEqual([11.5036395, 48.1583208],
result["target"]["geometry"]["coordinates"])
planner.cleanup()
def test_find_path_for_driving_and_walking_plan(self):
self.assertIn([self.modes["private_car"], self.modes["foot"]],
[i.mode_list for i in self.plans])
for p in self.plans:
if p.mode_list == [self.modes["private_car"], self.modes["foot"]]:
plan = p
with MultimodalRoutePlanner() as planner:
result = planner.find_path(plan)
rd = result["routes"][0]
self.assertListEqual([11.5682317, 48.1500053],
result["source"]["geometry"]["coordinates"])
self.assertListEqual([11.5036395, 48.1583208],
result["target"]["geometry"]["coordinates"])
self.assertTrue(rd["existence"])
self.assertFalse(not rd["switch_points"])
self.assertEqual("car_parking",
rd["switch_points"][0]['properties']["switch_type"])
self.assertEqual("Point", rd["switch_points"][0]["geometry"]["type"])
self.assertTrue("Driving, parking and walking", rd["summary"])
self.assertEqual(3, len(rd["geojson"]["features"]))
self.assertEqual("private_car", rd["geojson"]["features"][0]["properties"]["mode"])
self.assertEqual("car_parking", rd["geojson"]["features"][1]["properties"]["switch_type"])
self.assertEqual("foot", rd["geojson"]["features"][2]["properties"]["mode"])
self.assertAlmostEqual(6700.675, rd["distance"], places=3)
self.assertAlmostEqual(20.2001, rd["duration"], places=3)
self.assertAlmostEqual(522.534, rd["walking_distance"], places=3)
self.assertAlmostEqual(6.967, rd["walking_duration"], places=3)
self.assertEqual("LineString", rd["geojson"]["features"][0]["geometry"]["type"])
self.assertListEqual([11.5682317, 48.1500053],
rd["geojson"]["features"][0]["geometry"]["coordinates"][0])
self.assertListEqual([11.5008518, 48.1611429],
rd["geojson"]["features"][0]["geometry"]["coordinates"][-1])
self.assertIn("stroke", rd['geojson']['features'][0]['properties'])
self.assertGreaterEqual(len(rd["geojson"]["features"][0]["geometry"]["coordinates"]), 2)
self.assertEqual("switch_point", rd['geojson']['features'][1]['properties']['type'])
self.assertEqual("car_parking", rd['geojson']['features'][1]['properties']['switch_type'])
self.assertIn("marker-size", rd['geojson']['features'][1]['properties'])
self.assertEqual("LineString", rd["geojson"]["features"][2]["geometry"]["type"])
self.assertListEqual([11.5008518, 48.1611429],
rd["geojson"]["features"][2]["geometry"]["coordinates"][0])
self.assertListEqual([11.5036395, 48.1583208],
rd["geojson"]["features"][2]["geometry"]["coordinates"][-1])
self.assertGreaterEqual(len(rd["geojson"]["features"][2]["geometry"]["coordinates"]), 2)
def test_batch_find_paths(self):
pass
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "fe30a917cdd21d9726bf885200d8ad06",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 102,
"avg_line_length": 56.22549019607843,
"alnum_prop": 0.5787271142109852,
"repo_name": "tumluliu/pymmrouting",
"id": "7c91bcaeebf4adf0b80cfd9f789958b4812fd8ed",
"size": "5735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_routeplanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115186"
}
],
"symlink_target": ""
}
|
import json
from tempest_lib.common import rest_client
from tempest_lib import exceptions as lib_exc
from neutron_lbaas.tests.tempest.lib.common import service_client
from neutron_lbaas.tests.tempest.lib import exceptions
class TokenClientJSON(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
dscv = disable_ssl_certificate_validation
super(TokenClientJSON, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests)
# Normalize URI to ensure /tokens is in it.
if 'tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/tokens'
self.auth_url = auth_url
def auth(self, user, password, tenant=None):
creds = {
'auth': {
'passwordCredentials': {
'username': user,
'password': password,
},
}
}
if tenant:
creds['auth']['tenantName'] = tenant
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body['access'])
def auth_token(self, token_id, tenant=None):
creds = {
'auth': {
'token': {
'id': token_id,
},
}
}
if tenant:
creds['auth']['tenantName'] = tenant
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body['access'])
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
if headers is None:
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise lib_exc.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
if isinstance(resp_body, str):
resp_body = json.loads(resp_body)
return resp, resp_body
def get_token(self, user, password, tenant, auth_data=False):
"""
Returns (token id, token data) for supplied credentials
"""
body = self.auth(user, password, tenant)
if auth_data:
return body['token']['id'], body
else:
return body['token']['id']
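# Usage sketch (the endpoint URL and credentials are hypothetical; the
# constructor appends /tokens to the auth URL if it is missing):
#
#   client = TokenClientJSON('http://keystone.example.com:5000/v2.0')
#   token_id = client.get_token('demo', 'secret', tenant='demo')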
|
{
"content_hash": "ec04dd4af5a306766725bf46d3540f95",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 73,
"avg_line_length": 33.197916666666664,
"alnum_prop": 0.5591465327894571,
"repo_name": "gandelman-a/neutron-lbaas",
"id": "4795e720040fbdb85ca94bc3433f14edb1668f21",
"size": "3818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron_lbaas/tests/tempest/lib/services/identity/v2/json/token_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "2377577"
},
{
"name": "Ruby",
"bytes": "3365"
},
{
"name": "Shell",
"bytes": "13512"
}
],
"symlink_target": ""
}
|
from rdflib.graph import Graph
from rdflib.namespace import RDF
from rdflib.plugins.parsers.rdfxml import CORE_SYNTAX_TERMS
from rdflib.term import BNode, Literal, URIRef
"""
Ah... it's coming back to me...
[6:32p] eikeon: think it's so transitivity holds...
[6:32p] eikeon: if a==b and b==c then a should == c
[6:32p] eikeon: "foo"==Literal("foo")
[6:33p] eikeon: We don't want URIRef("foo")==Literal("foo")
[6:33p] eikeon: But if we have URIRef("foo")=="foo" then it implies it.
[6:33p] chimezie: yes, definately not the other RDFLib 'typed' RDF (and N3) terms
[6:34p] eikeon: Why do you need URIRef("foo")=="foo" ?
[6:34p] chimezie: i'm just wondering if a URI and a string with the same lexical value, are by definition 'different'
[6:35p] eikeon: Think so, actually. Think of trying to serialize some triples.
[6:36p] eikeon: If they are the same you'd serialize them the same, no?
[6:36p] chimezie: I guess I was thinking of a 'string' in a native datatype sense, not in the RDF sense (where they would be distinctly different)
[6:37p] eikeon: We should try and brain dump some of this...
[6:37p] eikeon: it look a fairly long time to work out.
[6:37p] eikeon: But think we finally landed in the right spot.
[6:38p] eikeon: I know many of the backends break if URIRef("foo")==Literal("foo")
[6:39p] eikeon: And if we want "foo"==Literal("foo") --- then we really can't have URIRef("foo") also == "foo"
"""
class TestIdentifierEquality:
def setup_method(self):
self.uriref = URIRef("http://example.org/")
self.bnode = BNode()
self.literal = Literal("http://example.org/")
self.python_literal = "http://example.org/"
self.python_literal_2 = "foo"
def testA(self):
assert self.uriref != self.literal
def testB(self):
assert self.literal != self.uriref
def testC(self):
assert self.uriref != self.python_literal
def testD(self):
assert self.python_literal != self.uriref
def testE(self):
assert self.literal != self.python_literal
def testE2(self):
assert self.literal.eq(self.python_literal)
def testF(self):
assert self.python_literal != self.literal
def testG(self):
assert "foo" not in CORE_SYNTAX_TERMS
def testH(self):
assert (
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
in CORE_SYNTAX_TERMS
)
def testI(self):
g = Graph()
g.add((self.uriref, RDF.value, self.literal))
g.add((self.uriref, RDF.value, self.uriref))
assert len(g) == 2
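# The invariant under test, in brief (a sketch): ``==`` between a Literal and
# a plain Python string is False, while ``Literal.eq`` compares by value.
#
#   >>> from rdflib.term import Literal
#   >>> Literal("foo") == "foo"
#   False
#   >>> Literal("foo").eq("foo")
#   True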
|
{
"content_hash": "68807583ec40191b4874de462ad57e7a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 146,
"avg_line_length": 37.507246376811594,
"alnum_prop": 0.6561051004636785,
"repo_name": "RDFLib/rdflib",
"id": "2dfdb734d19b7efe7cfb0ecfbe64764fa11823f0",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_literal/test_uriref_literal_comparison.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "41303"
},
{
"name": "Python",
"bytes": "2828721"
},
{
"name": "Ruby",
"bytes": "31777"
},
{
"name": "Shell",
"bytes": "6030"
},
{
"name": "XSLT",
"bytes": "1588"
}
],
"symlink_target": ""
}
|
"""Adds support for generic thermostat units."""
import asyncio
import logging
import math
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, CoreState, callback
from homeassistant.exceptions import ConditionError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HEATER): cv.entity_id,
vol.Required(CONF_SENSOR): cv.entity_id,
vol.Optional(CONF_AC_MODE): cv.boolean,
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
vol.Optional(CONF_KEEP_ALIVE): cv.positive_time_period,
vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
[HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
),
vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the generic thermostat platform."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
heater_entity_id = config.get(CONF_HEATER)
sensor_entity_id = config.get(CONF_SENSOR)
min_temp = config.get(CONF_MIN_TEMP)
max_temp = config.get(CONF_MAX_TEMP)
target_temp = config.get(CONF_TARGET_TEMP)
ac_mode = config.get(CONF_AC_MODE)
min_cycle_duration = config.get(CONF_MIN_DUR)
cold_tolerance = config.get(CONF_COLD_TOLERANCE)
hot_tolerance = config.get(CONF_HOT_TOLERANCE)
keep_alive = config.get(CONF_KEEP_ALIVE)
initial_hvac_mode = config.get(CONF_INITIAL_HVAC_MODE)
away_temp = config.get(CONF_AWAY_TEMP)
precision = config.get(CONF_PRECISION)
unit = hass.config.units.temperature_unit
unique_id = config.get(CONF_UNIQUE_ID)
async_add_entities(
[
GenericThermostat(
name,
heater_entity_id,
sensor_entity_id,
min_temp,
max_temp,
target_temp,
ac_mode,
min_cycle_duration,
cold_tolerance,
hot_tolerance,
keep_alive,
initial_hvac_mode,
away_temp,
precision,
unit,
unique_id,
)
]
)
class GenericThermostat(ClimateEntity, RestoreEntity):
"""Representation of a Generic Thermostat device."""
def __init__(
self,
name,
heater_entity_id,
sensor_entity_id,
min_temp,
max_temp,
target_temp,
ac_mode,
min_cycle_duration,
cold_tolerance,
hot_tolerance,
keep_alive,
initial_hvac_mode,
away_temp,
precision,
unit,
unique_id,
):
"""Initialize the thermostat."""
self._name = name
self.heater_entity_id = heater_entity_id
self.sensor_entity_id = sensor_entity_id
self.ac_mode = ac_mode
self.min_cycle_duration = min_cycle_duration
self._cold_tolerance = cold_tolerance
self._hot_tolerance = hot_tolerance
self._keep_alive = keep_alive
self._hvac_mode = initial_hvac_mode
self._saved_target_temp = target_temp or away_temp
self._temp_precision = precision
if self.ac_mode:
self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
else:
self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
self._active = False
self._cur_temp = None
self._temp_lock = asyncio.Lock()
self._min_temp = min_temp
self._max_temp = max_temp
self._attr_preset_mode = PRESET_NONE
self._target_temp = target_temp
self._unit = unit
self._unique_id = unique_id
self._support_flags = SUPPORT_FLAGS
if away_temp:
self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
self._attr_preset_modes = [PRESET_NONE, PRESET_AWAY]
else:
self._attr_preset_modes = [PRESET_NONE]
self._away_temp = away_temp
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
# Add listener
self.async_on_remove(
async_track_state_change_event(
self.hass, [self.sensor_entity_id], self._async_sensor_changed
)
)
self.async_on_remove(
async_track_state_change_event(
self.hass, [self.heater_entity_id], self._async_switch_changed
)
)
if self._keep_alive:
self.async_on_remove(
async_track_time_interval(
self.hass, self._async_control_heating, self._keep_alive
)
)
@callback
def _async_startup(*_):
"""Init on startup."""
sensor_state = self.hass.states.get(self.sensor_entity_id)
if sensor_state and sensor_state.state not in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
self._async_update_temp(sensor_state)
self.async_write_ha_state()
switch_state = self.hass.states.get(self.heater_entity_id)
if switch_state and switch_state.state not in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
self.hass.create_task(self._check_switch_initial_state())
if self.hass.state == CoreState.running:
_async_startup()
else:
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)
# Check If we have an old state
if (old_state := await self.async_get_last_state()) is not None:
# If we have no initial temperature, restore
if self._target_temp is None:
                # If there is no previously saved temperature, fall back to a default
if old_state.attributes.get(ATTR_TEMPERATURE) is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
self._target_temp = self.min_temp
_LOGGER.warning(
"Undefined target temperature, falling back to %s",
self._target_temp,
)
else:
self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
if old_state.attributes.get(ATTR_PRESET_MODE) in self._attr_preset_modes:
self._attr_preset_mode = old_state.attributes.get(ATTR_PRESET_MODE)
if not self._hvac_mode and old_state.state:
self._hvac_mode = old_state.state
else:
# No previous state, try and restore defaults
if self._target_temp is None:
if self.ac_mode:
self._target_temp = self.max_temp
else:
self._target_temp = self.min_temp
_LOGGER.warning(
"No previously saved temperature, setting to %s", self._target_temp
)
# Set default state to off
if not self._hvac_mode:
self._hvac_mode = HVAC_MODE_OFF
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this thermostat."""
return self._unique_id
@property
def precision(self):
"""Return the precision of the system."""
if self._temp_precision is not None:
return self._temp_precision
return super().precision
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
# Since this integration does not yet have a step size parameter
# we have to re-use the precision as the step size for now.
return self.precision
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._cur_temp
@property
def hvac_mode(self):
"""Return current operation."""
return self._hvac_mode
@property
def hvac_action(self):
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if self._hvac_mode == HVAC_MODE_OFF:
return CURRENT_HVAC_OFF
if not self._is_device_active:
return CURRENT_HVAC_IDLE
if self.ac_mode:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_HEAT
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def hvac_modes(self):
"""List of available operation modes."""
return self._hvac_list
async def async_set_hvac_mode(self, hvac_mode):
"""Set hvac mode."""
if hvac_mode == HVAC_MODE_HEAT:
self._hvac_mode = HVAC_MODE_HEAT
await self._async_control_heating(force=True)
elif hvac_mode == HVAC_MODE_COOL:
self._hvac_mode = HVAC_MODE_COOL
await self._async_control_heating(force=True)
elif hvac_mode == HVAC_MODE_OFF:
self._hvac_mode = HVAC_MODE_OFF
if self._is_device_active:
await self._async_heater_turn_off()
else:
_LOGGER.error("Unrecognized hvac mode: %s", hvac_mode)
return
# Ensure we update the current operation after changing the mode
self.async_write_ha_state()
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
self._target_temp = temperature
await self._async_control_heating(force=True)
self.async_write_ha_state()
@property
def min_temp(self):
"""Return the minimum temperature."""
if self._min_temp is not None:
return self._min_temp
# get default temp from super class
return super().min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
if self._max_temp is not None:
return self._max_temp
# Get default temp from super class
return super().max_temp
async def _async_sensor_changed(self, event):
"""Handle temperature changes."""
new_state = event.data.get("new_state")
if new_state is None or new_state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN):
return
self._async_update_temp(new_state)
await self._async_control_heating()
self.async_write_ha_state()
async def _check_switch_initial_state(self):
"""Prevent the device from keep running if HVAC_MODE_OFF."""
if self._hvac_mode == HVAC_MODE_OFF and self._is_device_active:
_LOGGER.warning(
"The climate mode is OFF, but the switch device is ON. Turning off device %s",
self.heater_entity_id,
)
await self._async_heater_turn_off()
@callback
def _async_switch_changed(self, event):
"""Handle heater switch state changes."""
new_state = event.data.get("new_state")
old_state = event.data.get("old_state")
if new_state is None:
return
if old_state is None:
self.hass.create_task(self._check_switch_initial_state())
self.async_write_ha_state()
@callback
def _async_update_temp(self, state):
"""Update thermostat with latest state from sensor."""
try:
cur_temp = float(state.state)
if math.isnan(cur_temp) or math.isinf(cur_temp):
raise ValueError(f"Sensor has illegal state {state.state}")
self._cur_temp = cur_temp
except ValueError as ex:
_LOGGER.error("Unable to update from sensor: %s", ex)
async def _async_control_heating(self, time=None, force=False):
"""Check if we need to turn heating on or off."""
async with self._temp_lock:
if not self._active and None not in (
self._cur_temp,
self._target_temp,
):
self._active = True
_LOGGER.info(
"Obtained current and target temperature. "
"Generic thermostat active. %s, %s",
self._cur_temp,
self._target_temp,
)
if not self._active or self._hvac_mode == HVAC_MODE_OFF:
return
# If the `force` argument is True, we
# ignore `min_cycle_duration`.
# If the `time` argument is not none, we were invoked for
# keep-alive purposes, and `min_cycle_duration` is irrelevant.
if not force and time is None and self.min_cycle_duration:
if self._is_device_active:
current_state = STATE_ON
else:
current_state = HVAC_MODE_OFF
try:
long_enough = condition.state(
self.hass,
self.heater_entity_id,
current_state,
self.min_cycle_duration,
)
except ConditionError:
long_enough = False
if not long_enough:
return
too_cold = self._target_temp >= self._cur_temp + self._cold_tolerance
too_hot = self._cur_temp >= self._target_temp + self._hot_tolerance
if self._is_device_active:
if (self.ac_mode and too_cold) or (not self.ac_mode and too_hot):
_LOGGER.info("Turning off heater %s", self.heater_entity_id)
await self._async_heater_turn_off()
elif time is not None:
# The time argument is passed only in keep-alive case
_LOGGER.info(
"Keep-alive - Turning on heater heater %s",
self.heater_entity_id,
)
await self._async_heater_turn_on()
else:
if (self.ac_mode and too_hot) or (not self.ac_mode and too_cold):
_LOGGER.info("Turning on heater %s", self.heater_entity_id)
await self._async_heater_turn_on()
elif time is not None:
# The time argument is passed only in keep-alive case
_LOGGER.info(
"Keep-alive - Turning off heater %s", self.heater_entity_id
)
await self._async_heater_turn_off()
@property
def _is_device_active(self):
"""If the toggleable device is currently active."""
if not self.hass.states.get(self.heater_entity_id):
return None
return self.hass.states.is_state(self.heater_entity_id, STATE_ON)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
async def _async_heater_turn_on(self):
"""Turn heater toggleable device on."""
data = {ATTR_ENTITY_ID: self.heater_entity_id}
await self.hass.services.async_call(
HA_DOMAIN, SERVICE_TURN_ON, data, context=self._context
)
async def _async_heater_turn_off(self):
"""Turn heater toggleable device off."""
data = {ATTR_ENTITY_ID: self.heater_entity_id}
await self.hass.services.async_call(
HA_DOMAIN, SERVICE_TURN_OFF, data, context=self._context
)
async def async_set_preset_mode(self, preset_mode: str):
"""Set new preset mode."""
if preset_mode not in (self._attr_preset_modes or []):
raise ValueError(
f"Got unsupported preset_mode {preset_mode}. Must be one of {self._attr_preset_modes}"
)
if preset_mode == self._attr_preset_mode:
            # Nothing changed, so avoid an unnecessary state write
return
if preset_mode == PRESET_AWAY:
self._attr_preset_mode = PRESET_AWAY
self._saved_target_temp = self._target_temp
self._target_temp = self._away_temp
await self._async_control_heating(force=True)
elif preset_mode == PRESET_NONE:
self._attr_preset_mode = PRESET_NONE
self._target_temp = self._saved_target_temp
await self._async_control_heating(force=True)
self.async_write_ha_state()
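# Configuration sketch: the PLATFORM_SCHEMA above can be exercised directly
# with a plain dict, mirroring what Home Assistant does when loading the YAML
# platform config (the entity ids below are hypothetical):
#
#   conf = PLATFORM_SCHEMA({
#       'platform': 'generic_thermostat',
#       'heater': 'switch.study_heater',
#       'target_sensor': 'sensor.study_temperature',
#       'min_temp': 15,
#       'max_temp': 25,
#   })
#   # -> validated dict with floats coerced and defaults (name, tolerances)
#   #    filled in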
|
{
"content_hash": "6075a2fe553c3b882d4bd79774d0268c",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 102,
"avg_line_length": 35.52310536044362,
"alnum_prop": 0.5755021334165886,
"repo_name": "jawilson/home-assistant",
"id": "2c27d371c5e3672811650fa7bcd79be22f46eae0",
"size": "19218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/generic_thermostat/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.exceptions import PolicyValidationError
from c7n.actions import AutoTagUser
from c7n.utils import query_instances
from .common import BaseTest, event_data
from mock import MagicMock
class AutoTagCreator(BaseTest):
def test_auto_tag_assumed(self):
# verify auto tag works with assumed roles and can optionally update
session_factory = self.replay_flight_data("test_ec2_autotag_assumed")
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "update": True, "tag": "Owner"}],
},
session_factory=session_factory,
)
event = {
"detail": event_data("event-cloud-trail-run-instance-creator-assumed.json"),
"debug": True,
}
resources = policy.push(event, None)
self.assertEqual(len(resources), 1)
tags = {t["Key"]: t["Value"] for t in resources[0]["Tags"]}
self.assertEqual(tags["Owner"], "Bob")
session = session_factory()
instances = query_instances(session, InstanceIds=[resources[0]["InstanceId"]])
tags = {t["Key"]: t["Value"] for t in instances[0]["Tags"]}
self.assertEqual(tags["Owner"], "Radiant")
def test_auto_tag_creator(self):
session_factory = self.replay_flight_data("test_ec2_autotag_creator")
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "tag": "Owner"}],
},
session_factory=session_factory,
)
event = {
"detail": event_data("event-cloud-trail-run-instance-creator.json"),
"debug": True,
}
resources = policy.push(event, None)
self.assertEqual(len(resources), 1)
# Verify tag added
session = session_factory()
instances = query_instances(session, InstanceIds=[resources[0]["InstanceId"]])
tags = {t["Key"]: t["Value"] for t in instances[0]["Tags"]}
self.assertEqual(tags["Owner"], "c7nbot")
# Verify we don't overwrite extant
client = session.client("ec2")
client.create_tags(
Resources=[resources[0]["InstanceId"]],
Tags=[{"Key": "Owner", "Value": "Bob"}],
)
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "tag": "Owner"}],
},
session_factory=session_factory,
)
resources = policy.push(event, None)
instances = query_instances(session, InstanceIds=[resources[0]["InstanceId"]])
tags = {t["Key"]: t["Value"] for t in instances[0]["Tags"]}
self.assertEqual(tags["Owner"], "Bob")
def test_error_auto_tag_bad_mode(self):
# mode type is not cloudtrail
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "auto-tag-error",
"resource": "ec2",
"mode": {"type": "not-cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "update": True, "tag": "Owner"}],
},
session_factory=None,
validate=False,
)
def test_auto_tag_user_class_method_process(self):
# check that it works with regular IAMUser creator
event = {
"detail": event_data("event-cloud-trail-run-instance-creator.json"),
"debug": True,
}
session_factory = self.replay_flight_data("test_ec2_autotag_creator")
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [
{
"type": "auto-tag-user",
"tag": "CreatorName",
"principal_id_tag": "CreatorId",
}
],
},
session_factory=session_factory,
)
resources = policy.push(event, None)
auto_tag_user = AutoTagUser()
auto_tag_user.data = {"tag": "CreatorName", "principal_id_tag": "CreatorId"}
auto_tag_user.manager = MagicMock()
result = auto_tag_user.process(resources, event)
self.assertEqual(result["CreatorName"], "c7nbot")
self.assertEqual(result["CreatorId"], "AIDAJEZOTH6YPO3DY45QW")
        # check that it doesn't set principalId when not specified, for a regular IAMUser creator
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "tag": "CreatorName"}],
},
session_factory=session_factory,
)
auto_tag_user.data = {"tag": "CreatorName"}
result = auto_tag_user.process(resources, event)
self.assertEqual(result, {"CreatorName": "c7nbot"})
# check that it sets principalId with assumeRole
session_factory = self.replay_flight_data("test_ec2_autotag_assumed")
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [
{
"type": "auto-tag-user",
"tag": "Owner",
"principal_id_tag": "OwnerId",
}
],
},
session_factory=session_factory,
)
event = {
"detail": event_data("event-cloud-trail-run-instance-creator-assumed.json"),
"debug": True,
}
resources = policy.push(event, None)
auto_tag_user.data = {"tag": "Owner", "principal_id_tag": "OwnerId"}
result = auto_tag_user.process(resources, event)
self.assertEqual(
result, {"Owner": "Radiant", "OwnerId": "AROAIFMJLHZRIKEFRKUUF"}
)
        # check that it does not set principalId with assumeRole when not specified
policy = self.load_policy(
{
"name": "ec2-auto-tag",
"resource": "ec2",
"mode": {"type": "cloudtrail", "events": ["RunInstances"]},
"actions": [{"type": "auto-tag-user", "tag": "Owner"}],
},
session_factory=session_factory,
)
auto_tag_user.data = {"tag": "Owner"}
result = auto_tag_user.process(resources, event)
self.assertEqual(result, {"Owner": "Radiant"})
|
{
"content_hash": "21037a1a27d03bfb4c5aa10a34b2ef1b",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 88,
"avg_line_length": 39.11351351351351,
"alnum_prop": 0.5174129353233831,
"repo_name": "FireballDWF/cloud-custodian",
"id": "3ea45aabcf5433774a3a85ef4543c6601c40ca40",
"size": "7826",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/test_autotag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "142024"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1440"
},
{
"name": "Python",
"bytes": "4893319"
},
{
"name": "Shell",
"bytes": "7227"
}
],
"symlink_target": ""
}
|
'''
Created on 2017/11/23
@author: sunyihuan
'''
import keras
import scipy
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
# import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from class_four.week_two.kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
# print("number of training examples = " + str(X_train.shape[0]))
# print("number of test examples = " + str(X_test.shape[0]))
# print("X_train shape: " + str(X_train.shape))
# print("Y_train shape: " + str(Y_train.shape))
# print("X_test shape: " + str(X_test.shape))
# print("Y_test shape: " + str(Y_test.shape))
def HappyModel(input_shape):
X_input = Input(input_shape)
X = ZeroPadding2D((3, 3))(X_input)
X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
X = BatchNormalization(axis=3, name='bno')(X)
X = Activation('relu')(X)
X = MaxPooling2D((2, 2), name='max_pool')(X)
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
model = Model(inputs=X_input, outputs=X, name='HappyModel')
return model
happyModel = HappyModel((64, 64, 3))
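# Quick shape check (a sketch): the network maps 64x64 RGB images to a single
# sigmoid probability.
#
#   >>> happyModel.output_shape
#   (None, 1)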
# happyModel.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
#                    loss='binary_crossentropy', metrics=['accuracy'])
# happyModel.fit(x=X_train, y=Y_train, batch_size=16, epochs=20)
# preds = happyModel.evaluate(x=X_test, y=Y_test)
# print("The Accuracy: " + str(preds[1]))
img_path = 'images/panjie0.jpg'
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
# happyModel.summary()
|
{
"content_hash": "c83d52fdc346885cbfb5453b2c011060",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 117,
"avg_line_length": 30.29268292682927,
"alnum_prop": 0.7020933977455717,
"repo_name": "sunyihuan326/DeltaLab",
"id": "9b6aed0d3a9fd56fa251a5625f6941c129526797",
"size": "2499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Andrew_NG_learning/class_four/week_two/syh_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "31466"
},
{
"name": "Python",
"bytes": "1161366"
}
],
"symlink_target": ""
}
|
from req import WebRequestHandler
from req import Service
import tornado
import math
import datetime
from map import *
class WebContestsHandler(WebRequestHandler):
@tornado.gen.coroutine
def get(self):
args = ["page"]
meta = self.get_args(args)
meta['count'] = 10
meta["group_id"] = self.current_group
### default page is 1
if not meta['page']:
meta['page'] = 1
### if get page is not int then redirect to page 1
try:
meta["page"] = int(meta["page"])
        except (TypeError, ValueError):
self.write_error((500, 'Argument page error'))
return
### should in range
err, count = yield from Service.Contest.get_contest_list_count(meta)
page_count = max(math.ceil(count / meta['count']), 1)
if int(meta['page']) < 1 or int(meta['page']) > page_count:
self.write_error((404, 'Page out of range'))
return
### get data
err, data = yield from Service.Contest.get_contest_list(meta)
### about pagination
page = {}
page['total'] = page_count
page['current'] = meta['page']
page['url'] = '/groups/%s/contests/' % meta['group_id']
page['get'] = {}
self.render('./contests/contests.html', data=data, page=page)
class WebContestHandler(WebRequestHandler):
def check_view(self, meta):
err, data = yield from Service.Contest.get_contest(meta)
if err:
self.write_error(500)
return False
if map_group_power['contest_manage'] in self.current_group_power or int(data['visible']) > 0:
return True
self.write_error(403)
return False
@tornado.gen.coroutine
def get(self, id=None, action=None):
meta = {}
meta['id'] = id
meta['group_id'] = self.current_group
if not (yield from self.check_view(meta)):
return
err, data = yield from Service.Contest.get_contest(meta)
self.render('./contests/contest.html', contest_data=data)
class WebContestEditHandler(WebRequestHandler):
def check_edit(self, meta):
err, data = yield from Service.Contest.get_contest(meta)
if err:
self.write_error(500)
return False
if map_group_power['contest_manage'] in self.current_group_power:
return True
self.write_error(403)
return False
@tornado.gen.coroutine
def get(self, id, action=None):
meta = {}
meta['id'] = id
meta['group_id'] = self.current_group
if not (yield from self.check_edit(meta)):
return
err, contest_data = yield from Service.Contest.get_contest(meta)
self.render('./contests/contest_edit.html', contest_data=contest_data)
class WebContestProblemHandler(WebRequestHandler):
def check_view(self, meta={}):
err, data = yield from Service.Problem.get_problem({'id': meta['id'], 'group_id': meta['group_id']})
err, contest_data = yield from Service.Contest.get_contest_problem_list( {'id': meta['contest_id']} )
for x in contest_data:
if int(x['id']) == int(meta['id']):
return True
if err:
self.write_error(err)
return False
if int(data['group_id']) == int(meta['group_id']) and (map_group_power['problem_manage'] in self.current_group_power or int(data['visible']) > 0):
return True
self.write_error(403)
return False
@tornado.gen.coroutine
def get(self, contest_id, problem_id, action=None):
meta = {
'id': problem_id,
'group_id': self.current_group,
'contest_id': contest_id,
}
if not (yield from self.check_view(meta)):
return
err, data = yield from Service.Problem.get_problem(meta)
err, contest_data = yield from Service.Contest.get_contest({"id": contest_id, "group_id": self.current_group})
        if action is None:
self.render('./contests/contest_problem.html', data=data, contest_data=contest_data)
elif action == "submit":
self.render('./contests/contest_problem_submit.html', data=data, contest_data=contest_data)
else:
self.write_error(404)
class WebContestSubmissionsHandler(WebRequestHandler):
@tornado.gen.coroutine
def get(self, contest_id):
err, contest_data = yield from Service.Contest.get_contest({"id": contest_id, "group_id": self.current_group})
if err:
self.write_error(err)
return
args = ['account', 'problem_id', 'page']
meta = self.get_args(args)
meta["count"] = 10
meta['id'] = contest_id
meta['group_id'] = self.current_group
meta['user_id'] = self.account['id']
meta['current_group_power'] = self.current_group_power
### default page is 1
if not meta['page']:
meta['page'] = 1
### if get page is not int then throw the error
try:
meta["page"] = int(meta["page"])
        except (TypeError, ValueError):
self.write_error((500, 'Argument page error'))
return
err, data = yield from Service.Contest.get_contest_submission_list(meta)
if err:
            self.write_error(err)
return
### should in range
err, count = yield from Service.Contest.get_contest_submission_list_count(meta)
if err:
self.write_error(err)
return
page_count = max(math.ceil(count / meta['count']), 1)
if int(meta['page']) < 1 or int(meta['page']) > page_count:
self.write_error((500, 'Page out of range'))
return
### about pagination
page = {}
page['total'] = page_count
page['current'] = meta['page']
page['url'] = '/groups/%s/contests/%s/submissions/' % (meta['group_id'], contest_id)
page['get'] = self.get_args(args)
self.render('./contests/contest_submissions.html', contest_data=contest_data, data=data, page=page)
class WebContestSubmissionHandler(WebRequestHandler):
@tornado.gen.coroutine
def get(self, contest_id, id):
err, contest_data = yield from Service.Contest.get_contest({"id": contest_id, "group_id": self.current_group})
err, data = yield from Service.Contest.get_contest_submission({"id": contest_id, 'user_id': self.account['id'], 'current_group_power': self.current_group_power, "submission_id": id,"group_id": self.current_group})
if err:
self.write_error(err)
return
self.render('./contests/contest_submission.html', contest_data=contest_data, data=data)
class WebContestScoreboardHandler(WebRequestHandler):
@tornado.gen.coroutine
def get(self, contest_id):
meta = {
"id": contest_id,
"current_group_power": self.current_group_power
}
err, data = yield from Service.Contest.get_contest_submissions_scoreboard(meta)
err, contest_data = yield from Service.Contest.get_contest({"id": contest_id, "group_id": self.current_group})
self.render('./contests/contest_scoreboard.html', data=data, contest_data=contest_data)
|
{
"content_hash": "bbab754be18fca462d4f3c0805773d04",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 221,
"avg_line_length": 40.79329608938548,
"alnum_prop": 0.5905231443440153,
"repo_name": "Tocknicsu/nctuoj",
"id": "4c5ae5b939ab23e6f32794379dc4145b0aff9b69",
"size": "7302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/web/contest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4554"
},
{
"name": "HTML",
"bytes": "184348"
},
{
"name": "PLSQL",
"bytes": "552"
},
{
"name": "PLpgSQL",
"bytes": "27142"
},
{
"name": "Python",
"bytes": "250101"
},
{
"name": "Shell",
"bytes": "1519"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Casella = apps.get_model("taulell", "Casella")
Casella.objects.create(nCasella=20, nom="APARCAMENT GRATUÏT")
Sortida = apps.get_model("taulell", "Sortida")
Sortida.objects.create(nCasella=0, nom="Sortida")
Presso = apps.get_model("taulell", "Presso")
unaPreso=Presso.objects.create(nCasella=10, nom="PRESÓ", preu_sortir=20)
Ves_a_la_presso = apps.get_model("taulell", "Ves_a_la_presso")
Ves_a_la_presso.objects.create(nCasella=30, nom="VÉS A LA PRESÓ", posicio_presso=unaPreso)
#Jugador = apps.get_model("usuaris", "Jugador")
#Jugador.objects.create(nom="John", diners=1500, casella_Actual=Sortida)
Carrer = apps.get_model("taulell", "Carrer")
db_alias = schema_editor.connection.alias
Carrer.objects.using(db_alias).create(nCasella=1, nom="CARRER D'AVINYÓ", preu=60, lloguer=6, color="#722E81"),
Carrer.objects.using(db_alias).create(nCasella=3, nom="CARRER DE ROSSELLÓ", preu=60, lloguer=6, color="#722E81"),
Carrer.objects.using(db_alias).create(nCasella=6, nom="CARRER DE GIRONA", preu=100, lloguer=10, color="#C7DAF3"),
Carrer.objects.using(db_alias).create(nCasella=8, nom="CARRER D'URGELL", preu=100, lloguer=10, color="#C7DAF3"),
Carrer.objects.using(db_alias).create(nCasella=9, nom="CARRER DE CONSELL DE CENT", preu=120, lloguer=12, color="#C7DAF3"),
Carrer.objects.using(db_alias).create(nCasella=11, nom="CARRER DE MUNTANER", preu=140, lloguer=14, color="#C10A80"),
Carrer.objects.using(db_alias).create(nCasella=13, nom="CARRER D'ARIBAU", preu=140, lloguer=14, color="#C10A80"),
Carrer.objects.using(db_alias).create(nCasella=14, nom="CARRER DE TARRAGONA", preu=160, lloguer=16, color="#C10A80"),
Carrer.objects.using(db_alias).create(nCasella=16, nom="PASSEIG DE SANT JOAN", preu=180, lloguer=18, color="#FABD2A"),
Carrer.objects.using(db_alias).create(nCasella=18, nom="PLAÇA DE SANT JAUME", preu=180, lloguer=18, color="#FABD2A"),
Carrer.objects.using(db_alias).create(nCasella=19, nom="CARRER D'ARAGÓ", preu=200, lloguer=20, color="#FABD2A"),
Carrer.objects.using(db_alias).create(nCasella=21, nom="CARRER DE LLEIDA", preu=220, lloguer=22, color="#FC3B22"),
Carrer.objects.using(db_alias).create(nCasella=23, nom="CARRER DE FONTANELLA", preu=220, lloguer=22, color="#FC3B22"),
Carrer.objects.using(db_alias).create(nCasella=24, nom="TRAVESSERA DE LES CORTS", preu=240, lloguer=24, color="#FC3B22"),
Carrer.objects.using(db_alias).create(nCasella=26, nom="RAMBLES", preu=260, lloguer=26, color="#F8FD0F"),
Carrer.objects.using(db_alias).create(nCasella=27, nom="VIA LAIETANA", preu=260, lloguer=26, color="#F8FD0F"),
Carrer.objects.using(db_alias).create(nCasella=29, nom="PLAÇA DE CATALUNYA", preu=280, lloguer=28, color="#F8FD0F"),
Carrer.objects.using(db_alias).create(nCasella=31, nom="AVINGUDA PORTAL DE L'ANGEL", preu=300, lloguer=30, color="#43C42C"),
Carrer.objects.using(db_alias).create(nCasella=32, nom="PASSEIG MARAGALL", preu=300, lloguer=30, color="#43C42C"),
Carrer.objects.using(db_alias).create(nCasella=34, nom="AVINGUDA DE SARRIÀ", preu=320, lloguer=32, color="#43C42C"),
Carrer.objects.using(db_alias).create(nCasella=37, nom="CARRER DE BALMES", preu=350, lloguer=35, color="#677DB8"),
Carrer.objects.using(db_alias).create(nCasella=39, nom="PASSEIG DE GRÀCIA", preu=400, lloguer=40, color="#677DB8"),
Estacio = apps.get_model("taulell", "Estacio")
Estacio.objects.using(db_alias).create(nCasella=5, nom="ESTACIÓ FERROCARRILS CATALANS", preu=200, lloguer=50),
Estacio.objects.using(db_alias).create(nCasella=15, nom="ESTACIÓ PASSEIG DE GRACIA", preu=200, lloguer=50),
Estacio.objects.using(db_alias).create(nCasella=25, nom="ESTACIÓ DE FRANÇA", preu=200, lloguer=50),
Estacio.objects.using(db_alias).create(nCasella=35, nom="ESTACIÓ DE SANTS", preu=200, lloguer=50) ,
Especial = apps.get_model("taulell", "Especial")
Especial.objects.using(db_alias).create(nCasella=2, nom="CAIXA DE COMUNITAT"),
Especial.objects.using(db_alias).create(nCasella=4, nom="IMPOST SOBRE REINTEGRAMENTS"),
Especial.objects.using(db_alias).create(nCasella=7, nom="SORT"),
Especial.objects.using(db_alias).create(nCasella=17, nom="CAIXA DE COMUNITAT"),
Especial.objects.using(db_alias).create(nCasella=22, nom="SORT"),
Especial.objects.using(db_alias).create(nCasella=33, nom="CAIXA DE COMUNITAT"),
Especial.objects.using(db_alias).create(nCasella=38, nom="IMPOST SOBRE REINTEGRAMENTS"),
Suppliers = apps.get_model("taulell", "Suppliers")
db_alias = schema_editor.connection.alias
Suppliers.objects.using(db_alias).create(nCasella=12, nom="COMPANYIA ELÈCTRICA", preu=150, lloguer=10),
Suppliers.objects.using(db_alias).create(nCasella=28, nom="COMPANYIA D'AIGÜES", preu=150, lloguer=10),
def reverse_func(apps, schema_editor):
    # forwards_func() populates the board squares above,
    # so reverse_func() should delete them again.
    #Jugador = apps.get_model("usuaris", "Jugador")
    #Jugador.objects.all().delete()
Sortida = apps.get_model("taulell", "Sortida")
Sortida.objects.all().delete()
Casella = apps.get_model("taulell", "Casella")
Casella.objects.all().delete()
Presso = apps.get_model("taulell","Presso")
Presso.objects.all().delete()
Ves_a_la_presso=apps.get_model("taullel","Ves_a_la_presso")
Ves_a_la_presso.objects.all().delete()
Estacio=apps.get_model("taulell","Estacio")
db_alias = schema_editor.connection.alias
Estacio.objects.using(db_alias).filter(nCasella=5, nom="ESTACIÓ FERROCARRILS CATALANS", preu=200, lloguer=50).delete()
Estacio.objects.using(db_alias).filter(nCasella=15, nom="ESTACIÓ PASSEIG DE GRACIA", preu=200, lloguer=50).delete()
Estacio.objects.using(db_alias).filter(nCasella=25, nom="ESTACIÓ DE FRANÇA", preu=200, lloguer=50).delete()
    Estacio.objects.using(db_alias).filter(nCasella=35, nom="ESTACIÓ DE SANTS", preu=200, lloguer=50).delete()
Especial=apps.get_model("taulell","Especial")
db_alias = schema_editor.connection.alias
Especial.objects.using(db_alias).filter(nCasella=2, nom="CAIXA DE COMUNITAT").delete()
Especial.objects.using(db_alias).filter(nCasella=4, nom="IMPOST SOBRE REINTEGRAMENTS").delete()
Especial.objects.using(db_alias).filter(nCasella=7, nom="SORT").delete()
Especial.objects.using(db_alias).filter(nCasella=17, nom="CAIXA DE COMUNITAT").delete()
Especial.objects.using(db_alias).filter(nCasella=22, nom="SORT").delete()
Especial.objects.using(db_alias).filter(nCasella=33, nom="CAIXA DE COMUNITAT").delete()
Especial.objects.using(db_alias).filter(nCasella=38, nom="IMPOST SOBRE REINTEGRAMENTS").delete()
Suppliers=apps.get_model("taulell","Suppliers")
db_alias = schema_editor.connection.alias
Suppliers.objects.using(db_alias).filter(nCasella=12, nom="COMPANYIA ELÈCTRICA", preu=150, lloguer=10).delete() # Alquiler temporal
Suppliers.objects.using(db_alias).filter(nCasella=28, nom="COMPANYIA D'AIGÜES", preu=150, lloguer=10).delete() # Alquiler temporal
Carrer=apps.get_model("taulell","Carrer")
db_alias = schema_editor.connection.alias
Carrer.objects.using(db_alias).filter(nCasella=1, nom="CARRER D'AVINYÓ", preu=60, lloguer=6, color="#722E81").delete()
Carrer.objects.using(db_alias).filter(nCasella=3, nom="CARRER DE ROSSELLÓ", preu=60, lloguer=6, color="#722E81").delete()
Carrer.objects.using(db_alias).filter(nCasella=6, nom="CARRER DE GIRONA", preu=100, lloguer=10, color="#C7DAF3").delete()
Carrer.objects.using(db_alias).filter(nCasella=8, nom="CARRER D'URGELL", preu=100, lloguer=10, color="#C7DAF3").delete()
Carrer.objects.using(db_alias).filter(nCasella=9, nom="CARRER DE CONSELL DE CENT", preu=120, lloguer=12, color="#C7DAF3").delete()
Carrer.objects.using(db_alias).filter(nCasella=11, nom="CARRER DE MUNTANER", preu=140, lloguer=14, color="#C10A80").delete()
Carrer.objects.using(db_alias).filter(nCasella=13, nom="CARRER D'ARIBAU", preu=140, lloguer=14, color="#C10A80").delete()
Carrer.objects.using(db_alias).filter(nCasella=14, nom="CARRER DE TARRAGONA", preu=160, lloguer=16, color="#C10A80").delete()
Carrer.objects.using(db_alias).filter(nCasella=16, nom="PASSEIG DE SANT JOAN", preu=180, lloguer=18, color="#FABD2A").delete()
Carrer.objects.using(db_alias).filter(nCasella=18, nom="PLAÇA DE SANT JAUME", preu=180, lloguer=18, color="#FABD2A").delete()
Carrer.objects.using(db_alias).filter(nCasella=19, nom="CARRER D'ARAGÓ", preu=200, lloguer=20, color="#FABD2A").delete()
Carrer.objects.using(db_alias).filter(nCasella=21, nom="CARRER DE LLEIDA", preu=220, lloguer=22, color="#FC3B22").delete()
Carrer.objects.using(db_alias).filter(nCasella=23, nom="CARRER DE FONTANELLA", preu=220, lloguer=22, color="#FC3B22").delete()
Carrer.objects.using(db_alias).filter(nCasella=24, nom="TRAVESSERA DE LES CORTS", preu=240, lloguer=24, color="#FC3B22").delete()
Carrer.objects.using(db_alias).filter(nCasella=26, nom="RAMBLES", preu=260, lloguer=26, color="#F8FD0F").delete()
Carrer.objects.using(db_alias).filter(nCasella=27, nom="VIA LAIETANA", preu=260, lloguer=26, color="#F8FD0F").delete()
Carrer.objects.using(db_alias).filter(nCasella=29, nom="PLAÇA DE CATALUNYA", preu=280, lloguer=28, color="#F8FD0F").delete()
Carrer.objects.using(db_alias).filter(nCasella=31, nom="AVINGUDA PORTAL DE L'ANGEL", preu=300, lloguer=30, color="#43C42C").delete()
Carrer.objects.using(db_alias).filter(nCasella=32, nom="PASSEIG MARAGALL", preu=300, lloguer=30, color="#43C42C").delete()
Carrer.objects.using(db_alias).filter(nCasella=34, nom="AVINGUDA DE SARRIÀ", preu=320, lloguer=32, color="#43C42C").delete()
Carrer.objects.using(db_alias).filter(nCasella=37, nom="CARRER DE BALMES", preu=350, lloguer=35, color="#677DB8").delete()
    Carrer.objects.using(db_alias).filter(nCasella=39, nom="PASSEIG DE GRÀCIA", preu=400, lloguer=40, color="#677DB8").delete()
class Migration(migrations.Migration):
dependencies = [
('taulell', '0003_auto_20170110_0150'),
]
operations = [
migrations.AlterField(
model_name='ves_a_la_presso',
name='posicio_presso',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='taulell.Presso'),
),
        migrations.RunPython(forwards_func, reverse_func),
]
|
{
"content_hash": "c2ad5b8c7ded95a6a1c0e858e2857536",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 136,
"avg_line_length": 71.12582781456953,
"alnum_prop": 0.7078212290502793,
"repo_name": "ctrl-alt-d/practicaMonoPoli",
"id": "026661fe28046aff44ab0a6667e801f6154cb7bf",
"size": "10842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taulell/migrations/.~c9_invoke_2vIONL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3066"
},
{
"name": "HTML",
"bytes": "35368"
},
{
"name": "Python",
"bytes": "79849"
}
],
"symlink_target": ""
}
|
import os
import unittest
from datetime import datetime, timedelta
from time import sleep
import pytest
from reactivex.scheduler.eventloop import EventletScheduler
eventlet = pytest.importorskip("eventlet")
CI = os.getenv("CI") is not None
class TestEventletScheduler(unittest.TestCase):
@pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
def test_eventlet_schedule_now(self):
scheduler = EventletScheduler(eventlet)
hub = eventlet.hubs.get_hub()
diff = scheduler.now - datetime.utcfromtimestamp(hub.clock())
assert abs(diff) < timedelta(milliseconds=1)
@pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
def test_eventlet_schedule_now_units(self):
scheduler = EventletScheduler(eventlet)
        start = scheduler.now
        sleep(0.1)
        diff = scheduler.now - start
assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)
def test_eventlet_schedule_action(self):
scheduler = EventletScheduler(eventlet)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
eventlet.sleep(0.1)
assert ran is True
def test_eventlet_schedule_action_due(self):
scheduler = EventletScheduler(eventlet)
starttime = datetime.now()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = datetime.now()
scheduler.schedule_relative(0.2, action)
eventlet.sleep(0.3)
assert endtime is not None
diff = endtime - starttime
assert diff > timedelta(seconds=0.18)
def test_eventlet_schedule_action_cancel(self):
scheduler = EventletScheduler(eventlet)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
d = scheduler.schedule_relative(1.0, action)
d.dispose()
eventlet.sleep(0.01)
assert ran is False
def test_eventlet_schedule_action_periodic(self):
scheduler = EventletScheduler(eventlet)
period = 0.05
counter = 3
def action(state):
nonlocal counter
if counter:
counter -= 1
scheduler.schedule_periodic(period, action)
eventlet.sleep(0.3)
assert counter == 0
|
{
"content_hash": "9ac88ebbfafb245916b2f3175b2df5f1",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 28.129411764705882,
"alnum_prop": 0.6344625679631953,
"repo_name": "ReactiveX/RxPY",
"id": "6d7adb1f25386cdcfee0f46e0706106c1920d278",
"size": "2391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_scheduler/test_eventloop/test_eventletscheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1503"
},
{
"name": "Jupyter Notebook",
"bytes": "347338"
},
{
"name": "Python",
"bytes": "1726895"
}
],
"symlink_target": ""
}
|
import datetime
from cinder import exception as exc
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'bootable': 'false',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['source_volid'] = None
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_create_from_image(self, context, size, name, description,
snapshot, volume_type, metadata,
availability_zone):
vol = stub_volume('1')
vol['status'] = 'creating'
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['availability_zone'] = 'cinder'
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):
raise exc.NotFound
def stub_volume_get_all(context, search_opts=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc'):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_get_all_by_project(self, context, marker, limit, sort_key,
sort_dir, filters={}):
return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_get_all(self):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_snapshot_get_all_by_project(self, context):
return [stub_snapshot(1)]
def stub_snapshot_update(self, context, *args, **param):
pass
|
{
"content_hash": "5b1404895dd29f1a9e744ee215e30a78",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 75,
"avg_line_length": 28.887931034482758,
"alnum_prop": 0.5768427335123844,
"repo_name": "tomasdubec/openstack-cinder",
"id": "c787ef78c2068d426afbbcf9fd9cc999e7692268",
"size": "4026",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/api/v2/stubs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python update_a_database's_threat_detection_policy_with_all_parameters.py
    Before running the sample, set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get these values, see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = SqlManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
response = client.database_security_alert_policies.create_or_update(
resource_group_name="securityalert-4799",
server_name="securityalert-6440",
database_name="testdb",
security_alert_policy_name="Default",
parameters={
"properties": {
"disabledAlerts": ["Sql_Injection", "Usage_Anomaly"],
"emailAccountAdmins": True,
"emailAddresses": ["test@microsoft.com", "user@microsoft.com"],
"retentionDays": 6,
"state": "Enabled",
"storageAccountAccessKey": "sdlfkjabc+sdlfkjsdlkfsjdfLDKFTERLKFDFKLjsdfksjdflsdkfD2342309432849328476458/3RSD==",
"storageEndpoint": "https://mystorage.blob.core.windows.net",
}
},
)
print(response)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2020-11-01-preview/examples/DatabaseSecurityAlertCreateMax.json
if __name__ == "__main__":
main()
|
{
"content_hash": "6888fba88ce76677c73162e1201e29ea",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 142,
"avg_line_length": 39.04347826086956,
"alnum_prop": 0.6765033407572383,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4828bdfc7fab791c2a8b8fd195ba8a92be90b5ac",
"size": "2264",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/generated_samples/update_a_database's_threat_detection_policy_with_all_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import fnmatch
def Walk(root='.', recurse=True, pattern='*'):
"""
Generator for walking a directory tree.
Starts at specified root folder, returning files
that match our pattern. Optionally will also
recurse through sub-folders.
"""
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch.fnmatch(name, pattern):
yield os.path.join(path, name)
if not recurse:
break
def LOC(root='', recurse=True):
"""
Counts lines of code in two ways:
maximal size (source LOC) with blank lines and comments
minimal size (logical LOC) stripping same
Sums all Python files in the specified folder.
By default recurses through subfolders.
"""
count_mini, count_maxi = 0, 0
for fspec in Walk(root, recurse, '*.py'):
skip = False
        with open(fspec) as src:
            for line in src:
                count_maxi += 1
                line = line.strip()
                if line:
                    if line.startswith('#'):
                        continue
                    if line.startswith('"""'):
                        skip = not skip
                        continue
                    if not skip:
                        count_mini += 1
return count_mini, count_maxi
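# A minimal usage sketch (not part of the original recipe; the path is an
# example): count both flavors of LOC under the current directory.
if __name__ == '__main__':
    logical, source = LOC('.', recurse=True)
    print('logical LOC: %d, source LOC: %d' % (logical, source))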
|
{
"content_hash": "fa81224c308061377a707135d34c7d9f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 67,
"avg_line_length": 30.906976744186046,
"alnum_prop": 0.5334838224228743,
"repo_name": "ActiveState/code",
"id": "c49654a1b72f361a99709956929bdfcfe9636569",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/527746_Line_Of_Code_Counter/recipe-527746.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
from pyxb.exceptions_ import *
import unittest
import pyxb.binding.datatypes as xsd
class Test_NCName (unittest.TestCase):
def testValid (self):
valid = [ 'schema', '_Underscore', '_With.Dot', 'With-Hyphen' ]
for f in valid:
self.assertEqual(f, xsd.NCName(f))
def testInvalid (self):
invalid = [ '.DotFirst', 'With Spaces', 'With:Colon',
'With?Illegal', '??LeadingIllegal', 'TrailingIllegal??',
' LeadingSpace', 'TrailingSpace ']
for f in invalid:
self.assertRaises(SimpleTypeValueError, xsd.NCName, f)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4159d1eda13b687cacd6be772faa451e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 33.17391304347826,
"alnum_prop": 0.5937090432503277,
"repo_name": "jonfoster/pyxb-upstream-mirror",
"id": "7fd3fd6d7afa4f0f6dd9cc0e208686146c19352d",
"size": "787",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/datatypes/test-NCName.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "1854695"
},
{
"name": "Shell",
"bytes": "37524"
}
],
"symlink_target": ""
}
|
"""
Output CSV from the data generated by algostat.py
"""
import sys
from operator import itemgetter
from algorithm import ALGORITHMS
DELIMITER = ","
def write_header():
sys.stdout.write(DELIMITER.join(["repository"] + sorted(ALGORITHMS)) + "\n")
def write_line(line):
algorithms = {key: 0 for key in ALGORITHMS}
columns = line.split(" ")
repo = columns[0]
for algo in columns[1:]:
values = algo.split(":")
algorithms[values[0]] += int(values[1])
sorted_results = sorted(algorithms.items(), key=itemgetter(0))
counts = [str(count) for algo, count in sorted_results]
sys.stdout.write(DELIMITER.join([repo] + counts) + "\n")
if __name__ == '__main__':
write_header()
for line in sys.stdin:
write_line(line.strip())
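# Usage sketch (repo name and counts illustrative): pipe algostat.py output
# into this script to get CSV on stdout, e.g.
#   python algostat.py | python create-csv.py > stats.csv
# where each input line looks like "<repo> <algo>:<count> [<algo>:<count> ...]".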
|
{
"content_hash": "6de9c872a8c3c347e786d0639a7d4028",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 24,
"alnum_prop": 0.6401515151515151,
"repo_name": "lukasmartinelli/algostat",
"id": "8af1b2e05020daee90f43e5bd853bd8499f9a395",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create-csv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12593"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Team, Seat
import sys
if sys.version_info < (3, 0):
try:
import unicodecsv as csv
except ImportError:
import csv
else:
import csv
class Command(BaseCommand):
help = 'Export seats to csv'
def get_config(self, export_attr):
if export_attr == 'drivers':
fieldnames = ['id', 'first_name', 'last_name', 'country', 'year_of_birth']
export_cls = Driver
elif export_attr == 'teams':
fieldnames = ['id', 'name', 'full_name', 'country']
export_cls = Team
else:
fieldnames = ['id', 'driver_id', 'driver__last_name', 'driver__first_name', 'team_id', 'team__name']
export_cls = Seat
objects = list(export_cls.objects.values(*fieldnames))
return {'fieldnames': fieldnames, 'objects': objects}
def add_arguments(self, parser):
parser.add_argument('csv',)
parser.add_argument(
'--export',
default='seats',
help='By default, export seats. Options: seats, drivers, teams',
)
def handle(self, *args, **options):
        # unicodecsv on Python 2 expects a binary file; the stdlib csv
        # module on Python 3 expects a text file
        mode = 'wb' if sys.version_info < (3, 0) else 'w'
        with open(options['csv'], mode) as csvfile:
export_config = self.get_config(options['export'])
writer = csv.DictWriter(csvfile, fieldnames=export_config['fieldnames'])
writer.writeheader()
for entry in export_config['objects']:
writer.writerow(entry)
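# Usage sketch (output file names illustrative): run as a Django management
# command, e.g.
#   python manage.py export_seats_for_csv seats.csv
#   python manage.py export_seats_for_csv drivers.csv --export drivers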
|
{
"content_hash": "accf24ee01b436671d2882cfeb80eccd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 112,
"avg_line_length": 33.52173913043478,
"alnum_prop": 0.585603112840467,
"repo_name": "SRJ9/django-driver27",
"id": "71cc0ae1232b64873792b5ee88038a80077d7c1b",
"size": "1566",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "driver27/management/commands/export_seats_for_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5957"
},
{
"name": "HTML",
"bytes": "62415"
},
{
"name": "JavaScript",
"bytes": "6457"
},
{
"name": "Python",
"bytes": "225813"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="xpad", parent_name="heatmapgl.colorbar", **kwargs):
super(XpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "3e265af3642b8a3260d70c5192a84681",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.5933609958506224,
"repo_name": "plotly/python-api",
"id": "f62cbe0488560ad66e3a59eda537bd5b7776c0eb",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmapgl/colorbar/_xpad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
'''
TabbedPanel
===========
.. image:: images/tabbed_panel.jpg
:align: right
.. versionadded:: 1.3.0
The `TabbedPanel` widget manages different widgets in tabs, with a header area
for the actual tab buttons and a content area for showing the current tab
content.
The :class:`TabbedPanel` provides one default tab.
Simple example
--------------
.. include:: ../../examples/widgets/tabbedpanel.py
:literal:
.. note::
A new class :class:`TabbedPanelItem` has been introduced in 1.5.0 for
convenience. So now one can simply add a :class:`TabbedPanelItem` to a
:class:`TabbedPanel` and `content` to the :class:`TabbedPanelItem`
as in the example provided above.
Customize the Tabbed Panel
--------------------------
You can choose the position in which the tabs are displayed::
tab_pos = 'top_mid'
An individual tab is called a TabbedPanelHeader. It is a special button
containing a `content` property. You add the TabbedPanelHeader first, and set
its `content` property separately::
tp = TabbedPanel()
th = TabbedPanelHeader(text='Tab2')
tp.add_widget(th)
An individual tab, represented by a TabbedPanelHeader, needs its content set.
This content can be any widget. It could be a layout with a deep
hierarchy of widgets, or it could be an individual widget, such as a label or a
button::
th.content = your_content_instance
There is one "shared" main content area active at any given time, for all
the tabs. Your app is responsible for adding the content of individual tabs
and for managing them, but it's not responsible for content switching. The
tabbed panel handles switching of the main content object as per user action.
There is a default tab added when the tabbed panel is instantiated.
Tabs that you add individually as above, are added in addition to the default
tab. Thus, depending on your needs and design, you will want to customize the
default tab::
tp.default_tab_text = 'Something Specific To Your Use'
The default tab machinery requires special consideration and management.
Accordingly, an `on_default_tab` event is provided for associating a callback::
tp.bind(default_tab = my_default_tab_callback)
It's important to note that by default, :attr:`default_tab_cls` is of type
:class:`TabbedPanelHeader` and thus has the same properties as other tabs.
Since 1.5.0, it is now possible to disable the creation of the
:attr:`default_tab` by setting :attr:`do_default_tab` to False.
Tabs and content can be removed in several ways::
tp.remove_widget(widget/tabbed_panel_header)
or
tp.clear_widgets() # to clear all the widgets in the content area
or
tp.clear_tabs() # to remove the TabbedPanelHeaders
To access the children of the tabbed panel, use content.children::
tp.content.children
To access the list of tabs::
tp.tab_list
To change the appearance of the main tabbed panel content::
background_color = (1, 0, 0, .5) #50% translucent red
border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
To change the background of an individual tab, use these two properties::
tab_header_instance.background_normal = 'path/to/tab_head/img'
tab_header_instance.background_down = 'path/to/tab_head/img_pressed'
A TabbedPanelStrip contains the individual tab headers. To change the
appearance of this tab strip, override the canvas of TabbedPanelStrip.
For example, in the kv language:
.. code-block:: kv
<TabbedPanelStrip>
canvas:
Color:
rgba: (0, 1, 0, 1) # green
Rectangle:
size: self.size
pos: self.pos
By default the tabbed panel strip takes its background image and color from the
tabbed panel's background_image and background_color.
'''
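# A minimal usage sketch of the API described above (a hedged example, not
# from the original docs; assumes `from kivy.uix.label import Label`):
#
#     tp = TabbedPanel(do_default_tab=False, tab_pos='top_mid')
#     th = TabbedPanelHeader(text='Tab 1')
#     th.content = Label(text='Content for tab 1')
#     tp.add_widget(th)
#     tp.switch_to(th)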
__all__ = ('StripLayout', 'TabbedPanel', 'TabbedPanelContent',
'TabbedPanelHeader', 'TabbedPanelItem', 'TabbedPanelStrip',
'TabbedPanelException')
from functools import partial
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, NumericProperty, AliasProperty, BooleanProperty
class TabbedPanelException(Exception):
'''The TabbedPanelException class.
'''
pass
class TabbedPanelHeader(ToggleButton):
'''A Base for implementing a Tabbed Panel Head. A button intended to be
used as a Heading/Tab for a TabbedPanel widget.
You can use this TabbedPanelHeader widget to add a new tab to a
TabbedPanel.
'''
content = ObjectProperty(None, allownone=True)
'''Content to be loaded when this tab header is selected.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
# only allow selecting the tab if not already selected
def on_touch_down(self, touch):
if self.state == 'down':
# dispatch to children, not to self
for child in self.children:
child.dispatch('on_touch_down', touch)
return
else:
super(TabbedPanelHeader, self).on_touch_down(touch)
def on_release(self, *largs):
        # Tabbed panel header is a child of tab_strip which has a
        # `tabbed_panel` property
if self.parent:
self.parent.tabbed_panel.switch_to(self)
else:
# tab removed before we could switch to it. Switch back to
# previous tab
self.panel.switch_to(self.panel.current_tab)
class TabbedPanelItem(TabbedPanelHeader):
'''This is a convenience class that provides a header of type
TabbedPanelHeader and links it with the content automatically. Thus
facilitating you to simply do the following in kv language:
.. code-block:: kv
<TabbedPanel>:
# ...other settings
TabbedPanelItem:
BoxLayout:
Label:
text: 'Second tab content area'
Button:
text: 'Button that does nothing'
.. versionadded:: 1.5.0
'''
def add_widget(self, widget, index=0):
self.content = widget
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.switch_to(self)
def remove_widget(self, widget):
self.content = None
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.remove_widget(widget)
class TabbedPanelStrip(GridLayout):
'''A strip intended to be used as background for Heading/Tab.
This does not cover the blank areas in case the tabs don't cover
    the entire width/height of the TabbedPanel (use :class:`StripLayout`
for that).
'''
tabbed_panel = ObjectProperty(None)
'''Link to the panel that the tab strip is a part of.
:attr:`tabbed_panel` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
'''
class StripLayout(GridLayout):
''' The main layout that is used to house the entire tabbedpanel strip
including the blank areas in case the tabs don't cover the entire
width/height.
.. versionadded:: 1.8.0
'''
border = ListProperty([4, 4, 4, 4])
'''Border property for the :attr:`background_image`.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to [4, 4, 4, 4]
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image to be used for the Strip layout of the TabbedPanel.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to a transparent image.
'''
class TabbedPanelContent(FloatLayout):
'''The TabbedPanelContent class.
'''
pass
class TabbedPanel(GridLayout):
'''The TabbedPanel class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction, used itself for :attr:`background_image`.
Can be changed for a custom background.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instructions for more information.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (16, 16, 16, 16)
'''
background_image = StringProperty('atlas://data/images/defaulttheme/tab')
'''Background image of the main shared content object.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/tab'.
'''
background_disabled_image = StringProperty(
'atlas://data/images/defaulttheme/tab_disabled')
'''Background image of the main shared content object when disabled.
.. versionadded:: 1.8.0
    :attr:`background_disabled_image` is a
    :class:`~kivy.properties.StringProperty` and defaults to
    'atlas://data/images/defaulttheme/tab_disabled'.
'''
strip_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image of the tabbed strip.
.. versionadded:: 1.8.0
:attr:`strip_image` is a :class:`~kivy.properties.StringProperty`
    and defaults to an empty image.
'''
strip_border = ListProperty([4, 4, 4, 4])
'''Border to be used on :attr:`strip_image`.
.. versionadded:: 1.8.0
:attr:`strip_border` is a :class:`~kivy.properties.ListProperty` and
defaults to [4, 4, 4, 4].
'''
_current_tab = ObjectProperty(None)
def get_current_tab(self):
return self._current_tab
current_tab = AliasProperty(get_current_tab, None, bind=('_current_tab', ))
'''Links to the currently selected or active tab.
.. versionadded:: 1.4.0
    :attr:`current_tab` is an :class:`~kivy.properties.AliasProperty`, read-only.
'''
tab_pos = OptionProperty(
'top_left',
options=('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the tabs relative to the content.
Can be one of: `left_top`, `left_mid`, `left_bottom`, `top_left`,
`top_mid`, `top_right`, `right_top`, `right_mid`, `right_bottom`,
`bottom_left`, `bottom_mid`, `bottom_right`.
:attr:`tab_pos` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'top_left'.
'''
tab_height = NumericProperty('40dp')
'''Specifies the height of the tab header.
:attr:`tab_height` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '40dp'.
'''
tab_width = NumericProperty('100dp', allownone=True)
'''Specifies the width of the tab header.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '100dp'.
'''
do_default_tab = BooleanProperty(True)
'''Specifies whether a default_tab head is provided.
.. versionadded:: 1.5.0
:attr:`do_default_tab` is a :class:`~kivy.properties.BooleanProperty` and
defaults to 'True'.
'''
default_tab_text = StringProperty('Default tab')
'''Specifies the text displayed on the default tab header.
:attr:`default_tab_text` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'Default tab'.
'''
default_tab_cls = ObjectProperty(TabbedPanelHeader)
'''Specifies the class to use for the styling of the default tab.
.. versionadded:: 1.4.0
.. warning::
`default_tab_cls` should be subclassed from `TabbedPanelHeader`
:attr:`default_tab_cls` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `TabbedPanelHeader`. If you set a string, the
:class:`~kivy.factory.Factory` will be used to resolve the class.
.. versionchanged:: 1.8.0
The :class:`~kivy.factory.Factory` will resolve the class if a string
is set.
'''
    def get_tab_list(self):
        if self._tab_strip:
            return self._tab_strip.children
        return []
tab_list = AliasProperty(get_tab_list, None)
'''List of all the tab headers.
:attr:`tab_list` is an :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
content = ObjectProperty(None)
    '''This is the object holding the content of the current tab (the
    current tab's content is added to it). To listen to changes in the
    content of the current tab, bind to the current_tab's `content` property.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
    _default_tab = ObjectProperty(None, allownone=True)
def get_def_tab(self):
return self._default_tab
def set_def_tab(self, new_tab):
if not issubclass(new_tab.__class__, TabbedPanelHeader):
raise TabbedPanelException('`default_tab_class` should be\
subclassed from `TabbedPanelHeader`')
if self._default_tab == new_tab:
return
        oldtab = self._default_tab
        self._default_tab = new_tab
        self.remove_widget(oldtab)
self._original_tab = None
self.switch_to(new_tab)
new_tab.state = 'down'
default_tab = AliasProperty(get_def_tab, set_def_tab,
bind=('_default_tab', ))
'''Holds the default tab.
.. Note:: For convenience, the automatically provided default tab is
deleted when you change default_tab to something else.
As of 1.5.0, this behaviour has been extended to every
`default_tab` for consistency and not just the automatically
provided one.
:attr:`default_tab` is an :class:`~kivy.properties.AliasProperty`.
'''
def get_def_tab_content(self):
return self.default_tab.content
def set_def_tab_content(self, *l):
self.default_tab.content = l[0]
default_tab_content = AliasProperty(get_def_tab_content,
set_def_tab_content)
'''Holds the default tab content.
:attr:`default_tab_content` is an :class:`~kivy.properties.AliasProperty`.
'''
_update_top_ev = _update_tab_ev = _update_tabs_ev = None
def __init__(self, **kwargs):
        # these variables need to be initialized before the kv lang is
        # processed; set up the base layout for the tabbed panel
self._childrens = []
self._tab_layout = StripLayout(rows=1)
self.rows = 1
self._tab_strip = TabbedPanelStrip(
tabbed_panel=self,
rows=1, size_hint=(None, None),
height=self.tab_height, width=self.tab_width)
self._partial_update_scrollview = None
self.content = TabbedPanelContent()
self._current_tab = self._original_tab \
= self._default_tab = TabbedPanelHeader()
super(TabbedPanel, self).__init__(**kwargs)
self.fbind('size', self._reposition_tabs)
if not self.do_default_tab:
Clock.schedule_once(self._switch_to_first_tab)
return
self._setup_default_tab()
self.switch_to(self.default_tab)
def switch_to(self, header, do_scroll=False):
'''Switch to a specific panel header.
.. versionchanged:: 1.10.0
If used with `do_scroll=True`, it scrolls
to the header's tab too.
'''
header_content = header.content
self._current_tab.state = 'normal'
header.state = 'down'
self._current_tab = header
self.clear_widgets()
if header_content is None:
return
# if content has a previous parent remove it from that parent
parent = header_content.parent
if parent:
parent.remove_widget(header_content)
self.add_widget(header_content)
if do_scroll:
tabs = self._tab_strip
tabs.parent.scroll_to(header)
def clear_tabs(self, *l):
self_tabs = self._tab_strip
self_tabs.clear_widgets()
if self.do_default_tab:
self_default_tab = self._default_tab
self_tabs.add_widget(self_default_tab)
self_tabs.width = self_default_tab.width
self._reposition_tabs()
def add_widget(self, widget, index=0):
content = self.content
if content is None:
return
parent = widget.parent
if parent:
parent.remove_widget(widget)
if widget in (content, self._tab_layout):
super(TabbedPanel, self).add_widget(widget, index)
elif isinstance(widget, TabbedPanelHeader):
self_tabs = self._tab_strip
self_tabs.add_widget(widget, index)
widget.group = '__tab%r__' % self_tabs.uid
self.on_tab_width()
else:
widget.pos_hint = {'x': 0, 'top': 1}
self._childrens.append(widget)
content.disabled = self.current_tab.disabled
content.add_widget(widget, index)
def remove_widget(self, widget):
content = self.content
if content is None:
return
if widget in (content, self._tab_layout):
super(TabbedPanel, self).remove_widget(widget)
elif isinstance(widget, TabbedPanelHeader):
if not (self.do_default_tab and widget is self._default_tab):
self_tabs = self._tab_strip
self_tabs.width -= widget.width
self_tabs.remove_widget(widget)
if widget.state == 'down' and self.do_default_tab:
self._default_tab.on_release()
self._reposition_tabs()
else:
                Logger.info('TabbedPanel: the default tab cannot be removed.\n'
                            'Change `default_tab` to a different tab.')
else:
            # _childrens is a list, so remove by value rather than list.pop()
            if widget in self._childrens:
                self._childrens.remove(widget)
if widget in content.children:
content.remove_widget(widget)
def clear_widgets(self, **kwargs):
content = self.content
if content is None:
return
if kwargs.get('do_super', False):
super(TabbedPanel, self).clear_widgets()
else:
content.clear_widgets()
def on_strip_image(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.background_image = value
def on_strip_border(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.border = value
def on_do_default_tab(self, instance, value):
if not value:
dft = self.default_tab
if dft in self.tab_list:
self.remove_widget(dft)
self._switch_to_first_tab()
self._default_tab = self._current_tab
else:
self._current_tab.state = 'normal'
self._setup_default_tab()
def on_default_tab_text(self, *args):
self._default_tab.text = self.default_tab_text
def on_tab_width(self, *l):
ev = self._update_tab_ev
if ev is None:
ev = self._update_tab_ev = Clock.create_trigger(
self._update_tab_width, 0)
ev()
def on_tab_height(self, *l):
self._tab_layout.height = self._tab_strip.height = self.tab_height
self._reposition_tabs()
def on_tab_pos(self, *l):
# ensure canvas
self._reposition_tabs()
def _setup_default_tab(self):
if self._default_tab in self.tab_list:
return
content = self._default_tab.content
_tabs = self._tab_strip
cls = self.default_tab_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
if not issubclass(cls, TabbedPanelHeader):
raise TabbedPanelException('`default_tab_class` should be\
subclassed from `TabbedPanelHeader`')
# no need to instantiate if class is TabbedPanelHeader
if cls != TabbedPanelHeader:
self._current_tab = self._original_tab = self._default_tab = cls()
default_tab = self.default_tab
if self._original_tab == self.default_tab:
default_tab.text = self.default_tab_text
default_tab.height = self.tab_height
default_tab.group = '__tab%r__' % _tabs.uid
default_tab.state = 'down'
default_tab.width = self.tab_width if self.tab_width else 100
default_tab.content = content
tl = self.tab_list
if default_tab not in tl:
_tabs.add_widget(default_tab, len(tl))
if default_tab.content:
self.clear_widgets()
self.add_widget(self.default_tab.content)
else:
Clock.schedule_once(self._load_default_tab_content)
self._current_tab = default_tab
def _switch_to_first_tab(self, *l):
ltl = len(self.tab_list) - 1
if ltl > -1:
self._current_tab = dt = self._original_tab \
= self.tab_list[ltl]
self.switch_to(dt)
def _load_default_tab_content(self, dt):
if self.default_tab:
self.switch_to(self.default_tab)
def _reposition_tabs(self, *l):
ev = self._update_tabs_ev
if ev is None:
ev = self._update_tabs_ev = Clock.create_trigger(
self._update_tabs, 0)
ev()
def _update_tabs(self, *l):
self_content = self.content
if not self_content:
return
# cache variables for faster access
tab_pos = self.tab_pos
tab_layout = self._tab_layout
tab_layout.clear_widgets()
scrl_v = ScrollView(size_hint=(None, 1))
tabs = self._tab_strip
parent = tabs.parent
if parent:
parent.remove_widget(tabs)
scrl_v.add_widget(tabs)
scrl_v.pos = (0, 0)
self_update_scrollview = self._update_scrollview
        # update scrl_v width when the tab width changes, depending on tab_pos
if self._partial_update_scrollview is not None:
tabs.unbind(width=self._partial_update_scrollview)
self._partial_update_scrollview = partial(
self_update_scrollview, scrl_v)
tabs.bind(width=self._partial_update_scrollview)
        # remove the tab layout and content from the panel itself
self.clear_widgets(do_super=True)
tab_height = self.tab_height
widget_list = []
tab_list = []
pos_letter = tab_pos[0]
if pos_letter == 'b' or pos_letter == 't':
# bottom or top positions
# one col containing the tab_strip and the content
self.cols = 1
self.rows = 2
# tab_layout contains the scrollview containing tabs and two blank
# dummy widgets for spacing
tab_layout.rows = 1
tab_layout.cols = 3
tab_layout.size_hint = (1, None)
tab_layout.height = (tab_height + tab_layout.padding[1] +
tab_layout.padding[3] + dp(2))
self_update_scrollview(scrl_v)
if pos_letter == 'b':
# bottom
if tab_pos == 'bottom_mid':
tab_list = (Widget(), scrl_v, Widget())
widget_list = (self_content, tab_layout)
else:
if tab_pos == 'bottom_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'bottom_right':
# add two dummy widgets
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (self_content, tab_layout)
else:
# top
if tab_pos == 'top_mid':
tab_list = (Widget(), scrl_v, Widget())
elif tab_pos == 'top_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'top_right':
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (tab_layout, self_content)
elif pos_letter == 'l' or pos_letter == 'r':
            # left or right positions
# one row containing the tab_strip and the content
self.cols = 2
self.rows = 1
# tab_layout contains two blank dummy widgets for spacing
# "vertically" and the scatter containing scrollview
# containing tabs
tab_layout.rows = 3
tab_layout.cols = 1
tab_layout.size_hint = (None, 1)
tab_layout.width = tab_height
scrl_v.height = tab_height
self_update_scrollview(scrl_v)
# rotate the scatter for vertical positions
rotation = 90 if tab_pos[0] == 'l' else -90
sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
auto_bring_to_front=False,
size=scrl_v.size)
sctr.add_widget(scrl_v)
lentab_pos = len(tab_pos)
            # Update the scatter's top when its pos changes. This is needed to
            # reposition the scatter correctly after it is added to the parent,
            # and again whenever tab_pos changes. Clock.schedule_once ensures
            # top is calculated after the parent's pos on the canvas has been
            # calculated; without it the positions would look fine, but touch
            # input would not translate to the correct position.
if tab_pos[lentab_pos - 4:] == '_top':
# on positions 'left_top' and 'right_top'
sctr.bind(pos=partial(self._update_top, sctr, 'top', None))
tab_list = (sctr, )
elif tab_pos[lentab_pos - 4:] == '_mid':
# calculate top of scatter
sctr.bind(pos=partial(self._update_top, sctr, 'mid',
scrl_v.width))
tab_list = (Widget(), sctr, Widget())
elif tab_pos[lentab_pos - 7:] == '_bottom':
tab_list = (Widget(), Widget(), sctr)
if pos_letter == 'l':
widget_list = (tab_layout, self_content)
else:
widget_list = (self_content, tab_layout)
# add widgets to tab_layout
add = tab_layout.add_widget
for widg in tab_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_tab_width(self, *l):
if self.tab_width:
for tab in self.tab_list:
tab.size_hint_x = 1
tsw = self.tab_width * len(self._tab_strip.children)
else:
# tab_width = None
tsw = 0
for tab in self.tab_list:
if tab.size_hint_x:
# size_hint_x: x/.xyz
tab.size_hint_x = 1
# drop to default tab_width
tsw += 100
else:
# size_hint_x: None
tsw += tab.width
self._tab_strip.width = tsw
self._reposition_tabs()
def _update_top(self, *args):
sctr, top, scrl_v_width, x, y = args
ev = self._update_top_ev
if ev is not None:
ev.cancel()
ev = self._update_top_ev = Clock.schedule_once(
partial(self._updt_top, sctr, top, scrl_v_width), 0)
def _updt_top(self, sctr, top, scrl_v_width, *args):
if top[0] == 't':
sctr.top = self.top
else:
sctr.top = self.top - (self.height - scrl_v_width) / 2
def _update_scrollview(self, scrl_v, *l):
self_tab_pos = self.tab_pos
self_tabs = self._tab_strip
if self_tab_pos[0] == 'b' or self_tab_pos[0] == 't':
# bottom or top
scrl_v.width = min(self.width, self_tabs.width)
# required for situations when scrl_v's pos is calculated
# when it has no parent
scrl_v.top += 1
scrl_v.top -= 1
else:
# left or right
scrl_v.width = min(self.height, self_tabs.width)
self_tabs.pos = (0, 0)
|
{
"content_hash": "47b891aad4b9eee969c3d47a05438b74",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 79,
"avg_line_length": 34.33804475853946,
"alnum_prop": 0.598566185298254,
"repo_name": "jegger/kivy",
"id": "39aa327ca3ac5d5fc0c29906c9564dc9f65d739d",
"size": "29153",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "kivy/uix/tabbedpanel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "337106"
},
{
"name": "C++",
"bytes": "3551"
},
{
"name": "Emacs Lisp",
"bytes": "9671"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4202"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3792478"
},
{
"name": "Vim script",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
from heat.common.i18n import _
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class NovaFlavor(resource.Resource):
"""
A resource for creating OpenStack virtual hardware templates.
    Due to the default nova security policy, usage of this resource is
    limited to administrators only. The rights may also be delegated to
other users by redefining the access controls on the nova-api server.
Note that the current implementation of the Nova Flavor resource does not
allow specifying the name and flavorid properties for the resource.
    This is done to avoid potential naming collisions upon flavor creation, as
    all flavors have a global scope.
Here is an example nova flavor resource::
heat_template_version: 2013-05-23
description: Heat Flavor creation example
resources:
test_flavor:
type: OS::Nova::Flavor
properties:
ram: 1024
vcpus: 1
disk: 20
swap: 2
extra_specs: {"quota:disk_read_bytes_sec": "10240000"}
"""
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'nova'
entity = 'flavors'
PROPERTIES = (
RAM, VCPUS, DISK, SWAP, EPHEMERAL,
RXTX_FACTOR, EXTRA_SPECS,
) = (
'ram', 'vcpus', 'disk', 'swap', 'ephemeral',
'rxtx_factor', 'extra_specs',
)
properties_schema = {
RAM: properties.Schema(
properties.Schema.INTEGER,
_('Memory in MB for the flavor.'),
required=True
),
VCPUS: properties.Schema(
properties.Schema.INTEGER,
_('Number of VCPUs for the flavor.'),
required=True
),
DISK: properties.Schema(
properties.Schema.INTEGER,
_('Size of local disk in GB. The "0" size is a special case that '
'uses the native base image size as the size of the ephemeral '
'root volume.'),
default=0
),
SWAP: properties.Schema(
properties.Schema.INTEGER,
_('Swap space in MB.'),
default=0
),
EPHEMERAL: properties.Schema(
properties.Schema.INTEGER,
_('Size of a secondary ephemeral data disk in GB.'),
default=0
),
RXTX_FACTOR: properties.Schema(
properties.Schema.NUMBER,
_('RX/TX factor.'),
default=1.0
),
EXTRA_SPECS: properties.Schema(
properties.Schema.MAP,
_('Key/Value pairs to extend the capabilities of the flavor.'),
update_allowed=True,
),
}
def __init__(self, name, json_snippet, stack):
super(NovaFlavor, self).__init__(name, json_snippet, stack)
def handle_create(self):
args = dict(self.properties)
args['flavorid'] = 'auto'
args['name'] = self.physical_resource_name()
args['is_public'] = False
flavor_keys = args.pop(self.EXTRA_SPECS)
flavor = self.client().flavors.create(**args)
self.resource_id_set(flavor.id)
if flavor_keys:
flavor.set_keys(flavor_keys)
tenant = self.stack.context.tenant_id
# grant access to the active project and the admin project
self.client().flavor_access.add_tenant_access(flavor, tenant)
self.client().flavor_access.add_tenant_access(flavor, 'admin')
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Update nova flavor."""
if self.EXTRA_SPECS in prop_diff:
flavor = self.client().flavors.get(self.resource_id)
old_keys = flavor.get_keys()
flavor.unset_keys(old_keys)
new_keys = prop_diff.get(self.EXTRA_SPECS)
if new_keys is not None:
flavor.set_keys(new_keys)
def resource_mapping():
return {
'OS::Nova::Flavor': NovaFlavor
}
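# Update sketch (illustrative template values, not from the original file):
# only `extra_specs` is update_allowed, so changing it in a template like the
# docstring example triggers handle_update(), which unsets the flavor's old
# keys and sets the new ones in place.
#
#   test_flavor:
#     type: OS::Nova::Flavor
#     properties:
#       ram: 1024
#       vcpus: 1
#       disk: 20
#       extra_specs: {"quota:disk_write_bytes_sec": "10240000"}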
|
{
"content_hash": "ed317574a4ee3bec69d98aa9aa834504",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 32.62903225806452,
"alnum_prop": 0.5884824518042511,
"repo_name": "cryptickp/heat",
"id": "b1474639d64d28236a1fe0d23504f5d5d038507b",
"size": "4621",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/nova/nova_flavor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6610494"
},
{
"name": "Shell",
"bytes": "33316"
}
],
"symlink_target": ""
}
|
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
    Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
    When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
    we sideload the associated data. In this example, we would provide an Array of
    `State`s and a `state_id` for each provider. This is done primarily to reduce the
    payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
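    For example (a minimal sketch; `payload` stands for the parsed JSON response above):
    ```
    state_by_id = {s['id']: s for s in payload['states']}
    for provider in payload['providers']:
        state = state_by_id[provider['state_id']]
    ```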
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class PlanCounty(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, plan_id=None, county_id=None):
"""
PlanCounty - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'plan_id': 'int',
'county_id': 'int'
}
self.attribute_map = {
'plan_id': 'plan_id',
'county_id': 'county_id'
}
self._plan_id = plan_id
self._county_id = county_id
@property
def plan_id(self):
"""
Gets the plan_id of this PlanCounty.
Foreign key to plan
:return: The plan_id of this PlanCounty.
:rtype: int
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id):
"""
Sets the plan_id of this PlanCounty.
Foreign key to plan
:param plan_id: The plan_id of this PlanCounty.
:type: int
"""
self._plan_id = plan_id
@property
def county_id(self):
"""
Gets the county_id of this PlanCounty.
Foreign key to county
:return: The county_id of this PlanCounty.
:rtype: int
"""
return self._county_id
@county_id.setter
def county_id(self, county_id):
"""
Sets the county_id of this PlanCounty.
Foreign key to county
:param county_id: The county_id of this PlanCounty.
:type: int
"""
self._county_id = county_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
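# Usage sketch (illustrative values): the generated model is a plain data
# holder, so it can be built directly and serialized with to_dict().
#
#   pc = PlanCounty(plan_id=1, county_id=2)
#   pc.to_dict()  # -> {'plan_id': 1, 'county_id': 2}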
|
{
"content_hash": "cac54826ed5b02da34e00eb8667a5169",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 228,
"avg_line_length": 36.725433526011564,
"alnum_prop": 0.6133627134650192,
"repo_name": "vericred/vericred-python",
"id": "d7869465b9a8fb2c71c146f634a6154a28ecd1f3",
"size": "12724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vericred_client/models/plan_county.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2550150"
}
],
"symlink_target": ""
}
|
import argparse
import json
import subprocess
import time
#------------------------------------------------------------------------------
# Configuration mode: return the custom metrics data should be defined
def config():
settings = {
        'maxruntime': 30000, # How long the script is allowed to run
        'period': 60, # How often the script runs; in this case every 60 seconds
'metrics': [
{
'id': 0,
'datatype': 'DOUBLE',
'name': 'Uptime total',
'description': '100% if commands succeed',
'groups': 'Docker Test',
'unit': '',
'tags': '',
'calctype': 'Instant'
},
{
'id': 1,
'datatype': 'DOUBLE',
'name': 'Runtime total',
'description': 'Total runtime of Docker test',
'groups': 'Docker Test',
'unit': 'ms',
'tags': '',
'calctype': 'Instant'
},
{
'id': 2,
'datatype': 'DOUBLE',
'name': 'Uptime pull',
'description': '100% if pull command succeeds',
'groups': 'Docker Test',
'unit': '',
'tags': '',
'calctype': 'Instant'
},
{
'id': 3,
'datatype': 'DOUBLE',
'name': 'Runtime pull',
'description': 'Pull runtime of Docker test',
'groups': 'Docker Test',
'unit': 'ms',
'tags': '',
'calctype': 'Instant'
},
{
'id': 4,
'datatype': 'DOUBLE',
'name': 'Uptime run',
'description': '100% if run command succeeds',
'groups': 'Docker Test',
'unit': '',
'tags': '',
'calctype': 'Instant'
},
{
'id': 5,
'datatype': 'DOUBLE',
'name': 'Runtime run',
'description': 'Run runtime of Docker test',
'groups': 'Docker Test',
'unit': 'ms',
'tags': '',
'calctype': 'Instant'
},
{
'id': 6,
'datatype': 'DOUBLE',
'name': 'Uptime stop',
'description': '100% if stop command succeeds',
'groups': 'Docker Test',
'unit': '',
'tags': '',
'calctype': 'Instant'
},
{
'id': 7,
'datatype': 'DOUBLE',
'name': 'Runtime stop',
'description': 'Stop runtime of Docker test',
'groups': 'Docker Test',
'unit': 'ms',
'tags': '',
'calctype': 'Instant'
},
{
'id': 8,
'datatype': 'DOUBLE',
'name': 'Uptime remove',
'description': '100% if remove command succeeds',
'groups': 'Docker Test',
'unit': '',
'tags': '',
'calctype': 'Instant'
},
{
'id': 9,
'datatype': 'DOUBLE',
'name': 'Runtime remove',
'description': 'Remove runtime of Docker test',
'groups': 'Docker Test',
'unit': 'ms',
'tags': '',
'calctype': 'Instant'
}
]
}
print json.dumps(settings, indent=4)
# Data retrieval mode: return the data for the custom metrics
def data():
# Success value
result = 0
# Total time spent
total = 0
metrics = [None] * 10
# pull busybox
(success, time) = run(['docker pull busybox'])
result += success - 100
total += time
metrics[2] = 'M2 {0}'.format(success)
metrics[3] = 'M3 {0:.2f}'.format(time)
# run coscale-test container
(success, time) = run(['docker run --name coscale-test -d busybox'])
result += success - 100
total += time
metrics[4] = 'M4 {0}'.format(success)
metrics[5] = 'M5 {0:.2f}'.format(time)
# stop coscale-test container
(success, time) = run(['docker stop coscale-test'])
result += success - 100
total += time
metrics[6] = 'M6 {0}'.format(success)
metrics[7] = 'M7 {0:.2f}'.format(time)
# remove coscale-test
(success, time) = run(['docker rm coscale-test'])
result += success - 100
total += time
metrics[8] = 'M8 {0}'.format(success)
metrics[9] = 'M9 {0:.2f}'.format(time)
    # Convert result to a meaningful uptime value: 0 if any command failed, else 100
if result < 0:
result = 0
else:
result = 100
metrics[0] = 'M0 {0}'.format(result)
metrics[1] = 'M1 {0:.2f}'.format(total)
for metric in metrics:
print metric
def run(command):
    # ``command`` is a one-element list; with shell=True only its first
    # item is interpreted as the shell command line.
    success = 100
    start = time.time()
    try:
        subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        success = 0
    end = (time.time() - start) * 1000
    return [success, end]
#------------------------------------------------------------------------------
# Switch to check in which mode the script is running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', action='store_true', help='output a JSON object detailing the metrics this script collects')
parser.add_argument('-d', action='store_true', help='output the metrics this script collects')
args = parser.parse_args()
if args.c:
config()
elif args.d:
data()
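# Usage sketch, inferred from the argparse flags above (illustrative only):
#   python docker-check.py -c   -> print the metric definitions as JSON
#   python docker-check.py -d   -> run the pull/run/stop/rm cycle and print
#                                  one "M<id> <value>" line per metric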
|
{
"content_hash": "592e53f013c9e3f91cde7783636345fa",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 122,
"avg_line_length": 31.110526315789475,
"alnum_prop": 0.43495178480798513,
"repo_name": "CoScale/coscale-generic-scripts",
"id": "97fc280ddea0655bec8783cc2d104128afe836c2",
"size": "6129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/docker-check.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "27696"
},
{
"name": "Shell",
"bytes": "13622"
}
],
"symlink_target": ""
}
|
"""Exceptions used by Cisco Nexus1000V ML2 mechanism driver."""
from neutron.common import exceptions
class VSMConnectionFailed(exceptions.ServiceUnavailable):
"""No response from Cisco Nexus1000V VSM."""
message = _("Connection to VSM failed: %(reason)s.")
class VSMError(exceptions.NeutronException):
"""A response from Cisco Nexus1000V VSM was not HTTP OK."""
message = _("Internal VSM Error: %(reason)s.")
class NetworkBindingNotFound(exceptions.NotFound):
"""Network Binding for network cannot be found."""
message = _("Network Binding for network %(network_id)s could "
"not be found.")
class PortBindingNotFound(exceptions.NotFound):
"""Port Binding for port cannot be found."""
message = _("Port Binding for port %(port_id)s could "
"not be found.")
class NetworkProfileNotFound(exceptions.NotFound):
"""Network Profile with given UUID/name/network-type cannot be found."""
message = _("Network Profile %(profile)s could not be found.")
class PolicyProfileNotFound(exceptions.NotFound):
"""Policy Profile with given UUID/name cannot be found."""
message = _("Policy Profile %(profile)s could not be found.")
|
{
"content_hash": "9c68d3d45c5d47ef22b7ba9bf9869194",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 34.542857142857144,
"alnum_prop": 0.6989247311827957,
"repo_name": "hareeshpc/networking-cisco",
"id": "08fb50215356b93ada75a4439cfbf233a0ca6019",
"size": "1844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networking_cisco/plugins/ml2/drivers/cisco/n1kv/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1234061"
},
{
"name": "Shell",
"bytes": "42688"
}
],
"symlink_target": ""
}
|
"""SCons.Platform.posix
Platform-specific initialization for POSIX (Linux, UNIX, etc.) systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
import popen2
import string
import sys
import select
import SCons.Util
from SCons.Platform import TempFileMunge
exitvalmap = {
2 : 127,
13 : 126,
}
def escape(arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
return '"' + arg + '"'
def exec_system(l, env):
stat = os.system(string.join(l))
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_spawnvpe(l, env):
stat = os.spawnvpe(os.P_WAIT, l[0], l, env)
# os.spawnvpe() returns the actual exit code, not the encoding
# returned by os.waitpid() or os.system().
return stat
def exec_fork(l, env):
pid = os.fork()
if not pid:
# Child process.
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process.
pid, stat = os.waitpid(pid, 0)
if stat & 0xff:
return stat | 0x80
return stat >> 8
def _get_env_command(sh, escape, cmd, args, env):
s = string.join(args)
if env:
l = ['env', '-'] + \
map(lambda t, e=escape: t[0]+'='+e(t[1]), env.items()) + \
[sh, '-c', escape(s)]
s = string.join(l)
return s
def env_spawn(sh, escape, cmd, args, env):
return exec_system([_get_env_command( sh, escape, cmd, args, env)], env)
def spawnvpe_spawn(sh, escape, cmd, args, env):
return exec_spawnvpe([sh, '-c', string.join(args)], env)
def fork_spawn(sh, escape, cmd, args, env):
return exec_fork([sh, '-c', string.join(args)], env)
def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
stdout_eof = stderr_eof = 0
while not (stdout_eof and stderr_eof):
(i,o,e) = select.select([cmd_stdout, cmd_stderr], [], [])
if cmd_stdout in i:
str = cmd_stdout.read()
if len(str) == 0:
stdout_eof = 1
elif stdout != None:
stdout.write(str)
if cmd_stderr in i:
str = cmd_stderr.read()
if len(str) == 0:
#sys.__stderr__.write( "stderr_eof=1\n" )
stderr_eof = 1
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
def exec_popen3(l, env, stdout, stderr):
proc = popen2.Popen3(string.join(l), 1)
process_cmd_output(proc.fromchild, proc.childerr, stdout, stderr)
stat = proc.wait()
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_piped_fork(l, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
if stdout != stderr:
(rFdOut, wFdOut) = os.pipe()
(rFdErr, wFdErr) = os.pipe()
else:
(rFdOut, wFdOut) = os.pipe()
rFdErr = rFdOut
wFdErr = wFdOut
# do the fork
pid = os.fork()
if not pid:
# Child process
os.close( rFdOut )
if rFdOut != rFdErr:
os.close( rFdErr )
os.dup2( wFdOut, 1 ) # is there some symbolic way to do that ?
os.dup2( wFdErr, 2 )
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process
pid, stat = os.waitpid(pid, 0)
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
childOut = os.fdopen( rFdOut )
if stdout != stderr:
childErr = os.fdopen( rFdErr )
else:
childErr = childOut
process_cmd_output(childOut, childErr, stdout, stderr)
os.close( rFdOut )
if stdout != stderr:
os.close( rFdErr )
if stat & 0xff:
return stat | 0x80
return stat >> 8
def piped_env_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using Popen3 combined with the env command
# the command name and the command's stdout is written to stdout
# the command's stderr is written to stderr
return exec_popen3([_get_env_command(sh, escape, cmd, args, env)],
env, stdout, stderr)
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
return exec_piped_fork([sh, '-c', string.join(args)],
env, stdout, stderr)
def generate(env):
# If os.spawnvpe() exists, we use it to spawn commands. Otherwise
# if the env utility exists, we use os.system() to spawn commands,
# finally we fall back on os.fork()/os.exec().
#
    # os.spawnvpe() is preferred because it is the most efficient. But
    # for Python versions without it, os.system() is preferred because it
# is claimed that it works better with threads (i.e. -j) and is more
# efficient than forking Python.
#
# NB: Other people on the scons-users mailing list have claimed that
# os.fork()/os.exec() works better than os.system(). There may just
# not be a default that works best for all users.
if os.__dict__.has_key('spawnvpe'):
spawn = spawnvpe_spawn
elif env.Detect('env'):
spawn = env_spawn
else:
spawn = fork_spawn
if env.Detect('env'):
pspawn = piped_env_spawn
else:
pspawn = piped_fork_spawn
if not env.has_key('ENV'):
env['ENV'] = {}
env['ENV']['PATH'] = '/usr/local/bin:/opt/bin:/bin:/usr/bin'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.o'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = ''
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['SHLIBPREFIX'] = '$LIBPREFIX'
env['SHLIBSUFFIX'] = '.so'
env['LIBPREFIXES'] = '$LIBPREFIX'
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['PSPAWN'] = pspawn
env['SPAWN'] = spawn
env['SHELL'] = 'sh'
env['ESCAPE'] = escape
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
    #Based on LINUX: ARG_MAX=131072 - 3000 for environment expansion
    #Note: specific platforms might raise or lower this value
env['MAXLINELENGTH'] = 128072
# This platform supports RPATH specifications.
env['__RPATH'] = '$_RPATH'
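# Quick sanity sketch for escape() above (illustrative, not part of SCons):
# backslashes are doubled first, then '"' and '$' are backslash-escaped, and
# the result is wrapped in double quotes, e.g.
#     escape('say "$HOME"')  ->  "say \"\$HOME\""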
|
{
"content_hash": "c7f86edc8ffe916a541e87b4c5658ba0",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 76,
"avg_line_length": 33.00398406374502,
"alnum_prop": 0.5899323998068566,
"repo_name": "datalogics-robb/scons",
"id": "1d4e9f70d82d1e26b10325ff4498f480f3267739",
"size": "8284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Platform/posix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4753658"
},
{
"name": "Shell",
"bytes": "25935"
}
],
"symlink_target": ""
}
|
from netmiko.oneaccess.oneaccess_oneos import OneaccessOneOSSSH, OneaccessOneOSTelnet
__all__ = ["OneaccessOneOSSSH", "OneaccessOneOSTelnet"]
|
{
"content_hash": "601c554b6b0e594bf10a224ba78bf030",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 85,
"avg_line_length": 47.666666666666664,
"alnum_prop": 0.8181818181818182,
"repo_name": "ktbyers/netmiko",
"id": "a3d597813cb65053a1b2c4ab1c7c12bdc6edae0a",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netmiko/oneaccess/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "384"
},
{
"name": "Python",
"bytes": "726727"
},
{
"name": "Shell",
"bytes": "21540"
}
],
"symlink_target": ""
}
|
'''Migration script for Search-enabled Models.'''
from __future__ import absolute_import
import logging
from elasticsearch import helpers
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website import settings
from framework.auth import User
from website.models import Node
from website.app import init_app
import website.search.search as search
from scripts import utils as script_utils
from website.search.elastic_search import es
logger = logging.getLogger(__name__)
app = init_app("website.settings", set_backends=True, routes=True)
def migrate_nodes(index):
logger.info("Migrating nodes to index: {}".format(index))
n_iter = 0
nodes = Node.find(Q('is_public', 'eq', True) & Q('is_deleted', 'eq', False))
for node in nodes:
search.update_node(node, index=index)
n_iter += 1
logger.info('Nodes migrated: {}'.format(n_iter))
def migrate_users(index):
logger.info("Migrating users to index: {}".format(index))
n_migr = 0
n_iter = 0
for user in User.find():
if user.is_active:
search.update_user(user, index=index)
n_migr += 1
n_iter += 1
logger.info('Users iterated: {0}\nUsers migrated: {1}'.format(n_iter, n_migr))
def migrate(delete, index=settings.ELASTIC_INDEX):
script_utils.add_file_logger(logger, __file__)
ctx = app.test_request_context()
ctx.push()
new_index = set_up_index(index)
migrate_nodes(new_index)
migrate_users(new_index)
set_up_alias(index, new_index)
if delete:
delete_old(new_index)
ctx.pop()
def set_up_index(idx):
alias = es.indices.get_aliases(index=idx)
if not alias or not alias.keys() or idx in alias.keys():
# Deal with empty indices or the first migration
index = '{}_v1'.format(idx)
search.create_index(index=index)
logger.info("Reindexing {0} to {1}_v1".format(idx, idx))
helpers.reindex(es, idx, index)
logger.info("Deleting {} index".format(idx))
es.indices.delete(index=idx)
es.indices.put_alias(idx, index)
else:
# Increment version
version = int(alias.keys()[0].split('_v')[1]) + 1
logger.info("Incrementing index version to {}".format(version))
index = '{0}_v{1}'.format(idx, version)
search.create_index(index=index)
logger.info("{} index created".format(index))
return index
def set_up_alias(old_index, index):
alias = es.indices.get_aliases(index=old_index)
if alias:
logger.info("Removing old aliases to {}".format(old_index))
es.indices.delete_alias(index=old_index, name='_all', ignore=404)
logger.info("Creating new alias from {0} to {1}".format(old_index, index))
es.indices.put_alias(old_index, index)
def delete_old(index):
old_version = int(index.split('_v')[1]) - 1
    if old_version < 1:
        logger.info("No index before {} to delete".format(index))
else:
old_index = index.split('_v')[0] + '_v' + str(old_version)
logger.info("Deleting {}".format(old_index))
es.indices.delete(index=old_index, ignore=404)
if __name__ == '__main__':
migrate(False)
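# Illustrative index lifecycle, inferred from set_up_index/set_up_alias above
# ('osf' stands in for settings.ELASTIC_INDEX and is a hypothetical value):
#   1st run: plain index 'osf' is reindexed into 'osf_v1'; alias 'osf' -> 'osf_v1'
#   2nd run: 'osf_v2' is created and populated; alias 'osf' -> 'osf_v2';
#            'osf_v1' is deleted only when migrate(delete=True) is used.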
|
{
"content_hash": "5dd17c5a7f799393b8477f286301d019",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 82,
"avg_line_length": 29.761467889908257,
"alnum_prop": 0.6430332922318126,
"repo_name": "himanshuo/osf.io",
"id": "f9fc0682ee02fdf322c24ddde073ca985e56f030",
"size": "3290",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/search_migration/migrate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78345"
},
{
"name": "HTML",
"bytes": "34188"
},
{
"name": "JavaScript",
"bytes": "885345"
},
{
"name": "Mako",
"bytes": "442634"
},
{
"name": "Python",
"bytes": "2536134"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.log import app_log
from tornado.stack_context import (StackContext, wrap, NullContext, StackContextInconsistentError,
ExceptionStackContext, run_with_stack_context, _state)
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest
from tornado.web import asynchronous, Application, RequestHandler
import contextlib
import functools
import logging
class TestRequestHandler(RequestHandler):
def __init__(self, app, request, io_loop):
super(TestRequestHandler, self).__init__(app, request)
self.io_loop = io_loop
@asynchronous
def get(self):
logging.debug('in get()')
# call self.part2 without a self.async_callback wrapper. Its
# exception should still get thrown
self.io_loop.add_callback(self.part2)
def part2(self):
logging.debug('in part2()')
# Go through a third layer to make sure that contexts once restored
# are again passed on to future callbacks
self.io_loop.add_callback(self.part3)
def part3(self):
logging.debug('in part3()')
raise Exception('test exception')
def get_error_html(self, status_code, **kwargs):
if 'exception' in kwargs and str(kwargs['exception']) == 'test exception':
return 'got expected exception'
else:
return 'unexpected failure'
class HTTPStackContextTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', TestRequestHandler,
dict(io_loop=self.io_loop))])
def test_stack_context(self):
with ExpectLog(app_log, "Uncaught exception GET /"):
self.http_client.fetch(self.get_url('/'), self.handle_response)
self.wait()
self.assertEqual(self.response.code, 500)
self.assertTrue(b'got expected exception' in self.response.body)
def handle_response(self, response):
self.response = response
self.stop()
class StackContextTest(AsyncTestCase):
def setUp(self):
super(StackContextTest, self).setUp()
self.active_contexts = []
@contextlib.contextmanager
def context(self, name):
self.active_contexts.append(name)
yield
self.assertEqual(self.active_contexts.pop(), name)
# Simulates the effect of an asynchronous library that uses its own
# StackContext internally and then returns control to the application.
def test_exit_library_context(self):
def library_function(callback):
# capture the caller's context before introducing our own
callback = wrap(callback)
with StackContext(functools.partial(self.context, 'library')):
self.io_loop.add_callback(
functools.partial(library_inner_callback, callback))
def library_inner_callback(callback):
self.assertEqual(self.active_contexts[-2:],
['application', 'library'])
callback()
def final_callback():
# implementation detail: the full context stack at this point
# is ['application', 'library', 'application']. The 'library'
# context was not removed, but is no longer innermost so
# the application context takes precedence.
self.assertEqual(self.active_contexts[-1], 'application')
self.stop()
with StackContext(functools.partial(self.context, 'application')):
library_function(final_callback)
self.wait()
def test_deactivate(self):
deactivate_callbacks = []
def f1():
with StackContext(functools.partial(self.context, 'c1')) as c1:
deactivate_callbacks.append(c1)
self.io_loop.add_callback(f2)
def f2():
with StackContext(functools.partial(self.context, 'c2')) as c2:
deactivate_callbacks.append(c2)
self.io_loop.add_callback(f3)
def f3():
with StackContext(functools.partial(self.context, 'c3')) as c3:
deactivate_callbacks.append(c3)
self.io_loop.add_callback(f4)
def f4():
self.assertEqual(self.active_contexts, ['c1', 'c2', 'c3'])
deactivate_callbacks[1]()
# deactivating a context doesn't remove it immediately,
# but it will be missing from the next iteration
self.assertEqual(self.active_contexts, ['c1', 'c2', 'c3'])
self.io_loop.add_callback(f5)
def f5():
self.assertEqual(self.active_contexts, ['c1', 'c3'])
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_deactivate_order(self):
# Stack context deactivation has separate logic for deactivation at
# the head and tail of the stack, so make sure it works in any order.
def check_contexts():
# Make sure that the full-context array and the exception-context
# linked lists are consistent with each other.
full_contexts, chain = _state.contexts
exception_contexts = []
while chain is not None:
exception_contexts.append(chain)
chain = chain.old_contexts[1]
self.assertEqual(list(reversed(full_contexts)), exception_contexts)
return list(self.active_contexts)
def make_wrapped_function():
"""Wraps a function in three stack contexts, and returns
the function along with the deactivation functions.
"""
# Remove the test's stack context to make sure we can cover
# the case where the last context is deactivated.
with NullContext():
partial = functools.partial
with StackContext(partial(self.context, 'c0')) as c0:
with StackContext(partial(self.context, 'c1')) as c1:
with StackContext(partial(self.context, 'c2')) as c2:
return (wrap(check_contexts), [c0, c1, c2])
# First make sure the test mechanism works without any deactivations
func, deactivate_callbacks = make_wrapped_function()
self.assertEqual(func(), ['c0', 'c1', 'c2'])
# Deactivate the tail
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[0]()
self.assertEqual(func(), ['c1', 'c2'])
# Deactivate the middle
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[1]()
self.assertEqual(func(), ['c0', 'c2'])
# Deactivate the head
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[2]()
self.assertEqual(func(), ['c0', 'c1'])
def test_isolation_nonempty(self):
# f2 and f3 are a chain of operations started in context c1.
# f2 is incidentally run under context c2, but that context should
# not be passed along to f3.
def f1():
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f2)
with StackContext(functools.partial(self.context, 'c2')):
wrapped()
def f2():
self.assertIn('c1', self.active_contexts)
self.io_loop.add_callback(f3)
def f3():
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_isolation_empty(self):
# Similar to test_isolation_nonempty, but here the f2/f3 chain
# is started without any context. Behavior should be equivalent
# to the nonempty case (although historically it was not)
def f1():
with NullContext():
wrapped = wrap(f2)
with StackContext(functools.partial(self.context, 'c2')):
wrapped()
def f2():
self.io_loop.add_callback(f3)
def f3():
self.assertNotIn('c2', self.active_contexts)
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_yield_in_with(self):
@gen.engine
def f():
with StackContext(functools.partial(self.context, 'c1')):
# This yield is a problem: the generator will be suspended
# and the StackContext's __exit__ is not called yet, so
# the context will be left on _state.contexts for anything
# that runs before the yield resolves.
yield gen.Task(self.io_loop.add_callback)
with self.assertRaises(StackContextInconsistentError):
f()
self.wait()
@gen_test
def test_yield_outside_with(self):
# This pattern avoids the problem in the previous test.
cb = yield gen.Callback('k1')
with StackContext(functools.partial(self.context, 'c1')):
self.io_loop.add_callback(cb)
yield gen.Wait('k1')
def test_yield_in_with_exception_stack_context(self):
# As above, but with ExceptionStackContext instead of StackContext.
@gen.engine
def f():
with ExceptionStackContext(lambda t, v, tb: False):
yield gen.Task(self.io_loop.add_callback)
with self.assertRaises(StackContextInconsistentError):
f()
self.wait()
@gen_test
def test_yield_outside_with_exception_stack_context(self):
cb = yield gen.Callback('k1')
with ExceptionStackContext(lambda t, v, tb: False):
self.io_loop.add_callback(cb)
yield gen.Wait('k1')
def test_run_with_stack_context(self):
@gen.coroutine
def f1():
self.assertEqual(self.active_contexts, ['c1'])
yield run_with_stack_context(
StackContext(functools.partial(self.context, 'c1')),
f2)
self.assertEqual(self.active_contexts, ['c1'])
@gen.coroutine
def f2():
self.assertEqual(self.active_contexts, ['c1', 'c2'])
yield gen.Task(self.io_loop.add_callback)
self.assertEqual(self.active_contexts, ['c1', 'c2'])
self.assertEqual(self.active_contexts, [])
run_with_stack_context(
StackContext(functools.partial(self.context, 'c1')),
f1)
self.assertEqual(self.active_contexts, [])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "8908d3ce156f1bae301581053b452c2b",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 98,
"avg_line_length": 38.392857142857146,
"alnum_prop": 0.6015813953488373,
"repo_name": "mywaiting/LifeLogger",
"id": "d6f8239e67c050d538301ce7674dd767b2c30c97",
"size": "10772",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tornado/test/stack_context_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10676"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "1070427"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib
matplotlib.use("Qt4Agg")  # select the backend before pyplot is imported
from matplotlib import pyplot as plt
import pylab
with open('../dataset/ENGLISH_TRAIN','r') as f:
x = np.array([a.count(' ') for a in f.readlines()])
with open('../dataset/TAMIL_TRAIN','r') as f:
y = np.array([a.count(' ') for a in f.readlines()])
plt.figure()
plt.axis([0, 200, 0, 120000])
plt.subplots_adjust(hspace=.4)
ax = plt.subplot(2,1,1)
plt.hist(x, bins=10, alpha=0.5, label='English')
plt.hist(y, bins=10, alpha=0.5, label='Tamil')
plt.title('Overlapping')
plt.xlabel('No. of words')
plt.ylabel('No of sentences')
plt.legend()
common_params = dict(bins=20,
range=(0, 80))
plt.subplot(2,1,2)
plt.title('Skinny shift')
plt.hist((x, y), **common_params)
plt.legend(loc='upper right')
common_params['histtype'] = 'step'
plt.xlabel('No. of words')
plt.ylabel('No of sentences')
plt.legend()
pylab.savefig('Histogram.png', bbox_inches='tight')
plt.show()
plt.subplots_adjust(hspace=.4)
plt.subplot(2,1,1)
plt.title('Scatter - Correlation analysis')
plt.xlabel('English')
plt.ylabel('Tamil')
plt.scatter(x, y)  # scatter, not a line plot, for correlation analysis
plt.subplot(2,1,2)
plt.xlabel('English')
plt.ylabel('Tamil')
plt.hist2d(x,y,bins=10, range=[[0,70],[0,50]]);
pylab.savefig('Correlation.png', bbox_inches='tight')
plt.show()
|
{
"content_hash": "4cb21caff16a5b086a1b23e3c7a3f2d0",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 55,
"avg_line_length": 25.50943396226415,
"alnum_prop": 0.6708579881656804,
"repo_name": "hanskrupakar/English-Tamil-MT",
"id": "5b7e79c54e870934687f0de187efce063b569391",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tensorflow with Eng and Tam Word2vec/misc/visual.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "109722"
}
],
"symlink_target": ""
}
|
import yaml
import json
with open("ex6.yml") as f:
    f_yml = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags
print yaml.dump(f_yml, default_flow_style=False)
with open("ex6.json") as f:
f_json = json.load(f)
print json.dumps(f_json, indent=4)
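# Hypothetical input files (the real ex6.yml/ex6.json are not shown here);
# any documents with matching top-level structure round-trip the same way:
#   ex6.yml             ex6.json
#     key: value          {"key": "value"}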
|
{
"content_hash": "7034283a34edce528e2b4c3f9f05452c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 22.4,
"alnum_prop": 0.6517857142857143,
"repo_name": "rickkosa/pynet_test",
"id": "c8cd7b9166192e0eeb86c77ed911eb3e736be99a",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaml_json/yaml_json_read.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1782"
}
],
"symlink_target": ""
}
|
"""Module with helper classes used by grr_response_test."""
|
{
"content_hash": "fdcee42b09cb52a13c6d585a38872be0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 59,
"avg_line_length": 30.5,
"alnum_prop": 0.7213114754098361,
"repo_name": "google/grr",
"id": "78b1043ff02188d791aa1b3ccf644fae949a09ac",
"size": "83",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/test/grr_response_test/lib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
import numpy as np
# Load the dataset and keep only the first 16 entries
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 20000
max_length = 80
if __name__ == '__main__':
print('Fetching the imdb dataset')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = x_train[:16]
x_train = sequence.pad_sequences(x_train, maxlen=max_length)
np.save('imdb16.npy', x_train)
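    # Illustrative follow-up (an assumption, not part of the original script):
    # consumers reload the saved batch with np.load; with 16 samples padded to
    # max_length the array shape is (16, max_length).
    batch = np.load('imdb16.npy')
    assert batch.shape == (16, max_length)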
|
{
"content_hash": "df8c76d5e5fa197d925c0addb08b35b4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 31.466666666666665,
"alnum_prop": 0.6970338983050848,
"repo_name": "plaidml/plaidml",
"id": "b3f0b1b3eba2a389ad74a4614341e2214a237d5b",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/plaidml-v1",
"path": "plaidbench/plaidbench/gen-imdb.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12908"
},
{
"name": "C++",
"bytes": "2299440"
},
{
"name": "CMake",
"bytes": "85677"
},
{
"name": "HTML",
"bytes": "2745"
},
{
"name": "MLIR",
"bytes": "339818"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "588389"
},
{
"name": "TeX",
"bytes": "2194"
}
],
"symlink_target": ""
}
|
''' Display a variety of simple scatter marker shapes whose attributes
can be associated with data columns from ``ColumnDataSources``.
The full list of markers built into Bokeh is given below:
* :class:`~bokeh.models.markers.Asterisk`
* :class:`~bokeh.models.markers.Circle`
* :class:`~bokeh.models.markers.CircleCross`
* :class:`~bokeh.models.markers.CircleDot`
* :class:`~bokeh.models.markers.CircleX`
* :class:`~bokeh.models.markers.CircleY`
* :class:`~bokeh.models.markers.Cross`
* :class:`~bokeh.models.markers.Dash`
* :class:`~bokeh.models.markers.Diamond`
* :class:`~bokeh.models.markers.DiamondCross`
* :class:`~bokeh.models.markers.DiamondDot`
* :class:`~bokeh.models.markers.Dot`
* :class:`~bokeh.models.markers.Hex`
* :class:`~bokeh.models.markers.HexDot`
* :class:`~bokeh.models.markers.InvertedTriangle`
* :class:`~bokeh.models.markers.Plus`
* :class:`~bokeh.models.markers.Square`
* :class:`~bokeh.models.markers.SquareCross`
* :class:`~bokeh.models.markers.SquareDot`
* :class:`~bokeh.models.markers.SquarePin`
* :class:`~bokeh.models.markers.SquareX`
* :class:`~bokeh.models.markers.Triangle`
* :class:`~bokeh.models.markers.TriangleDot`
* :class:`~bokeh.models.markers.TrianglePin`
* :class:`~bokeh.models.markers.X`
* :class:`~bokeh.models.markers.Y`
Markers are all subclasses of ``Glyph``. Additionally, they all share the
same common interface for the fill and line properties provided by their
base class ``Marker``. Note that a few glyphs (``Asterisk``, ``Cross``, and
``X``) only draw lines; for these, the fill property values are ignored. Also note that
the ``Circle`` glyph has some additional properties such as ``radius`` that
other markers do not.
.. autoclass:: Marker
:members:
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import enumeration
from ..core.has_props import abstract
from ..core.properties import (
AngleSpec,
DistanceSpec,
Enum,
Include,
MarkerSpec,
NumberSpec,
ScreenDistanceSpec,
)
from ..core.property_mixins import FillProps, LineProps
from .glyph import FillGlyph, LineGlyph, XYGlyph
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Asterisk',
'Circle',
'CircleCross',
'CircleDot',
'CircleX',
'CircleY',
'Cross',
'Dash',
'Diamond',
'DiamondCross',
'DiamondDot',
'Dot',
'Hex',
'HexDot',
'InvertedTriangle',
'Marker',
'Plus',
'Scatter',
'Square',
'SquareCross',
'SquareDot',
'SquarePin',
'SquareX',
'Triangle',
'TriangleDot',
'TrianglePin',
'X',
'Y',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Marker(XYGlyph, LineGlyph, FillGlyph):
''' Base class for glyphs that are simple markers with line and
fill properties, located at an (x, y) location with a specified
size.
.. note::
For simplicity, all markers have both line and fill properties
declared, however some markers (`Asterisk`, `Cross`, `X`) only
draw lines. For these markers, the fill values are simply
ignored.
'''
# a canonical order for positional args that can be used for any
# functions derived from this class
_args = ('x', 'y', 'size', 'angle')
x = NumberSpec(help="""
The x-axis coordinates for the center of the markers.
""")
y = NumberSpec(help="""
The y-axis coordinates for the center of the markers.
""")
size = ScreenDistanceSpec(default=4, help="""
The size (diameter) values for the markers in screen space units.
""")
angle = AngleSpec(default=0.0, help="""
The angles to rotate the markers.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the markers.
""")
fill_props = Include(FillProps, use_prefix=False, help="""
The %s values for the markers.
""")
class Scatter(Marker):
    ''' Render arbitrary markers according to a specification.
The Scatter can draw any built-in marker type. It can be configured
to draw the same marker for all values by specifying the name of a
marker, e.g.
.. code-block:: python
glyph = Scatter(x="x", y="y", size="sizes", marker="square")
plot.add_glyph(source, glyph)
will render only Square markers for all points. Alternatively, the
Scatter marker can be configured to use marker types specified in a
data source column:
.. code-block:: python
# source.data['markers'] = ["circle", "square", "circle", ... ]
glyph = Scatter(x="x", y="y", size="sizes", marker="markers")
plot.add_glyph(source, glyph)
Note that circles drawn with `Scatter` conform to the standard Marker
interface, and can only vary by size (in screen units) and *not* by radius
(in data units). If you need to control circles by radius in data units,
you should use the Circle glyph directly.
'''
# a canonical order for positional args that can be used for any
# functions derived from this class
_args = ('x', 'y', 'size', 'angle', 'marker')
marker = MarkerSpec(default="circle", help="""
Which marker to render. This can be the name of any built in marker,
e.g. "circle", or a reference to a data column containing such names.
""")
__example__ = "examples/reference/models/Scatter.py"
class Asterisk(Marker):
''' Render asterisk '*' markers. '''
__example__ = "examples/reference/models/Asterisk.py"
class Circle(Marker):
''' Render circle markers. '''
__example__ = "examples/reference/models/Circle.py"
# a canonical order for positional args that can be used for any
# functions derived from this class
_args = ('x', 'y')
radius = DistanceSpec(None, help="""
The radius values for circle markers (in "data space" units, by default).
.. note::
Circle markers are slightly unusual in that they support specifying
a radius in addition to a size. Only one of ``radius`` or ``size``
should be given.
.. warning::
Note that ``Circle`` glyphs are always drawn as circles on the screen,
even in cases where the data space aspect ratio is not 1-1. In all
cases where radius values are specified, the "distance" for the radius
is measured along the dimension specified by ``radius_dimension``. If
the aspect ratio is very large or small, the drawn circles may appear
much larger or smaller than expected. See :bokeh-issue:`626` for more
information.
""")
radius_dimension = Enum(enumeration('x', 'y', 'max', 'min'), help="""
What dimension to measure circle radii along.
When the data space aspect ratio is not 1-1, then the size of the drawn
circles depends on what direction is used to measure the "distance" of
the radius. This property allows that direction to be controlled.
Setting this dimension to 'max' will calculate the radius on both the x
and y dimensions and use the maximum of the two, 'min' selects the minimum.
""")
class CircleCross(Marker):
''' Render circle markers with a '+' cross through the center. '''
__example__ = "examples/reference/models/CircleCross.py"
class CircleDot(Marker):
''' Render circle markers with center dots. '''
__example__ = "examples/reference/models/CircleDot.py"
class CircleX(Marker):
''' Render circle markers with an 'X' cross through the center. '''
__example__ = "examples/reference/models/CircleX.py"
class CircleY(Marker):
    ''' Render circle markers with a 'Y' cross through the center. '''
__example__ = "examples/reference/models/CircleY.py"
class Cross(Marker):
''' Render '+' cross markers. '''
__example__ = "examples/reference/models/Cross.py"
class Dash(Marker):
''' Render dash markers. Use ``angle`` to rotate and create vertically
oriented short lines.
'''
__example__ = "examples/reference/models/Dash.py"
class Diamond(Marker):
''' Render diamond markers. '''
__example__ = "examples/reference/models/Diamond.py"
class DiamondCross(Marker):
''' Render diamond markers with a '+' cross through the center. '''
__example__ = "examples/reference/models/DiamondCross.py"
class DiamondDot(Marker):
''' Render diamond markers with center dots. '''
__example__ = "examples/reference/models/DiamondDot.py"
class Dot(Marker):
''' Render dots (one-quarter radius circles). '''
__example__ = "examples/reference/models/Dot.py"
class Hex(Marker):
''' Render hexagon markers. '''
__example__ = "examples/reference/models/Hex.py"
class HexDot(Marker):
''' Render hexagon markers with center dots. '''
__example__ = "examples/reference/models/HexDot.py"
class InvertedTriangle(Marker):
''' Render upside-down triangle markers. '''
__example__ = "examples/reference/models/InvertedTriangle.py"
class Plus(Marker):
''' Render filled plus markers '''
__example__ = "examples/reference/models/Plus.py"
class Square(Marker):
''' Render square markers. '''
__example__ = "examples/reference/models/Square.py"
class SquareDot(Marker):
''' Render square markers with center dots. '''
__example__ = "examples/reference/models/SquareDot.py"
class SquarePin(Marker):
''' Render pin-cushion square markers. '''
__example__ = "examples/reference/models/SquarePin.py"
class SquareCross(Marker):
''' Render square markers with a '+' cross through the center. '''
__example__ = "examples/reference/models/SquareCross.py"
class SquareX(Marker):
''' Render square markers with an 'X' cross through the center. '''
__example__ = "examples/reference/models/SquareX.py"
class Triangle(Marker):
''' Render triangle markers. '''
__example__ = "examples/reference/models/Triangle.py"
class TriangleDot(Marker):
''' Render triangle markers with center dots. '''
__example__ = "examples/reference/models/TriangleDot.py"
class TrianglePin(Marker):
''' Render pin-cushion triangle markers. '''
__example__ = "examples/reference/models/TrianglePin.py"
class X(Marker):
''' Render 'X' markers. '''
__example__ = "examples/reference/models/X.py"
class Y(Marker):
''' Render 'Y' markers. '''
__example__ = "examples/reference/models/Y.py"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
marker_types = {
"asterisk": Asterisk,
"circle": Circle,
"circle_cross": CircleCross,
"circle_dot": CircleDot,
"circle_x": CircleX,
"circle_y": CircleY,
"cross": Cross,
"dash": Dash,
"diamond": Diamond,
"diamond_cross": DiamondCross,
"diamond_dot": DiamondDot,
"dot": Dot,
"hex": Hex,
"hex_dot": HexDot,
"inverted_triangle": InvertedTriangle,
"plus": Plus,
"square": Square,
"square_cross": SquareCross,
"square_dot": SquareDot,
"square_pin": SquarePin,
"square_x": SquareX,
"triangle": Triangle,
"triangle_dot": TriangleDot,
"triangle_pin": TrianglePin,
"x": X,
"y": Y,
}
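# Illustrative lookup (not part of this module's public code path): the
# mapping resolves string marker names to glyph classes, so e.g.
#     glyph_cls = marker_types["square"]         # -> Square
#     glyph = glyph_cls(x="x", y="y", size=10)   # standard Marker kwargs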
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
{
"content_hash": "dc45490446003250f8cc4b129896fed9",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 79,
"avg_line_length": 30.55919395465995,
"alnum_prop": 0.6027035938015166,
"repo_name": "ericmjl/bokeh",
"id": "d85eb566456ea645c849b133e1ea244e9df683a3",
"size": "12463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/models/markers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
from parcels import FieldSet, GridCode
import numpy as np
import math
try:
from pympler import asizeof
except ImportError:
asizeof = None
from os import path
import xarray as xr
try:
from parcels.tools import perlin2d as PERLIN
except ImportError:
PERLIN = None
noctaves = 4
perlinres = (32, 8)
shapescale = (1, 1)
perlin_persistence = 0.3
scalefac = 2.0
def generate_testfieldset(xdim, ydim, zdim, tdim):
lon = np.linspace(0., 2., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.linspace(0., 0.5, zdim, dtype=np.float32)
time = np.linspace(0., tdim, tdim, dtype=np.float64)
U = np.ones((xdim, ydim, zdim, tdim), dtype=np.float32)
V = np.zeros((xdim, ydim, zdim, tdim), dtype=np.float32)
P = 2.*np.ones((xdim, ydim, zdim, tdim), dtype=np.float32)
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat, 'depth': depth, 'time': time}
fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
fieldset.write('testfields')
def generate_perlin_testfield():
img_shape = (int(math.pow(2, noctaves)) * perlinres[0] * shapescale[0], int(math.pow(2, noctaves)) * perlinres[1] * shapescale[1])
# Coordinates of the test fieldset (on A-grid in deg)
lon = np.linspace(-180.0, 180.0, img_shape[0], dtype=np.float32)
lat = np.linspace(-90.0, 90.0, img_shape[1], dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
time = np.array(time) if not isinstance(time, np.ndarray) else time
# Define arrays U (zonal), V (meridional), W (vertical) and P (sea
# surface height) all on A-grid
if PERLIN is not None:
U = PERLIN.generate_fractal_noise_2d(img_shape, perlinres, noctaves, perlin_persistence) * scalefac
V = PERLIN.generate_fractal_noise_2d(img_shape, perlinres, noctaves, perlin_persistence) * scalefac
else:
U = np.ones(img_shape, dtype=np.float32)*scalefac
V = np.ones(img_shape, dtype=np.float32)*scalefac
U = np.transpose(U, (1, 0))
U = np.expand_dims(U, 0)
V = np.transpose(V, (1, 0))
V = np.expand_dims(V, 0)
data = {'U': U, 'V': V}
dimensions = {'time': time, 'lon': lon, 'lat': lat}
if asizeof is not None:
print("Perlin U-field requires {} bytes of memory.".format(U.size * U.itemsize))
print("Perlin V-field requires {} bytes of memory.".format(V.size * V.itemsize))
fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=False)
# fieldset.write("perlinfields") # can also be used, but then has a ghost depth dimension
write_simple_2Dt(fieldset.U, path.join(path.dirname(__file__), 'perlinfields'), varname='vozocrtx')
write_simple_2Dt(fieldset.V, path.join(path.dirname(__file__), 'perlinfields'), varname='vomecrty')
def write_simple_2Dt(field, filename, varname=None):
"""Write a :class:`Field` to a netcdf file
:param filename: Basename of the file
:param varname: Name of the field, to be appended to the filename"""
filepath = str('%s%s.nc' % (filename, field.name))
if varname is None:
varname = field.name
# Create DataArray objects for file I/O
if field.grid.gtype == GridCode.RectilinearZGrid:
nav_lon = xr.DataArray(field.grid.lon + np.zeros((field.grid.ydim, field.grid.xdim), dtype=np.float32),
coords=[('y', field.grid.lat), ('x', field.grid.lon)])
nav_lat = xr.DataArray(field.grid.lat.reshape(field.grid.ydim, 1) + np.zeros(field.grid.xdim, dtype=np.float32),
coords=[('y', field.grid.lat), ('x', field.grid.lon)])
elif field.grid.gtype == GridCode.CurvilinearZGrid:
nav_lon = xr.DataArray(field.grid.lon, coords=[('y', range(field.grid.ydim)), ('x', range(field.grid.xdim))])
nav_lat = xr.DataArray(field.grid.lat, coords=[('y', range(field.grid.ydim)), ('x', range(field.grid.xdim))])
else:
raise NotImplementedError('Field.write only implemented for RectilinearZGrid and CurvilinearZGrid')
attrs = {'units': 'seconds since ' + str(field.grid.time_origin)} if field.grid.time_origin.calendar else {}
time_counter = xr.DataArray(field.grid.time,
dims=['time_counter'],
attrs=attrs)
vardata = xr.DataArray(field.data.reshape((field.grid.tdim, field.grid.ydim, field.grid.xdim)),
dims=['time_counter', 'y', 'x'])
# Create xarray Dataset and output to netCDF format
attrs = {'parcels_mesh': field.grid.mesh}
dset = xr.Dataset({varname: vardata}, coords={'nav_lon': nav_lon,
'nav_lat': nav_lat,
'time_counter': time_counter}, attrs=attrs)
dset.to_netcdf(filepath)
if asizeof is not None:
mem = 0
mem += asizeof.asizeof(field)
mem += asizeof.asizeof(field.data[:])
mem += asizeof.asizeof(field.grid)
mem += asizeof.asizeof(vardata)
mem += asizeof.asizeof(nav_lat)
mem += asizeof.asizeof(nav_lon)
mem += asizeof.asizeof(time_counter)
print("Field '{}' requires {} bytes of memory.".format(field.name, mem))
if __name__ == "__main__":
generate_testfieldset(xdim=5, ydim=3, zdim=2, tdim=15)
generate_perlin_testfield()
|
{
"content_hash": "cfbccb9d4879c1fe03c975db5d4e792a",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 134,
"avg_line_length": 46.530434782608694,
"alnum_prop": 0.6236217529433751,
"repo_name": "OceanPARCELS/parcels",
"id": "efd0dc4bfd1d9c6b3e9864ad35f16e5912dc9388",
"size": "5351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_data/create_testfields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10024"
},
{
"name": "Python",
"bytes": "201184"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import all_transformers
def best_quality_properties(props, *guesses):
"""Retrieve the best quality guess, based on given properties
:param props: Properties to include in the rating
:type props: list of strings
:param guesses: Guesses to rate
:type guesses: :class:`guessit.guess.Guess`
:return: Best quality guess from all passed guesses
:rtype: :class:`guessit.guess.Guess`
"""
best_guess = None
best_rate = None
for guess in guesses:
for transformer in all_transformers():
rate = transformer.rate_quality(guess, *props)
if best_rate is None or best_rate < rate:
best_rate = rate
best_guess = guess
return best_guess
def best_quality(*guesses):
"""Retrieve the best quality guess.
:param guesses: Guesses to rate
:type guesses: :class:`guessit.guess.Guess`
:return: Best quality guess from all passed guesses
:rtype: :class:`guessit.guess.Guess`
"""
best_guess = None
best_rate = None
for guess in guesses:
for transformer in all_transformers():
rate = transformer.rate_quality(guess)
if best_rate is None or best_rate < rate:
best_rate = rate
best_guess = guess
return best_guess
|
{
"content_hash": "a2cf6ba0a6529e6e2f7bb95e2dfbc296",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6463157894736842,
"repo_name": "Kallehz/Python",
"id": "870bbdbb477872c7f81fcf962a57ce0df76fe561",
"size": "2245",
"binary": false,
"copies": "51",
"ref": "refs/heads/master",
"path": "Verkefni 4/guessit/quality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "267463"
},
{
"name": "Python",
"bytes": "475606"
},
{
"name": "Tcl",
"bytes": "3559"
}
],
"symlink_target": ""
}
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` is the product of the axis lengths
        (at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
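# Worked example (illustrative): when a column has fewer unique values than
# ``grid_resolution``, its axis falls back to the unique values, so
#     X = np.array([[0., 10.], [1., 20.], [2., 30.]])
#     grid, axes = _grid_from_X(X, grid_resolution=100)
#     # axes == [array([0., 1., 2.]), array([10., 20., 30.])]
#     # grid.shape == (9, 2)  (cartesian product of the two axes)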
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
{
"content_hash": "8f43111fd39e0e7e47c0cab3b99aed4f",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 79,
"avg_line_length": 38.491002570694086,
"alnum_prop": 0.5862552594670407,
"repo_name": "costypetrisor/scikit-learn",
"id": "bf5b1e9e2562f47a46ae42ca23ee370578996efa",
"size": "14973",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/partial_dependence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1370"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5841302"
},
{
"name": "Shell",
"bytes": "3952"
}
],
"symlink_target": ""
}
|
from .xcbuildsystem import *
from .xcbuildrule import *
from .xcspec_helper import *
from .xcplatform import *
from .xcsdk import *
from .xccompiler import *
from .swiftcompiler import *
from .clangcompiler import *
from .xclinker import *
from .XCSpec.xcspec import *
from .LangSpec.langspec import *
|
{
"content_hash": "81cd9168979ebd6f233a63b5ce9e5c74",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 32,
"avg_line_length": 19.125,
"alnum_prop": 0.761437908496732,
"repo_name": "samdmarshall/xcparse",
"id": "d1c8d4ea52dfe3a8cc3616de419fc2fc9d4f1e6f",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "xcparse/Xcode/BuildSystem/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "260"
},
{
"name": "C++",
"bytes": "270"
},
{
"name": "Objective-C",
"bytes": "1783"
},
{
"name": "Objective-C++",
"bytes": "309"
},
{
"name": "Python",
"bytes": "228296"
},
{
"name": "Shell",
"bytes": "42"
},
{
"name": "Swift",
"bytes": "945"
}
],
"symlink_target": ""
}
|
import re
import os
import os.path
import sys
import traceback
import IECore
## This function provides an easy means of providing a flexible configuration
# mechanism for any software. It works by executing all .py files found on
# a series of searchpaths. It is expected that these files will then make appropriate
# calls to objects passed in via the specified contextDict.
# \ingroup python
def loadConfig( searchPaths, contextDict, raiseExceptions = False ) :
paths = searchPaths.paths
paths.reverse()
for path in paths :
# \todo Perhaps filter out filenames that begin with "~", also? This would
# exclude certain types of auto-generated backup files.
pyExtTest = re.compile( "^[^~].*\.py$" )
for dirPath, dirNames, fileNames in os.walk( path ) :
for fileName in filter( pyExtTest.search, fileNames ) :
fullFileName = os.path.join( dirPath, fileName )
if raiseExceptions:
execfile( fullFileName, contextDict, contextDict )
else:
try :
execfile( fullFileName, contextDict, contextDict )
except Exception, m :
stacktrace = traceback.format_exc()
IECore.msg( IECore.Msg.Level.Error, "IECore.loadConfig", "Error executing file \"%s\" - \"%s\".\n %s" % ( fullFileName, m, stacktrace ) )
loadConfig( IECore.SearchPath( os.environ.get( "IECORE_CONFIG_PATHS", "" ), ":" ), { "IECore" : IECore } )
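# Illustrative config file (an assumption, not shipped with IECore): any
# "*.py" file found on IECORE_CONFIG_PATHS is executed with contextDict as
# its globals, so it can refer to the passed-in IECore module directly:
#
#   IECore.msg( IECore.Msg.Level.Info, "myConfig", "configuration loaded" )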
|
{
"content_hash": "70b288beb4de7048914569f7d30c703e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 143,
"avg_line_length": 41.09090909090909,
"alnum_prop": 0.7116519174041298,
"repo_name": "tectronics/cortex-vfx",
"id": "64a770b069c47a34053cc7eb27c065e5ce0393e8",
"size": "3145",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python/IECore/ConfigLoader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "65905"
},
{
"name": "C++",
"bytes": "10534690"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31102"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "1859"
},
{
"name": "Python",
"bytes": "4463622"
},
{
"name": "Slash",
"bytes": "7896"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
from pcaspy import Driver, SimpleServer, Alarm, Severity
prefix = 'MTEST:'
pvdb = {
'RAND' : {
'prec' : 3,
'low' : -5, 'high': 5,
'lolo': -10,'hihi': 10,
},
'STATUS' : {
'type' : 'enum',
'enums': ['OK', 'ERROR'],
'states': [Severity.NO_ALARM, Severity.MAJOR_ALARM]
},
'MSG' : {
'type' : 'str',
}
}
class myDriver(Driver):
def __init__(self):
super(myDriver, self).__init__()
def write(self, reason, value):
status = False
if reason == 'MSG':
status = True
# store the value and this also resets alarm status and severity for string type
self.setParam(reason, value)
# set alarm status and severity
if value != '':
self.setParamStatus(reason, Alarm.COMM_ALARM, Severity.MINOR_ALARM)
else:
self.setParamStatus(reason, Alarm.NO_ALARM, Severity.NO_ALARM)
else:
status = True
# store the value and this also resets alarm status and severity for string type
self.setParam(reason, value)
return status
if __name__ == '__main__':
server = SimpleServer()
server.createPV(prefix, pvdb)
driver = myDriver()
# process CA transactions
while True:
server.process(0.1)
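# Illustrative client-side check (assumes a Channel Access client such as
# pyepics is installed; not part of this server example):
#
#   from epics import caput
#   caput('MTEST:MSG', 'oops')   # MSG goes into COMM_ALARM / MINOR severity
#   caput('MTEST:MSG', '')       # alarm clears back to NO_ALARM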
|
{
"content_hash": "6642d5cf1f37fb4a82818e9481a56046",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 92,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.5368188512518409,
"repo_name": "HaveF/pcaspy",
"id": "dbcc36d12400c4f4c2b7580c6150179404f97041",
"size": "1381",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "example/alarm_severity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "143"
},
{
"name": "C++",
"bytes": "7975"
},
{
"name": "Python",
"bytes": "47405"
}
],
"symlink_target": ""
}
|
"""
Verifies builds are the same even with different PYTHONHASHSEEDs.
Tests both solibs and implicit_deps.
"""
import os
import sys
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'ninja':
os.environ["PYTHONHASHSEED"] = "1"
test.run_gyp('empty-targets.gyp')
base = open(test.built_file_path('build.ninja')).read()
for i in range(1,5):
os.environ["PYTHONHASHSEED"] = str(i)
test.run_gyp('empty-targets.gyp')
contents = open(test.built_file_path('build.ninja')).read()
if base != contents:
test.fail_test()
del os.environ["PYTHONHASHSEED"]
test.pass_test()
|
{
"content_hash": "004af8ae2722c340d8c048d023b7b86f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 25.125,
"alnum_prop": 0.681592039800995,
"repo_name": "ayoubserti/winpty",
"id": "cf49f5008484b7270cef06bd1aaa7af7ea8f19d9",
"size": "783",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "build-gyp/test/determinism/gyptest-empty-target-names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1133"
},
{
"name": "Batchfile",
"bytes": "5543"
},
{
"name": "C",
"bytes": "71768"
},
{
"name": "C++",
"bytes": "598953"
},
{
"name": "Emacs Lisp",
"bytes": "14357"
},
{
"name": "Makefile",
"bytes": "14202"
},
{
"name": "Objective-C",
"bytes": "17182"
},
{
"name": "Objective-C++",
"bytes": "1873"
},
{
"name": "PowerShell",
"bytes": "5013"
},
{
"name": "Python",
"bytes": "2294940"
},
{
"name": "Shell",
"bytes": "24228"
},
{
"name": "Swift",
"bytes": "116"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import ast
import pycodestyle
from collections import namedtuple
from functools import partial
class SentryVisitor(ast.NodeVisitor):
NODE_WINDOW_SIZE = 4
def __init__(self, filename, lines):
self.errors = []
self.filename = filename
self.lines = lines
self.has_absolute_import = False
self.node_stack = []
self.node_window = []
def finish(self):
if not self.has_absolute_import:
self.errors.append(
B003(1, 1),
)
def visit(self, node):
self.node_stack.append(node)
self.node_window.append(node)
self.node_window = self.node_window[-self.NODE_WINDOW_SIZE:]
super(SentryVisitor, self).visit(node)
self.node_stack.pop()
def visit_ExceptHandler(self, node):
if node.type is None:
self.errors.append(B001(node.lineno, node.col_offset))
self.generic_visit(node)
def visit_ImportFrom(self, node):
if node.module in B307.names:
self.errors.append(B307(node.lineno, node.col_offset))
if node.module == '__future__':
for nameproxy in node.names:
if nameproxy.name == 'absolute_import':
self.has_absolute_import = True
break
def visit_Import(self, node):
for alias in node.names:
if alias.name.split('.', 1)[0] in B307.names:
self.errors.append(B307(node.lineno, node.col_offset))
def visit_Call(self, node):
if isinstance(node.func, ast.Attribute):
for bug in (B301, B302, B305):
if node.func.attr in bug.methods:
call_path = '.'.join(self.compose_call_path(node.func.value))
if call_path not in bug.valid_paths:
self.errors.append(bug(node.lineno, node.col_offset))
break
for bug in (B312, ):
if node.func.attr in bug.methods:
call_path = '.'.join(self.compose_call_path(node.func.value))
if call_path in bug.invalid_paths:
self.errors.append(bug(node.lineno, node.col_offset))
break
self.generic_visit(node)
def visit_Attribute(self, node):
call_path = list(self.compose_call_path(node))
if '.'.join(call_path) == 'sys.maxint':
self.errors.append(B304(node.lineno, node.col_offset))
elif len(call_path) == 2 and call_path[1] == 'message':
name = call_path[0]
for elem in reversed(self.node_stack[:-1]):
if isinstance(elem, ast.ExceptHandler) and elem.name == name:
self.errors.append(B306(node.lineno, node.col_offset))
break
if node.attr in B101.methods:
self.errors.append(
B101(
message="B101: Avoid using the {} mock call as it is "
"confusing and prone to causing invalid test "
"behavior.".format(node.attr),
lineno=node.lineno,
col=node.col_offset,
),
)
def visit_Assign(self, node):
# TODO(dcramer): pretty sure these aren't working correctly on Python2
if isinstance(self.node_stack[-2], ast.ClassDef):
            # note: by hasattr below we're ignoring starred arguments, slices
# and tuples for simplicity.
assign_targets = {t.id for t in node.targets if hasattr(t, 'id')}
if '__metaclass__' in assign_targets:
self.errors.append(B303(node.lineno, node.col_offset))
if '__unicode__' in assign_targets:
self.errors.append(B313(node.lineno, node.col_offset))
self.generic_visit(node)
def visit_Name(self, node):
for bug in (B308, B309, B310, B311):
if node.id in bug.names:
self.errors.append(
bug(
lineno=node.lineno,
col=node.col_offset,
),
)
if node.id == 'print':
self.check_print(node)
def visit_Print(self, node):
self.check_print(node)
def check_print(self, node):
if not self.filename.startswith('tests/'):
self.errors.append(B314(lineno=node.lineno, col=node.col_offset))
def compose_call_path(self, node):
if isinstance(node, ast.Attribute):
for item in self.compose_call_path(node.value):
yield item
yield node.attr
elif isinstance(node, ast.Name):
yield node.id
class SentryCheck(object):
name = 'sentry-checker'
def __init__(self, tree, filename=None, lines=None):
self.tree = tree
self.filename = filename
self.lines = lines
self.visitor = SentryVisitor
def run(self):
if not self.tree or not self.lines:
self.load_file()
visitor = self.visitor(
filename=self.filename,
lines=self.lines,
)
visitor.visit(self.tree)
visitor.finish()
for e in visitor.errors:
try:
if pycodestyle.noqa(self.lines[e.lineno - 1]):
continue
except IndexError:
pass
yield e
def load_file(self):
"""
Loads the file in a way that auto-detects source encoding and deals
with broken terminal encodings for stdin.
Stolen from flake8_import_order because it's good.
"""
if self.filename in ("stdin", "-", None):
self.filename = "stdin"
self.lines = pycodestyle.stdin_get_value().splitlines(True)
else:
self.lines = pycodestyle.readlines(self.filename)
if not self.tree:
self.tree = ast.parse("".join(self.lines))
# def run(self):
# visitor = Py2to3Visitor()
# visitor.visit(self.tree)
# for code, lineno, name in visitor.errors:
# yield lineno, 0, self.codes[code], type(self)
error = namedtuple('error', 'lineno col message type')
B001 = partial(
error,
message="B001: Do not use bare `except:`, it also catches unexpected "
"events like memory errors, interrupts, system exit, and so on. "
"Prefer `except Exception:`. If you're sure what you're doing, "
"be explicit and write `except BaseException:`.",
type=SentryCheck,
)
B002 = partial(
error,
message="B002: Python does not support the unary prefix increment. Writing "
"++n is equivalent to +(+(n)), which equals n. You meant n += 1.",
type=SentryCheck,
)
B003 = partial(
error,
message="B003: Missing `from __future__ import absolute_import`",
type=SentryCheck,
)
B101 = partial(error, type=SentryCheck)
B101.methods = {
'assert_calls', 'assert_not_called', 'assert_called', 'assert_called_once', 'not_called',
'called_once', 'called_once_with'
}
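# Illustrative code B101 flags (an assumption about the typical misuse): on
# older mock releases these names are plain auto-created attributes, so the
# "assertion" silently passes instead of checking anything:
#
#   my_mock.assert_called_once()   # no-op attribute access, not a check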
# Those could be false positives but it's more dangerous to let them slip
# through if they're not.
B301 = partial(
error,
message="B301: Python 3 does not include .iter* methods on dictionaries. "
"Use `six.iter*` or `future.utils.iter*` instead.",
type=SentryCheck,
)
B301.methods = {'iterkeys', 'itervalues', 'iteritems', 'iterlists'}
B301.valid_paths = {'six', 'future.utils', 'builtins'}
B302 = partial(
error,
message="B302: Python 3 does not include .view* methods on dictionaries. "
"Remove the ``view`` prefix from the method name. Use `six.view*` "
"or `future.utils.view*` instead.",
type=SentryCheck,
)
B302.methods = {'viewkeys', 'viewvalues', 'viewitems', 'viewlists'}
B302.valid_paths = {'six', 'future.utils', 'builtins'}
B303 = partial(
error,
message="B303: __metaclass__ does not exist in Python 3. Use "
"use `@six.add_metaclass()` instead.",
type=SentryCheck,
)
B304 = partial(
error,
message="B304: sys.maxint does not exist in Python 3. Use `sys.maxsize`.",
type=SentryCheck,
)
B305 = partial(
error,
message="B305: .next() does not exist in Python 3. Use ``six.next()`` "
"instead.",
type=SentryCheck,
)
B305.methods = {'next'}
B305.valid_paths = {'six', 'future.utils', 'builtins'}
B306 = partial(
error,
message="B306: ``BaseException.message`` has been deprecated as of Python "
"2.6 and is removed in Python 3. Use ``str(e)`` to access the "
"user-readable message. Use ``e.args`` to access arguments passed "
"to the exception.",
type=SentryCheck,
)
B307 = partial(
error,
message="B307: Python 3 has combined urllib, urllib2, and urlparse into "
"a single library. For Python 2 compatibility, utilize the "
"six.moves.urllib module.",
type=SentryCheck
)
B307.names = {'urllib', 'urllib2', 'urlparse'}
B308 = partial(
error,
message="B308: The usage of ``str`` differs between Python 2 and 3. Use "
"``six.binary_type`` instead.",
type=SentryCheck,
)
B308.names = {'str'}
B309 = partial(
error,
message="B309: ``unicode`` does not exist in Python 3. Use "
"``six.text_type`` instead.",
type=SentryCheck,
)
B309.names = {'unicode'}
B310 = partial(
error,
message="B310: ``basestring`` does not exist in Python 3. Use "
"``six.string_types`` instead.",
type=SentryCheck,
)
B310.names = {'basestring'}
B311 = partial(
error,
message="B311: ``long`` should not be used. Use int instead, and allow "
"Python to deal with handling large integers.",
type=SentryCheck,
)
B311.names = {'long'}
B312 = partial(
error,
message="B312: ``cgi.escape`` and ``html.escape`` should not be used. Use "
"sentry.utils.html.escape instead.",
type=SentryCheck,
)
B312.methods = {'escape'}
B312.invalid_paths = {'cgi', 'html'}
B313 = partial(
error,
message="B313: ``__unicode__`` should not be defined on classes. Define "
"just ``__str__`` returning a unicode text string, and use the "
"sentry.utils.compat.implements_to_string class decorator.",
type=SentryCheck,
)
B314 = partial(
error,
message="B314: print functions or statements are not allowed.",
type=SentryCheck,
)
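# Minimal standalone driver (illustrative only; in practice this module is
# registered as a flake8 plugin rather than run directly):
if __name__ == '__main__':
    import sys
    checker = SentryCheck(tree=None, filename=sys.argv[1])
    for err in checker.run():
        # error is the namedtuple defined above: (lineno, col, message, type)
        print('%s:%d:%d: %s' % (sys.argv[1], err.lineno, err.col, err.message))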
|
{
"content_hash": "c5f3e42fa375cd91a7856db6f001bf92",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 93,
"avg_line_length": 31.51063829787234,
"alnum_prop": 0.5882125976656699,
"repo_name": "gencer/sentry",
"id": "229add3206070de8d8b092e287960eb4109627a0",
"size": "11526",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/lint/sentry_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
import django.db.models.deletion
from django.db import migrations, models
UNFULFILLED = "unfulfilled"
PARTIALLY_FULFILLED = "partially fulfilled"
def create_allocation(
product_variant, warehouse, order_line, quantity_allocated, Allocation
):
stock = product_variant.stocks.get(warehouse=warehouse)
Allocation.objects.create(
order_line=order_line, stock=stock, quantity_allocated=quantity_allocated
)
def create_allocations(apps, schema_editor):
Allocation = apps.get_model("warehouse", "Allocation")
OrderLine = apps.get_model("order", "OrderLine")
Warehouse = apps.get_model("warehouse", "Warehouse")
for warehouse in Warehouse.objects.iterator():
shipping_zone = warehouse.shipping_zones.first()
if not shipping_zone:
continue
shipping_zone_pk = shipping_zone.pk
for order_line in OrderLine.objects.filter(
order__shipping_method__shipping_zone__pk=shipping_zone_pk,
order__status__in=[UNFULFILLED, PARTIALLY_FULFILLED],
).iterator():
quantity_unfulfilled = order_line.quantity - order_line.quantity_fulfilled
if quantity_unfulfilled > 0 and order_line.variant:
create_allocation(
order_line.variant,
warehouse,
order_line,
quantity_unfulfilled,
Allocation,
)
class Migration(migrations.Migration):
dependencies = [
("order", "0081_auto_20200406_0456"),
("warehouse", "0006_auto_20200228_0519"),
]
operations = [
migrations.AlterModelOptions(
name="stock",
options={"ordering": ("pk",)},
),
migrations.CreateModel(
name="Allocation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("quantity_allocated", models.PositiveIntegerField(default=0)),
(
"order_line",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="allocations",
to="order.OrderLine",
),
),
(
"stock",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="allocations",
to="warehouse.Stock",
),
),
],
options={
"ordering": ("pk",),
"unique_together": {("order_line", "stock")},
},
),
migrations.RunPython(create_allocations),
migrations.RemoveField(
model_name="stock",
name="quantity_allocated",
),
]
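    # Note (an observation, not upstream code): RunPython above is given no
    # reverse callable, so this migration cannot be unapplied as written;
    # passing migrations.RunPython.noop as reverse_code would permit reverse
    # migration without undoing the data copy.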
|
{
"content_hash": "579759b6d0601e5a359e3ca793f4016b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 86,
"avg_line_length": 33.634408602150536,
"alnum_prop": 0.5067135549872123,
"repo_name": "mociepka/saleor",
"id": "b4d41551974a4fb65733e3a95638977238e01cb3",
"size": "3177",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/warehouse/migrations/0007_auto_20200406_0341.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
import sys, re
print >>sys.stderr, "Loading Go Runtime support."
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
#
# Pretty Printers
#
class StringTypePrinter:
"Pretty print Go strings."
pattern = re.compile(r'^struct string$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
l = int(self.val['len'])
return self.val['str'].string("utf-8", "ignore", l)
class SliceTypePrinter:
"Pretty print slices."
pattern = re.compile(r'^struct \[\]')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)[6:] # skip 'struct '
def children(self):
ptr = self.val["array"]
for idx in range(self.val["len"]):
yield ('[%d]' % idx, (ptr + idx).dereference())
class MapTypePrinter:
"""Pretty print map[K]V types.
    Map-typed go variables are really pointers. Dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hash<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
return str(self.val.type)
def children(self):
stab = self.val['st']
i = 0
for v in self.traverse_hash(stab):
yield ("[%d]" % i, v['key'])
yield ("[%d]" % (i + 1), v['val'])
i += 2
def traverse_hash(self, stab):
ptr = stab['entry'].address
end = stab['end']
while ptr < end:
v = ptr.dereference()
ptr = ptr + 1
if v['hash'] == 0: continue
if v['hash'] & 63 == 63: # subtable
for v in self.traverse_hash(v['key'].cast(self.val['st'].type)):
yield v
else:
yield v
class ChanTypePrinter:
"""Pretty print chan[T] types.
    Chan-typed go variables are really pointers. Dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hchan<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)
def children(self):
# see chan.c chanbuf()
et = [x.type for x in self.val['free'].type.target().fields() if x.name == 'elem'][0]
ptr = (self.val.address + 1).cast(et.pointer())
for i in range(self.val["qcount"]):
j = (self.val["recvx"] + i) % self.val["dataqsiz"]
yield ('[%d]' % i, (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
def matcher(val):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
except:
pass
return matcher
goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.commonType'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.commonType'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
try:
return str(val['tab'].type) == "struct runtime.itab *" \
and str(val['data'].type) == "void *"
except:
pass
def is_eface(val):
try:
return str(val['_type'].type) == "struct runtime._type *" \
and str(val['data'].type) == "void *"
except:
pass
def lookup_type(name):
try:
return gdb.lookup_type(name)
except:
pass
try:
return gdb.lookup_type('struct ' + name)
except:
pass
try:
return gdb.lookup_type('struct ' + name[1:]).pointer()
except:
pass
def iface_dtype(obj):
"Decode type of the data field of an eface or iface struct."
if is_iface(obj):
go_type_ptr = obj['tab']['_type']
elif is_eface(obj):
go_type_ptr = obj['_type']
else:
return
ct = gdb.lookup_type("struct runtime.commonType").pointer()
dynamic_go_type = go_type_ptr['ptr'].cast(ct).dereference()
dtype_name = dynamic_go_type['string'].dereference()['str'].string()
type_size = int(dynamic_go_type['size'])
uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
dynamic_gdb_type = lookup_type(dtype_name)
if type_size > uintptr_size:
dynamic_gdb_type = dynamic_gdb_type.pointer()
return dynamic_gdb_type
class IfacePrinter:
"""Pretty print interface values
Casts the data field to the appropriate dynamic type."""
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val['data'] == 0:
return 0x0
try:
dtype = iface_dtype(self.val)
except:
return "<bad dynamic type>"
try:
return self.val['data'].cast(dtype).dereference()
except:
pass
return self.val['data'].cast(dtype)
def ifacematcher(val):
if is_iface(val) or is_eface(val):
return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
"Length of strings, slices, maps or channels"
how = ((StringTypePrinter, 'len' ),
(SliceTypePrinter, 'len'),
(MapTypePrinter, 'count'),
(ChanTypePrinter, 'qcount'))
def __init__(self):
super(GoLenFunc, self).__init__("len")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class GoCapFunc(gdb.Function):
"Capacity of slices or channels"
how = ((SliceTypePrinter, 'cap'),
(ChanTypePrinter, 'dataqsiz'))
def __init__(self):
super(GoCapFunc, self).__init__("cap")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class DTypeFunc(gdb.Function):
"""Cast Interface values to their dynamic type.
For non-interface types this behaves as the identity operation.
"""
def __init__(self):
super(DTypeFunc, self).__init__("dtype")
def invoke(self, obj):
try:
return obj['data'].cast(iface_dtype(obj))
except:
pass
return obj
#
# Commands
#
sts = ( 'idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
def linked_list(ptr, linkfield):
while ptr:
yield ptr
ptr = ptr[linkfield]
class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
super(GoroutinesCmd, self).__init__("info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
# args = gdb.string_to_argv(arg)
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
if ptr['status'] == 6: # 'gdead'
continue
s = ' '
if ptr['m']:
s = '*'
pc = ptr['sched']['pc'].cast(vp)
sp = ptr['sched']['sp'].cast(vp)
blk = gdb.block_for_pc(long((pc)))
print s, ptr['goid'], "%8s" % sts[long((ptr['status']))], blk.function
def find_goroutine(goid):
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
if ptr['status'] == 6: # 'gdead'
continue
if ptr['goid'] == goid:
return [ptr['sched'][x].cast(vp) for x in 'pc', 'sp']
return None, None
class GoroutineCmd(gdb.Command):
"""Execute gdb command in the context of goroutine <goid>.
Switch PC and SP to the ones in the goroutine's G structure,
execute an arbitrary gdb command, and restore PC and SP.
Usage: (gdb) goroutine <goid> <gdbcmd>
Note that it is ill-defined to modify state in the context of a goroutine.
Restrict yourself to inspecting values.
"""
def __init__(self):
super(GoroutineCmd, self).__init__("goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
goid, cmd = arg.split(None, 1)
pc, sp = find_goroutine(int(goid))
if not pc:
print "No such goroutine: ", goid
return
save_frame = gdb.selected_frame()
gdb.parse_and_eval('$save_pc = $pc')
gdb.parse_and_eval('$save_sp = $sp')
gdb.parse_and_eval('$pc = 0x%x' % long(pc))
gdb.parse_and_eval('$sp = 0x%x' % long(sp))
try:
gdb.execute(cmd)
finally:
gdb.parse_and_eval('$pc = $save_pc')
gdb.parse_and_eval('$sp = $save_sp')
save_frame.select()
class GoIfaceCmd(gdb.Command):
"Print Static and dynamic interface types"
def __init__(self):
super(GoIfaceCmd, self).__init__("iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, arg, from_tty):
for obj in gdb.string_to_argv(arg):
try:
#TODO fix quoting for qualified variable names
obj = gdb.parse_and_eval("%s" % obj)
except Exception, e:
print "Can't parse ", obj, ": ", e
continue
dtype = iface_dtype(obj)
if not dtype:
print "Not an interface: ", obj.type
continue
print "%s: %s" % (obj.type, dtype)
# TODO: print interface's methods and dynamic type's func pointers thereof.
#rsc: "to find the number of entries in the itab's Fn field look at itab.inter->numMethods
#i am sure i have the names wrong but look at the interface type and its method count"
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
for k in vars().values():
if hasattr(k, 'invoke'):
k()
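#
# Illustrative GDB session using the helpers registered above (goroutine
# ids and variable names are made up):
#
#   (gdb) info goroutines
#   (gdb) goroutine 2 bt
#   (gdb) p $len(mystring)
#   (gdb) p $dtype(myiface)
#   (gdb) iface myiface
#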
|
{
"content_hash": "05aa69179de02c6c73223a00286e021b",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 100,
"avg_line_length": 24.904040404040405,
"alnum_prop": 0.6489555871020077,
"repo_name": "niemeyer/golang",
"id": "a96f3f3828c32e6a84991225bb0310a6fd613cde",
"size": "10019",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/pkg/runtime/runtime-gdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "201537"
},
{
"name": "C",
"bytes": "3752019"
},
{
"name": "C++",
"bytes": "597"
},
{
"name": "D",
"bytes": "13711763"
},
{
"name": "Emacs Lisp",
"bytes": "21395"
},
{
"name": "Go",
"bytes": "11717609"
},
{
"name": "JavaScript",
"bytes": "81714"
},
{
"name": "Perl",
"bytes": "23176"
},
{
"name": "Python",
"bytes": "154466"
},
{
"name": "Shell",
"bytes": "62986"
},
{
"name": "VimL",
"bytes": "20658"
}
],
"symlink_target": ""
}
|
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django_nopassword'
copyright = u'2014, Rolf Erik Lekang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django_nopassworddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django_nopassword.tex', u'django\\_nopassword Documentation',
u'Rolf Erik Lekang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django_nopassword', u'django_nopassword Documentation',
[u'Rolf Erik Lekang'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django_nopassword', u'django_nopassword Documentation',
u'Rolf Erik Lekang', 'django_nopassword', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
{
"content_hash": "b7d1346797532c7aea0665dda77b9328",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 82,
"avg_line_length": 31.745689655172413,
"alnum_prop": 0.70020366598778,
"repo_name": "mjumbewu/django-nopassword",
"id": "be2c265fba6c2ed5a0452b443d27427349595e8b",
"size": "8423",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "949"
},
{
"name": "Python",
"bytes": "32592"
}
],
"symlink_target": ""
}
|
"""
Offer numeric state listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#numeric-state-trigger
"""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_VALUE_TEMPLATE, CONF_PLATFORM, CONF_ENTITY_ID,
CONF_BELOW, CONF_ABOVE)
from homeassistant.helpers.event import track_state_change
from homeassistant.helpers import condition, config_validation as cv
TRIGGER_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_PLATFORM): 'numeric_state',
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
CONF_BELOW: vol.Coerce(float),
CONF_ABOVE: vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}), cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
_LOGGER = logging.getLogger(__name__)
def trigger(hass, config, action):
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
# pylint: disable=unused-argument
def state_automation_listener(entity, from_s, to_s):
"""Listen for state changes and calls action."""
if to_s is None:
return
variables = {
'trigger': {
'platform': 'numeric_state',
'entity_id': entity,
'below': below,
'above': above,
}
}
# If new one doesn't match, nothing to do
if not condition.numeric_state(
hass, to_s, below, above, value_template, variables):
return
# Only match if old didn't exist or existed but didn't match
# Written as: skip if old one did exist and matched
if from_s is not None and condition.numeric_state(
hass, from_s, below, above, value_template, variables):
return
variables['trigger']['from_state'] = from_s
variables['trigger']['to_state'] = to_s
action(variables)
track_state_change(
hass, entity_id, state_automation_listener)
return True
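# Illustrative automation snippet consumed by this trigger (YAML shown as a
# comment; keys mirror TRIGGER_SCHEMA above, values are made up):
#
#   trigger:
#     platform: numeric_state
#     entity_id: sensor.temperature
#     above: 17
#     below: 25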
|
{
"content_hash": "3feef7044a3a396fa696c9d261a214e1",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 32.01449275362319,
"alnum_prop": 0.6414667270258035,
"repo_name": "emilhetty/home-assistant",
"id": "3a148b0880f04265c7ec82a0729705d4ef2bdcf5",
"size": "2209",
"binary": false,
"copies": "7",
"ref": "refs/heads/patch-3",
"path": "homeassistant/components/automation/numeric_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1307989"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2562500"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
import sys
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from accumulo import AccumuloProxy
from accumulo.ttypes import *
transport = TSocket.TSocket('localhost', 42424)
transport = TTransport.TFramedTransport(transport)
protocol = TCompactProtocol.TCompactProtocol(transport)
client = AccumuloProxy.Client(protocol)
transport.open()
login = client.login('root', {'password':'secret'})
print client.listTables(login)
testtable = "pythontest"
if not client.tableExists(login, testtable):
client.createTable(login, testtable, True, TimeType.MILLIS)
row1 = {'a':[ColumnUpdate('a','a',value='value1'), ColumnUpdate('b','b',value='value2')]}
client.updateAndFlush(login, testtable, row1)
cookie = client.createScanner(login, testtable, None)
for entry in client.nextK(cookie, 10).results:
print entry
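# Tidy up (an assumption: closeScanner is the proxy call matching the
# createScanner used above).
client.closeScanner(cookie)
transport.close()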
|
{
"content_hash": "83a1517acc6c5f20832ee974e36f9404",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 30.433333333333334,
"alnum_prop": 0.7831325301204819,
"repo_name": "phrocker/accumulo",
"id": "5509ded0946a16455eea8b570a96a71ba15d4ebf",
"size": "1717",
"binary": false,
"copies": "2",
"ref": "refs/heads/ACCUMULO-3709",
"path": "proxy/examples/python/TestClient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12217"
},
{
"name": "C++",
"bytes": "1297281"
},
{
"name": "CSS",
"bytes": "5889"
},
{
"name": "HTML",
"bytes": "6628"
},
{
"name": "Java",
"bytes": "14131538"
},
{
"name": "JavaScript",
"bytes": "249599"
},
{
"name": "Makefile",
"bytes": "3565"
},
{
"name": "Perl",
"bytes": "28190"
},
{
"name": "Python",
"bytes": "906539"
},
{
"name": "Ruby",
"bytes": "193322"
},
{
"name": "Shell",
"bytes": "194601"
},
{
"name": "Thrift",
"bytes": "46026"
}
],
"symlink_target": ""
}
|
"""Subclass of pnlExisting, which is generated by wxFormBuilder."""
import wx
from odmtools.view import clsExisting
from odmtools.odmdata import series
import wx.wizard as wiz
import datetime
# Implementing pnlExisting
#class pnlExisting(clsExisting.pnlExisting):
# def __init__(self, parent):
# clsExisting.pnlExisting.__init__(self, parent)
########################################################################
class pageExisting(wiz.WizardPageSimple):
    def __init__(self, parent, title, series_service, site):
"""Constructor"""
wiz.WizardPageSimple.__init__(self, parent)
sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer = sizer
self.SetSizer(sizer)
#self.series = series
title = wx.StaticText(self, -1, title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
sizer.Add(title, 10, wx.ALIGN_CENTRE | wx.ALL, 5)
sizer.Add(wx.StaticLine(self, -1), 5, wx.EXPAND | wx.ALL, 5)
self.pnlExisting = clsExisting.pnlExisting(self) #, id=wxID_PNLEXISTING, name=u'pnlExisting',
#pos=wx.Point(536, 285), size=wx.Size(439, 357),
#style=wx.TAB_TRAVERSAL)#, sm = service_man, series = series)
self.sizer.Add(self.pnlExisting, 85, wx.ALL, 5)
self._init_data(series_service, site.id)
self.pnlExisting.olvSeriesList.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnOLVItemSelected)
self.pnlExisting.rbOverwrite.Bind(wx.EVT_RADIOBUTTON, self.onOverwrite)
self.pnlExisting.rbAppend.Bind(wx.EVT_RADIOBUTTON, self.onAppend)
def _init_data(self, series_serv, site_id):
index = 0
self.initTable(series_serv, site_id)
#if q.code == self.qcl.code:
# index = i
self.pnlExisting.olvSeriesList.Focus(index)
self.pnlExisting.olvSeriesList.Select(index)
# Handlers for pnlExisting events.
def OnOLVItemSelected(self, event):
# TODO: Implement OnOLVItemSelected
pass
def onOverwrite(self, event):
self.enableButtons(False)
# event.skip()
def onAppend(self, event):
self.enableButtons(True)
# event.Skip()
def enableButtons(self, isEnabled):
self.pnlExisting.rbNew.Enable(isEnabled)
self.pnlExisting.rbOriginal.Enable(isEnabled)
self.pnlExisting.lblOverlap.Enable(isEnabled)
def getSeries(self):
selectedObject = self.pnlExisting.olvSeriesList.GetSelectedObject()
return selectedObject.method, selectedObject.quality_control_level, selectedObject.variable, selectedObject.source
def initTable(self, dbservice, site_id):
"""Set up columns and objects to be used in the objectlistview to be visible in the series selector"""
seriesColumns = [clsExisting.ColumnDefn(key, align="left",
minimumWidth=-1, valueGetter=value,
stringConverter= '%Y-%m-%d %H:%M:%S' if 'date' in key.lower() else '%s')
for key, value in series.returnDict().iteritems()]
self.pnlExisting.olvSeriesList.SetColumns(seriesColumns)
objects = dbservice.get_series_by_site(site_id= site_id)
self.pnlExisting.olvSeriesList.SetObjects(objects)
|
{
"content_hash": "8567aa73362ee312e16855011268ed4d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 122,
"avg_line_length": 40,
"alnum_prop": 0.6387195121951219,
"repo_name": "ODM2/ODMToolsPython",
"id": "302bd5223b3eba295be1bb3c779dfa640022e682",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odmtools/controller/pageExisting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2742"
},
{
"name": "Inno Setup",
"bytes": "10064"
},
{
"name": "PLpgSQL",
"bytes": "50590441"
},
{
"name": "PowerShell",
"bytes": "7130"
},
{
"name": "Python",
"bytes": "1516957"
},
{
"name": "Shell",
"bytes": "5544"
}
],
"symlink_target": ""
}
|
from keystone import exception
from keystone import tests
from keystone.tests import test_backend
class RulesPolicy(tests.TestCase, test_backend.PolicyTests):
def setUp(self):
super(RulesPolicy, self).setUp()
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_rules.conf')])
self.load_backends()
def test_create(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_create)
def test_get(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_get)
def test_list(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_list)
def test_update(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_update)
def test_delete(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_delete)
def test_get_policy_404(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_get_policy_404)
def test_update_policy_404(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_update_policy_404)
def test_delete_policy_404(self):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_delete_policy_404)
|
{
"content_hash": "26878911267b109de50d6aa440beb508",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 36.44444444444444,
"alnum_prop": 0.6189024390243902,
"repo_name": "dsiddharth/access-keys",
"id": "124dca152da36784f1d52aef1d41d3292a93d261",
"size": "2227",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/test_backend_rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2619408"
},
{
"name": "Shell",
"bytes": "11206"
}
],
"symlink_target": ""
}
|
import csv
import sys
import pyodbc
import numpy as np
# TODO: Implement a column with the id of each transmission line
dataini = "'"+str(sys.argv[1])
horaini = str(sys.argv[2])+"'"
datafim = "'"+str(sys.argv[3])
horafim = str(sys.argv[4])+"'"
cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=.\SQLExpress;DATABASE=DTS_Teste;UID=sa;PWD=Elipse21')
cursor = cnxn.cursor()
# Build the comma-separated column list T1..T500 used in the query below.
string = ' ' + ','.join('T' + str(i) for i in range(1, 501))
querystring = 'SELECT top 1 '+string+' FROM Hist_SupportTest1 WHERE E3TimeStamp >= CAST('+str(dataini)+' '+str(horaini)+' AS datetime) AND E3TimeStamp <= CAST('+str(datafim)+' '+str(horafim)+' AS datetime) '
cursor.execute(querystring)
rows = cursor.fetchall()
array = np.array(rows)
print array
print np.shape(array)
with open('Heatmap_Google.html', 'w') as html: #enter the output filename
html.write('''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Heatmaps</title>
<style>
html, body {
height: 100%;
margin: 0;
padding: 0;
}
#map {
height: 100%;
}
#floating-panel {
position: absolute;
top: 10px;
left: 25%;
z-index: 5;
background-color: #fff;
padding: 5px;
border: 1px solid #999;
text-align: center;
font-family: 'Roboto','sans-serif';
line-height: 30px;
padding-left: 10px;
}
#floating-panel {
background-color: #fff;
border: 1px solid #999;
left: 25%;
padding: 5px;
position: absolute;
top: 10px;
z-index: 5;
}
</style>
</head>
<body>
<div id="floating-panel">
<button onclick="toggleHeatmap()">Toggle Heatmap</button>
<button onclick="changeGradient()">Change gradient</button>
<button onclick="changeRadius()">Change radius</button>
<button onclick="changeOpacity()">Change opacity</button>
</div>
<div id="map"></div>
<script>
// This example requires the Visualization library. Include the libraries=visualization
// parameter when you first load the API. For example:
// <script src="https://maps.googleapis.com/maps/api/js?key=YOUR_API_KEY&libraries=visualization">
var map, heatmap;
function initMap() {
map = new google.maps.Map(document.getElementById('map'), {
zoom: 13,
center: {lat: -25.44395279, lng: -49.28262949},
mapTypeId: google.maps.MapTypeId.SATELLITE
});
heatmap = new google.maps.visualization.HeatmapLayer({
data: getPoints(),
map: map
});
}
function toggleHeatmap() {
heatmap.setMap(heatmap.getMap() ? null : map);
}
function changeGradient() {
var gradient = [
'rgba(0, 255, 255, 0)',
'rgba(0, 255, 255, 1)',
'rgba(0, 191, 255, 1)',
'rgba(0, 127, 255, 1)',
'rgba(0, 63, 255, 1)',
'rgba(0, 0, 255, 1)',
'rgba(0, 0, 223, 1)',
'rgba(0, 0, 191, 1)',
'rgba(0, 0, 159, 1)',
'rgba(0, 0, 127, 1)',
'rgba(63, 0, 91, 1)',
'rgba(127, 0, 63, 1)',
'rgba(191, 0, 31, 1)',
'rgba(255, 0, 0, 1)'
]
heatmap.set('gradient', heatmap.get('gradient') ? null : gradient);
}
function changeRadius() {
heatmap.set('radius', heatmap.get('radius') ? null : 20);
}
function changeOpacity() {
heatmap.set('opacity', heatmap.get('opacity') ? null : 0.2);
}
//Heatmap data
function getPoints() {
var data = [ ''')
filecsv = csv.reader(open("LatLongMap.csv","rb"))
i = 1
for row in filecsv:
html.write('''
{weight: '''+str(array[0][i])+''' ,location: new google.maps.LatLng('''+row[1]+''','''+row[2]+''')},''')
i = i + 1
html.write('''];
var pointArray = new google.maps.MVCArray(data);
return pointArray;
};
</script>
<script async defer
src="https://maps.googleapis.com/maps/api/js?key=AIzaSyAbJWz2WFVeuftUAqongpQCa_pYwQnNntk&libraries=visualization&callback=initMap">
</script>
</body>
</html>
''')
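# Illustrative invocation (dates must match the CAST() datetime format used
# in the query above; the values are made up):
#
#   python heatmap_google.py 2016-01-01 00:00:00 2016-01-02 00:00:00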
|
{
"content_hash": "978bb973579d7e5855fc8096277e5280",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 207,
"avg_line_length": 26.41717791411043,
"alnum_prop": 0.5534138411518811,
"repo_name": "lucaskotres/DTS_Charts",
"id": "8ff0a77ccda8637f2a99d67d00b47509f8dc30c2",
"size": "4306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "E3_DTS/heatmap_google.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "64633"
},
{
"name": "Python",
"bytes": "38810"
}
],
"symlink_target": ""
}
|
__all__ = ['BasicGenerator', 'CodeGenerator', 'RandomCodeGenerator',
'GeneratorChild', 'FunctionGenerator', 'ConditionalGenerator']
# Import the generator classes from their submodules
from .basic import BasicGenerator
from .code import CodeGenerator
from .random import RandomCodeGenerator
from .child import GeneratorChild
from .function import FunctionGenerator
from .if_else import ConditionalGenerator
|
{
"content_hash": "5f74f17d0401622cc36da79f80f5d768",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 36.45454545454545,
"alnum_prop": 0.7955112219451371,
"repo_name": "lgrahl/klausuromat",
"id": "8ee7e090fcefa4ecfa632ba42dc01867550159f0",
"size": "428",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "klausuromat/generator/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2339"
},
{
"name": "CoffeeScript",
"bytes": "2096"
},
{
"name": "JavaScript",
"bytes": "79447"
},
{
"name": "Python",
"bytes": "120053"
}
],
"symlink_target": ""
}
|
from __future__ import division
from math import sin, cos
def eoms(_x, t, _params):
"""Point mass pendulum equations of motion.
_x is an array/list in the following order:
q1: Angle of pendulum link relative to vertical (0 downwards)
u1: A[1] measure number of the inertial angular velocity of the first link.
_params is an array/list in the following order:
        m: Mass of first pendulum point mass.
        g: Acceleration due to gravity.
        l: Length of first pendulum link.
        b: Damping coefficient at hinge.
"""
# Unpack function arguments
q1, u1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
s1 = sin(q1)
# Calculate return values
q1d = u1
u1d = -g*s1/l - b*u1/(l**2*m)
# Return calculated values
return [q1d, u1d]
def energy(_x, _params):
"""Kinetic and Potential Energy of point mass pendulum.
_x is an array/list in the following order:
q1: Angle of first pendulum link relative to vertical (0 downwards)
u1: A[1] measure number of the inertial angular velocity of the first link.
_params is an array/list in the following order:
        m: Mass of first pendulum point mass.
        g: Acceleration due to gravity.
        l: Length of first pendulum link.
        b: Damping coefficient at hinge (unused here).
Returns a list/array of kinetic energy and potential energy, respectively.
"""
# Unpack function arguments
q1, u1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
c1 = cos(q1)
# Calculate return values
ke = m*l**2*u1**2/2
pe = g*l*m*(1 - c1)
# Return calculated values
return [ke, pe]
def anim(_x, _params):
"""Calculate configuration of pendulum for purposes of animation.
    _x is the configuration coordinate of the pendulum:
        q1: Angle of first pendulum link relative to vertical (0 downwards)
    _params is an array/list in the following order:
        m: Mass of first pendulum point mass.
        g: Acceleration due to gravity.
        l: Length of first pendulum link.
        b: Damping coefficient at hinge (unused here).
Output is:
P: Position of first point mass.
"""
# Unpack function arguments
q1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
c1 = cos(q1)
s1 = sin(q1)
# Calculate return values
P_1 = 0
P_2 = l*s1
P_3 = -l*c1
# Return calculated values
return [P_1, P_2, P_3]
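# Minimal usage sketch (not from the original source; assumes scipy and
# numpy are installed, with illustrative parameter values):
# if __name__ == '__main__':
#     import numpy as np
#     from scipy.integrate import odeint
#     params = [1.0, 9.81, 1.0, 0.1]   # m, g, l, b
#     x0 = [0.5, 0.0]                  # q1, u1
#     t = np.linspace(0.0, 10.0, 1001)
#     x = odeint(eoms, x0, t, args=(params,))
#     ke, pe = energy(x[-1], params)
#     print('Final KE = %g, PE = %g' % (ke, pe))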
|
{
"content_hash": "86c3046dd7891786808aec7d606db2d3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 26.50515463917526,
"alnum_prop": 0.6285492026448852,
"repo_name": "hazelnusse/pydy",
"id": "08975846f80484c523ca3699266ddf394f4dc1de",
"size": "2571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pendulum/pendulum_lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1784463"
},
{
"name": "Python",
"bytes": "426365"
}
],
"symlink_target": ""
}
|
"""
Module for input/output utilities
"""
import numpy as np
def _fmt_string(array, float_format='{}'):
"""makes a formatting string for a rec-array; given a desired float_format."""
fmt_string = ''
for field in array.dtype.descr:
vtype = field[1][1].lower()
if (vtype == 'i'):
fmt_string += '{:.0f} '
elif (vtype == 'f'):
fmt_string += '{} '.format(float_format)
elif (vtype == 'o'):
fmt_string += '{} '
elif (vtype == 's'):
raise Exception("MfList error: '\str\' type found it dtype." + \
" This gives unpredictable results when " + \
"recarray to file - change to \'object\' type")
else:
raise Exception("MfList.fmt_string error: unknown vtype " + \
"in dtype:" + vtype)
return fmt_string
def line_parse(line):
"""
    Convert a line of text into a list of values. This handles the
case where a free formatted MODFLOW input file may have commas in
it.
"""
line = line.replace(',', ' ')
return line.strip().split()
def write_fixed_var(v, length=10, ipos=None, free=False, comment=None):
"""
Parameters
----------
v : list, int, float, bool, or numpy array
list, int, float, bool, or numpy array containing the data to be
written to a string.
length : int
length of each column for fixed column widths. (default is 10)
ipos : list, int, or numpy array
user-provided column widths. (default is None)
free : bool
boolean indicating if a free format string should be generated.
length and ipos are not used if free is True. (default is False)
comment : str
comment string to add to the end of the string
Returns
-------
out : str
fixed or free format string generated using user-provided data
"""
if isinstance(v, np.ndarray):
        v = v.tolist()
elif isinstance(v, int) or isinstance(v, float) or isinstance(v, bool):
v = [v]
ncol = len(v)
# construct ipos if it was not passed
if ipos is None:
ipos = []
for i in range(ncol):
ipos.append(length)
else:
if isinstance(ipos, np.ndarray):
            ipos = ipos.flatten().tolist()
elif isinstance(ipos, int):
ipos = [ipos]
if len(ipos) < ncol:
            err = 'user provided ipos length ({}) '.format(len(ipos)) + \
'should be greater than or equal ' + \
'to the length of v ({})'.format(ncol)
raise Exception(err)
out = ''
for n in range(ncol):
if free:
write_fmt = '{} '
else:
write_fmt = '{{:>{}}}'.format(ipos[n])
out += write_fmt.format(v[n])
if comment is not None:
out += ' # {}'.format(comment)
out += '\n'
return out
def read_fixed_var(line, ncol=1, length=10, ipos=None, free=False):
"""
Parse a fixed format line using user provided data
Parameters
----------
line : str
text string to parse.
ncol : int
number of columns to parse from line. (default is 1)
length : int
length of each column for fixed column widths. (default is 10)
ipos : list, int, or numpy array
user-provided column widths. (default is None)
free : bool
        boolean indicating if string is free format. ncol, length, and
ipos are not used if free is True. (default is False)
Returns
-------
out : list
padded list containing data parsed from the passed text string
"""
if free:
out = line.rstrip().split()
else:
# construct ipos if it was not passed
if ipos is None:
ipos = []
for i in range(ncol):
ipos.append(length)
else:
if isinstance(ipos, np.ndarray):
                ipos = ipos.flatten().tolist()
elif isinstance(ipos, int):
ipos = [ipos]
ncol = len(ipos)
line = line.rstrip()
out = []
istart = 0
for ivar in range(ncol):
istop = istart + ipos[ivar]
try:
txt = line[istart:istop]
if len(txt.strip()) > 0:
out.append(txt)
else:
out.append(0)
            except Exception:
break
istart = istop
return out
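# Round-trip sketch for the two helpers above (values are illustrative):
# >>> line = write_fixed_var([1, 2.5, 'abc'], length=10)
# >>> line
# '         1       2.5       abc\n'
# >>> read_fixed_var(line, ncol=3, length=10)
# ['         1', '       2.5', '       abc']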
def flux_to_wel(cbc_file,text,precision="single",model=None,verbose=False):
"""
Convert flux in a binary cell budget file to a wel instance
Parameters
----------
cbc_file : (str) cell budget file name
text : (str) text string of the desired flux type (e.g. "drains")
precision : (optional str) precision of the cell budget file
model : (optional) BaseModel instance. If passed, a new ModflowWel
instance will be added to model
verbose : bool flag passed to CellBudgetFile
Returns
-------
flopy.modflow.ModflowWel instance
"""
from . import CellBudgetFile as CBF
from .util_list import MfList
from ..modflow import Modflow, ModflowWel
cbf = CBF(cbc_file,precision=precision,verbose=verbose)
    # create a NaN-filled numpy array of shape (nper, nlay, nrow, ncol)
m4d = np.zeros((cbf.nper,cbf.nlay,cbf.nrow,cbf.ncol),dtype=np.float32)
m4d[:] = np.NaN
# process the records in the cell budget file
iper = -1
for kstpkper in cbf.kstpkper:
kstpkper = (kstpkper[0]-1,kstpkper[1]-1)
kper = kstpkper[1]
#if we haven't visited this kper yet
if kper != iper:
arr = cbf.get_data(kstpkper=kstpkper,text=text,full3D=True)
if len(arr) > 0:
arr = arr[0]
print(arr.max(),arr.min(),arr.sum())
# masked where zero
arr[np.where(arr==0.0)] = np.NaN
m4d[iper+1] = arr
iper += 1
    # if no model was passed, create a generic one
if model is None:
model = Modflow("test")
# if model doesn't have a wel package, then make a generic one...
# need this for the from_m4d method
if model.wel is None:
ModflowWel(model)
# get the stress_period_data dict {kper:np recarray}
sp_data = MfList.from_4d(model,"WEL",{"flux":m4d})
wel = ModflowWel(model,stress_period_data=sp_data)
return wel
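# Minimal usage sketch (not from the original source; the file name and
# budget text below are illustrative placeholders):
# wel = flux_to_wel('model.cbc', text='DRAINS', precision='single')
# wel.write_file()  # write the generated WEL package to disk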
|
{
"content_hash": "ef304fecd14f18b96e371f9ca613ff01",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 82,
"avg_line_length": 32.779411764705884,
"alnum_prop": 0.537909376401974,
"repo_name": "mrustl/flopy",
"id": "947b2d718da092fab76ccb4d66eb68dfdae5535f",
"size": "6687",
"binary": false,
"copies": "1",
"ref": "refs/heads/kwb",
"path": "flopy/utils/flopy_io.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "1772821"
},
{
"name": "Visual Basic",
"bytes": "3938"
}
],
"symlink_target": ""
}
|
from . import ffi, librtmp
__all__ = ["RTMPPacket",
"PACKET_SIZE_LARGE", "PACKET_SIZE_MEDIUM",
"PACKET_SIZE_SMALL", "PACKET_SIZE_MINIMUM",
"PACKET_TYPE_CHUNK_SIZE", "PACKET_TYPE_BYTES_READ_REPORT",
"PACKET_TYPE_CONTROL", "PACKET_TYPE_SERVER_BW",
"PACKET_TYPE_CLIENT_BW", "PACKET_TYPE_AUDIO",
"PACKET_TYPE_VIDEO", "PACKET_TYPE_FLEX_STREAM_SEND",
"PACKET_TYPE_FLEX_SHARED_OBJECT", "PACKET_TYPE_FLEX_MESSAGE",
"PACKET_TYPE_INFO", "PACKET_TYPE_SHARED_OBJECT",
"PACKET_TYPE_INVOKE", "PACKET_TYPE_FLASH_VIDEO"]
(PACKET_SIZE_LARGE, PACKET_SIZE_MEDIUM, PACKET_SIZE_SMALL,
PACKET_SIZE_MINIMUM) = range(4)
PACKET_TYPE_CHUNK_SIZE = 0x01
PACKET_TYPE_BYTES_READ_REPORT = 0x03
PACKET_TYPE_CONTROL = 0x04
PACKET_TYPE_SERVER_BW = 0x05
PACKET_TYPE_CLIENT_BW = 0x06
PACKET_TYPE_AUDIO = 0x08
PACKET_TYPE_VIDEO = 0x09
PACKET_TYPE_FLEX_STREAM_SEND = 0x0F
PACKET_TYPE_FLEX_SHARED_OBJECT = 0x10
PACKET_TYPE_FLEX_MESSAGE = 0x11
PACKET_TYPE_INFO = 0x12
PACKET_TYPE_SHARED_OBJECT = 0x13
PACKET_TYPE_INVOKE = 0x14
PACKET_TYPE_FLASH_VIDEO = 0x16
class RTMPPacket(object):
@classmethod
def _from_pointer(cls, pointer):
packet = cls.__new__(cls)
packet.packet = pointer
return packet
def __init__(self, type, format, channel, timestamp=0,
absolute_timestamp=False, body=None):
self.packet = ffi.new("RTMPPacket*")
self.type = type
self.format = format
self.channel = channel
self.timestamp = timestamp
self.absolute_timestamp = absolute_timestamp
if not body:
body = b""
self.body = body
@property
def format(self):
"""Format of the packet."""
return self.packet.m_headerType
@format.setter
def format(self, value):
self.packet.m_headerType = int(value)
@property
def type(self):
"""Type of the packet."""
return self.packet.m_packetType
@type.setter
def type(self, value):
self.packet.m_packetType = int(value)
@property
def channel(self):
"""Channel of the packet."""
return self.packet.m_nChannel
@channel.setter
def channel(self, value):
self.packet.m_nChannel = int(value)
@property
def timestamp(self):
"""Timestamp of the packet."""
return self.packet.m_nTimeStamp
@timestamp.setter
def timestamp(self, value):
self.packet.m_nTimeStamp = int(value)
@property
def absolute_timestamp(self):
"""True if the timestamp is absolute."""
return bool(self.packet.m_hasAbsTimestamp)
@absolute_timestamp.setter
def absolute_timestamp(self, value):
self.packet.m_hasAbsTimestamp = int(bool(value))
@property
def body(self):
"""The body of the packet."""
view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
return view[:]
@body.setter
def body(self, value):
size = len(value)
librtmp.RTMPPacket_Alloc(self.packet, size)
view = ffi.buffer(self.packet.m_body, size)
view[:] = value
self.packet.m_nBodySize = size
def dump(self):
"""Dumps packet to logger."""
librtmp.RTMPPacket_Dump(self.packet)
def __del__(self):
librtmp.RTMPPacket_Free(self.packet)
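# Minimal usage sketch (not from the original source; the body below is an
# illustrative placeholder, not a valid AMF payload):
# packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
#                     format=PACKET_SIZE_LARGE,
#                     channel=0x03,
#                     body=b'...encoded AMF0 invoke...')
# packet.dump()  # log the packet contents via librtmp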
|
{
"content_hash": "41fd038fc90f29069a2ed9451eb5bc4a",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 72,
"avg_line_length": 28.258064516129032,
"alnum_prop": 0.6024543378995434,
"repo_name": "Autotonic/piny-librtmp",
"id": "0bdecb50ea5a7e1326b02e4d5ddea527f9398e0c",
"size": "3504",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rtmp/librtmp/packet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49209"
},
{
"name": "Python",
"bytes": "104969"
}
],
"symlink_target": ""
}
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
{
"content_hash": "5216e735393031e5eb70b77b4276a215",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 99,
"avg_line_length": 42.55,
"alnum_prop": 0.7614571092831962,
"repo_name": "gmimano/commcaretest",
"id": "841bf8fe738ad5ce074f3289ead87fa8196e0d7d",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/gunicorn_conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
}
|
"""
@package mi.dataset.parser.test.test_metbk_a_dcl
@file marine-integrations/mi/dataset/parser/test/test_metbk_a_dcl.py
@author Ronald Ronquillo
@brief Test code for a metbk_a_dcl data parser
In the following files, Metadata consists of 4 records.
There is 1 group of Sensor Data records for each set of metadata.
Files used for testing:
20140805.metbk2.log
Metadata - 4 set, Sensor Data - 1430 records
20140901.metbk2.log
Metadata - 7 sets, Sensor Data - 1061 records
20140902.metbk2.log
Metadata - 0 sets, Sensor Data - 863 records
20140917.metbk2.log
Metadata - 0 sets, Sensor Data - 904 records
20140805.metbk2_bad_sensor.log
Metadata - 4 sets, Sensor Data - 9 records
20140901.metbk2_no_sensor.log
Metadata - 7 sets, Sensor Data - 0 records
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.metbk_a.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.metbk_a_dcl import MetbkADclParser
from mi.dataset.driver.metbk_a.dcl.metbk_dcl_a_driver import MODULE_NAME, \
RECOVERED_PARTICLE_CLASS, TELEMETERED_PARTICLE_CLASS
log = get_logger()
FILE_4_1430 = '20140805.metbk2.log'
FILE_4_9 = '20140805.metbk2_bad_sensor.log'
FILE_7_0 = '20140901.metbk2_no_sensor.log'
FILE_7_1061 = '20140901.metbk2.log'
FILE_0_863 = '20140902.metbk2.log'
FILE_0_904 = '20140917.metbk2.log'
FILE_8_1440 = '20171114.metbk.log'
YML_0_863 = 'rec_20140902.metbk2.yml'
YML_4_1430 = 'tel_20140805.metbk2.yml'
YML_7_1061 = 'tel_20140901.metbk2.yml'
YML_0_904 = 'rec_20140917.metbk2.yml'
YML_8_1440 = 'tel_20171114.metbk.yml'
RECORDS_FILE_4_1430 = 1430 # number of records expected
RECORDS_FILE_7_1061 = 1061 # number of records expected
RECORDS_FILE_0_863 = 863 # number of records expected
RECORDS_FILE_0_904 = 904 # number of records expected
TOTAL_RECORDS_FILE_7_0 = 7 # total number of records
TOTAL_RECORDS_FILE_4_9 = 60 # total number of records
RECORDS_FILE_8_1440 = 1440 # total number of records
RECORDS_FILE_4_9 = 9 # number of records expected
EXCEPTIONS_FILE_4_0 = 47 # number of exceptions expected
@attr('UNIT', group='mi')
class MetbkADclParserUnitTestCase(ParserUnitTestCase):
"""
metbk_a_dcl Parser unit test suite
"""
def create_parser(self, particle_class, file_handle):
"""
        This function creates a MetbkADclParser for the given particle class.
"""
parser = MetbkADclParser(
{DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: particle_class},
file_handle,
self.exception_callback)
return parser
def open_file(self, filename):
my_file = open(os.path.join(RESOURCE_PATH, filename), mode='r')
return my_file
def setUp(self):
ParserUnitTestCase.setUp(self)
def create_yml(self, particles, filename):
particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def test_big_giant_input(self):
"""
Read a large file and verify that all expected particles can be read.
Verification is not done at this time, but will be done in the
tests below.
"""
log.debug('===== START TEST BIG GIANT INPUT RECOVERED =====')
in_file = self.open_file(FILE_0_863)
parser = self.create_parser(RECOVERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles in this file.
number_expected_results = RECORDS_FILE_0_863
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== START TEST BIG GIANT INPUT TELEMETERED =====')
in_file = self.open_file(FILE_0_904)
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles in this file.
number_expected_results = RECORDS_FILE_0_904
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END TEST BIG GIANT INPUT =====')
def test_get_many(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
"""
log.debug('===== START TEST GET MANY RECOVERED =====')
in_file = self.open_file(FILE_0_863)
parser = self.create_parser(RECOVERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(RECORDS_FILE_0_863)
# self.assertEqual(result, expected_particle)
self.assert_particles(result, YML_0_863, RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== START TEST GET MANY TELEMETERED =====')
in_file = self.open_file(FILE_7_1061)
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(RECORDS_FILE_7_1061)
self.assert_particles(result, YML_7_1061, RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST GET MANY =====')
def test_invalid_sensor_data_records(self):
"""
Read data from a file containing invalid sensor data records.
Verify that only the expected number of instrument particles are produced
and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE_4_9)
parser = self.create_parser(RECOVERED_PARTICLE_CLASS, in_file)
# Try to get records and verify expected number of particles are returned.
result = parser.get_records(TOTAL_RECORDS_FILE_4_9)
self.assertEqual(len(result), RECORDS_FILE_4_9)
self.assertEqual(len(self.exception_callback_value), EXCEPTIONS_FILE_4_0)
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_no_sensor_data(self):
"""
Read a file containing no sensor data records
and verify that no particles are produced.
"""
log.debug('===== START TEST NO SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE_7_0)
parser = self.create_parser(RECOVERED_PARTICLE_CLASS, in_file)
# Try to get a record and verify that none are produced.
result = parser.get_records(TOTAL_RECORDS_FILE_7_0)
self.assertEqual(result, [])
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST NO SENSOR DATA =====')
def test_simple(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START TEST SIMPLE RECOVERED =====')
in_file = self.open_file(FILE_4_1430)
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(RECORDS_FILE_4_1430)
self.assert_particles(result, YML_4_1430, RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST SIMPLE =====')
def test_bug_9692(self):
"""
Test to verify change made to dcl_file_common.py works with DCL
timestamps containing seconds >59
"""
in_file = self.open_file("20140805.metbk2A.log")
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(5)
self.assertEqual(len(result), 4)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
def test_bug_13106(self):
"""
Test to verify change made to metbk_a_dcl.py works with non-floating point
        variables and ensures that the values can be output.
"""
in_file = self.open_file(FILE_8_1440)
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(RECORDS_FILE_8_1440)
self.assert_particles(result, YML_8_1440, RESOURCE_PATH)
self.assertEqual(len(result), RECORDS_FILE_8_1440)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
def test_small(self):
"""
Test to verify various formats in the log and ensure that the data is correct.
"""
in_file = self.open_file("test.metbk2.log")
parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(10)
self.assert_particles(result, "small_test.yml", RESOURCE_PATH)
self.assertEqual(len(result), 4)
self.assertEqual(len(self.exception_callback_value), 3)
in_file.close()
|
{
"content_hash": "a1b311d79ca6848b618a6022dd3fd4e2",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 86,
"avg_line_length": 36.91570881226053,
"alnum_prop": 0.6546964193046185,
"repo_name": "renegelinas/mi-instrument",
"id": "58aac13c4cf0df98597e5f1e501db25b3b37fee1",
"size": "9658",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/test/test_metbk_a_dcl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10013408"
}
],
"symlink_target": ""
}
|
"""Tests for mb.py."""
import ast
import json
import StringIO
import os
import sys
import unittest
import mb
class FakeMBW(mb.MetaBuildWrapper):
def __init__(self, win32=False):
super(FakeMBW, self).__init__()
# Override vars for test portability.
if win32:
self.src_dir = 'c:\\fake_src'
self.default_config = 'c:\\fake_src\\tools_webrtc\\mb\\mb_config.pyl'
self.default_isolate_map = ('c:\\fake_src\\testing\\buildbot\\'
'gn_isolate_map.pyl')
self.platform = 'win32'
self.executable = 'c:\\python\\python.exe'
self.sep = '\\'
else:
self.src_dir = '/fake_src'
self.default_config = '/fake_src/tools_webrtc/mb/mb_config.pyl'
self.default_isolate_map = '/fake_src/testing/buildbot/gn_isolate_map.pyl'
self.executable = '/usr/bin/python'
self.platform = 'linux2'
self.sep = '/'
self.files = {}
self.calls = []
self.cmds = []
self.cross_compile = None
self.out = ''
self.err = ''
self.rmdirs = []
def ExpandUser(self, path):
return '$HOME/%s' % path
def Exists(self, path):
return self.files.get(path) is not None
def MaybeMakeDirectory(self, path):
self.files[path] = True
def PathJoin(self, *comps):
return self.sep.join(comps)
def ReadFile(self, path):
return self.files[path]
def WriteFile(self, path, contents, force_verbose=False):
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
self.files[path] = contents
def Call(self, cmd, env=None, buffer_output=True):
if env:
self.cross_compile = env.get('GYP_CROSSCOMPILE')
self.calls.append(cmd)
if self.cmds:
return self.cmds.pop(0)
return 0, '', ''
def Print(self, *args, **kwargs):
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
f = kwargs.get('file', sys.stdout)
if f == sys.stderr:
self.err += sep.join(args) + end
else:
self.out += sep.join(args) + end
def TempFile(self, mode='w'):
return FakeFile(self.files)
def RemoveFile(self, path):
del self.files[path]
def RemoveDirectory(self, path):
self.rmdirs.append(path)
files_to_delete = [f for f in self.files if f.startswith(path)]
for f in files_to_delete:
self.files[f] = None
class FakeFile(object):
def __init__(self, files):
self.name = '/tmp/file'
self.buf = ''
self.files = files
def write(self, contents):
self.buf += contents
def close(self):
self.files[self.name] = self.buf
TEST_CONFIG = """\
{
'masters': {
'chromium': {},
'fake_master': {
'fake_builder': 'gyp_rel_bot',
'fake_gn_builder': 'gn_rel_bot',
'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
'fake_gn_debug_builder': 'gn_debug_goma',
'fake_gyp_builder': 'gyp_debug',
'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
'fake_memcheck_bot': 'gn_memcheck_bot',
'fake_multi_phase': { 'phase_1': 'gn_phase_1', 'phase_2': 'gn_phase_2'},
'fake_android_bot': 'gn_android_bot',
},
},
'configs': {
'gyp_rel_bot': ['gyp', 'rel', 'goma'],
'gn_debug_goma': ['gn', 'debug', 'goma'],
'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
'gn_rel_bot': ['gn', 'rel', 'goma'],
'gyp_crosscompile': ['gyp', 'crosscompile'],
'gn_phase_1': ['gn', 'phase_1'],
'gn_phase_2': ['gn', 'phase_2'],
'gn_memcheck_bot': ['gn', 'memcheck'],
'gn_android_bot': ['gn', 'android'],
},
'mixins': {
'crosscompile': {
'gyp_crosscompile': True,
},
'fake_feature1': {
'gn_args': 'enable_doom_melon=true',
'gyp_defines': 'doom_melon=1',
},
'gyp': {'type': 'gyp'},
'gn': {'type': 'gn'},
'goma': {
'gn_args': 'use_goma=true',
'gyp_defines': 'goma=1',
},
'phase_1': {
'gn_args': 'phase=1',
'gyp_args': 'phase=1',
},
'phase_2': {
'gn_args': 'phase=2',
'gyp_args': 'phase=2',
},
'rel': {
'gn_args': 'is_debug=false',
},
'debug': {
'gn_args': 'is_debug=true',
},
'memcheck': {
'gn_args': 'rtc_use_memcheck=true',
},
'android': {
'gn_args': 'target_os="android"',
}
},
}
"""
GYP_HACKS_CONFIG = """\
{
'masters': {
'chromium': {},
'fake_master': {
'fake_builder': 'fake_config',
},
},
'configs': {
'fake_config': ['fake_mixin'],
},
'mixins': {
'fake_mixin': {
'type': 'gyp',
'gn_args': '',
'gyp_defines':
('foo=bar llvm_force_head_revision=1 '
'gyp_link_concurrency=1 baz=1'),
},
},
}
"""
class UnitTest(unittest.TestCase):
def fake_mbw(self, files=None, win32=False):
mbw = FakeMBW(win32=win32)
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
mbw.files.setdefault(
mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'),
'''{
"foo_unittests": {
"label": "//foo:foo_unittests",
"type": "console_test_launcher",
"args": [],
},
}''')
mbw.files.setdefault(
mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
'is_debug = false\n')
if files:
for path, contents in files.items():
mbw.files[path] = contents
return mbw
def check(self, args, mbw=None, files=None, out=None, err=None, ret=None):
if not mbw:
mbw = self.fake_mbw(files)
actual_ret = mbw.Main(args)
self.assertEqual(actual_ret, ret)
if out is not None:
self.assertEqual(mbw.out, out)
if err is not None:
self.assertEqual(mbw.err, err)
return mbw
def test_clobber(self):
files = {
'/fake_src/out/Debug': None,
'/fake_src/out/Debug/mb_type': None,
}
mbw = self.fake_mbw(files)
# The first time we run this, the build dir doesn't exist, so no clobber.
self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, [])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
# The second time we run this, the build dir exists and matches, so no
# clobber.
self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, [])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
# Now we switch build types; this should result in a clobber.
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
# Now we delete mb_type; this checks the case where the build dir
# exists but wasn't populated by mb; this should also result in a clobber.
del mbw.files['/fake_src/out/Debug/mb_type']
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs,
['/fake_src/out/Debug', '/fake_src/out/Debug'])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
def test_gn_analyze(self):
files = {'/tmp/in.json': '''{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests"],
"additional_compile_targets": ["all"]
}''',
'/tmp/out.json.gn': '''{\
"status": "Found dependency",
"compile_targets": ["//foo:foo_unittests"],
"test_targets": ["//foo:foo_unittests"]
}'''}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'Found dependency',
'compile_targets': ['foo:foo_unittests'],
'test_targets': ['foo_unittests']
})
def test_gn_gen(self):
mbw = self.fake_mbw()
self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
('goma_dir = "/goma"\n'
'is_debug = true\n'
'use_goma = true\n'))
# Make sure we log both what is written to args.gn and the command line.
self.assertIn('Writing """', mbw.out)
self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check',
mbw.out)
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
('goma_dir = "c:\\\\goma"\n'
'is_debug = true\n'
'use_goma = true\n'))
self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
'--check\n', mbw.out)
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
self.assertEqual(
mbw.files['/fake_src/out/Debug/args.gn'],
'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
def test_gn_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
def test_gn_gen_swarming(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'cc_perftests': {"
" 'label': '//cc:cc_perftests',"
" 'type': 'console_test_launcher',"
"}}\n"
),
      'c:\\fake_src\\out\\Default\\cc_perftests.exe.runtime_deps': (
"cc_perftests\n"
),
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
'-c', 'gn_debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
def test_gn_gen_swarming_android(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'additional_compile_target',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.check(['gen', '-c', 'gn_android_bot', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, ['base_unittests'])
self.assertEqual(command, [
'../../build/android/test_wrapper/logdog_wrapper.py',
'--target', 'base_unittests',
'--logdog-bin-cmd', '../../bin/logdog_butler',
'--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
'--store-tombstones',
])
def test_gn_gen_swarming_android_junit_test(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'junit_test',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.check(['gen', '-c', 'gn_android_bot', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, ['base_unittests'])
self.assertEqual(command, [
'../../build/android/test_wrapper/logdog_wrapper.py',
'--target', 'base_unittests',
'--logdog-bin-cmd', '../../bin/logdog_butler',
'--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
'--store-tombstones',
])
def test_gn_gen_timeout(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'non_parallel_console_test_launcher',"
" 'timeout': 500,"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.check(['gen', '-c', 'gn_debug_goma', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../third_party/gtest-parallel/gtest-parallel',
'../../third_party/gtest-parallel/gtest_parallel.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'base_unittests',
])
self.assertEqual(command, [
'../../testing/test_env.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'--output_dir=${ISOLATED_OUTDIR}/test_logs',
'--gtest_color=no',
'--timeout=500',
'--retry_failed=3',
'./base_unittests',
'--workers=1',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_gen_script(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests_script\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests_script': {"
" 'label': '//base:base_unittests',"
" 'type': 'script',"
" 'script': '//base/base_unittests_script.py',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
"base_unittests_script.py\n"
),
}
mbw = self.check(['gen', '-c', 'gn_debug_goma', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = (
mbw.files['/fake_src/out/Default/base_unittests_script.isolate'])
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'base_unittests',
'base_unittests_script.py',
])
self.assertEqual(command, [
'../../base/base_unittests_script.py',
])
def test_gn_gen_non_parallel_console_test_launcher(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'non_parallel_console_test_launcher',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.check(['gen', '-c', 'gn_debug_goma', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../third_party/gtest-parallel/gtest-parallel',
'../../third_party/gtest-parallel/gtest_parallel.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'base_unittests',
])
self.assertEqual(command, [
'../../testing/test_env.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'--output_dir=${ISOLATED_OUTDIR}/test_logs',
'--gtest_color=no',
'--timeout=900',
'--retry_failed=3',
'./base_unittests',
'--workers=1',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_isolate_windowed_test_launcher_linux(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'windowed_test_launcher',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
"some_resource_file\n"
),
}
mbw = self.check(['gen', '-c', 'gn_debug_goma', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../testing/xvfb.py',
'../../third_party/gtest-parallel/gtest-parallel',
'../../third_party/gtest-parallel/gtest_parallel.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'base_unittests',
'some_resource_file',
])
self.assertEqual(command, [
'../../testing/xvfb.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'--output_dir=${ISOLATED_OUTDIR}/test_logs',
'--gtest_color=no',
'--timeout=900',
'--retry_failed=3',
'./base_unittests',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_gen_windowed_test_launcher_win(self):
files = {
'/tmp/swarming_targets': 'unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'unittests': {"
" 'label': '//somewhere:unittests',"
" 'type': 'windowed_test_launcher',"
"}}\n"
),
r'c:\fake_src\out\Default\unittests.exe.runtime_deps': (
"unittests.exe\n"
"some_dependency\n"
),
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
'-c', 'gn_debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
'//out/Default'], mbw=mbw, ret=0)
isolate_file = mbw.files['c:\\fake_src\\out\\Default\\unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../third_party/gtest-parallel/gtest-parallel',
'../../third_party/gtest-parallel/gtest_parallel.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'some_dependency',
'unittests.exe',
])
self.assertEqual(command, [
'../../testing/test_env.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'--output_dir=${ISOLATED_OUTDIR}\\test_logs',
'--gtest_color=no',
'--timeout=900',
'--retry_failed=3',
r'.\unittests.exe',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_gen_console_test_launcher(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'console_test_launcher',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.check(['gen', '-c', 'gn_debug_goma', '//out/Default',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../third_party/gtest-parallel/gtest-parallel',
'../../third_party/gtest-parallel/gtest_parallel.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'base_unittests',
])
self.assertEqual(command, [
'../../testing/test_env.py',
'../../tools_webrtc/gtest-parallel-wrapper.py',
'--output_dir=${ISOLATED_OUTDIR}/test_logs',
'--gtest_color=no',
'--timeout=900',
'--retry_failed=3',
'./base_unittests',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_isolate_console_test_launcher_memcheck(self):
test_files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'console_test_launcher',"
"}}\n"
),
'/fake_src/out/Release/base_unittests.runtime_deps': (
"base_unittests\n"
"lots_of_memcheck_dependencies\n"
"../../tools_webrtc/valgrind/webrtc_tests.sh\n"
),
}
mbw = self.check(['gen', '-c', 'gn_memcheck_bot', '//out/Release',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl'],
files=test_files, ret=0)
isolate_file = mbw.files['/fake_src/out/Release/base_unittests.isolate']
isolate_file_contents = ast.literal_eval(isolate_file)
files = isolate_file_contents['variables']['files']
command = isolate_file_contents['variables']['command']
self.assertEqual(files, [
'../../testing/test_env.py',
'../../tools_webrtc/valgrind/webrtc_tests.sh',
'base_unittests',
'lots_of_memcheck_dependencies',
])
self.assertEqual(command, [
'../../testing/test_env.py',
'bash',
'../../tools_webrtc/valgrind/webrtc_tests.sh',
'--tool',
'memcheck',
'--target',
'Release',
'--build-dir',
'..',
'--test',
'./base_unittests',
'--',
'--asan=0',
'--lsan=0',
'--msan=0',
'--tsan=0',
])
def test_gn_isolate(self):
files = {
'/fake_src/out/Default/toolchain.ninja': "",
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'non_parallel_console_test_launcher',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
# test running isolate on an existing build_dir
files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
files['/fake_src/out/Default/mb_type'] = 'gn\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
def test_gn_run(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'windowed_test_launcher',"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
def test_gn_lookup(self):
self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
def test_gn_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
out=('\n'
'Writing """\\\n'
'goma_dir = "/foo"\n'
'is_debug = false\n'
'use_goma = true\n'
'""" to _path_/args.gn.\n\n'
'/fake_src/buildtools/linux64/gn gen _path_\n'))
def test_gyp_analyze(self):
mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
'/tmp/in.json', '/tmp/out.json'], ret=0)
self.assertIn('analyzer', mbw.calls[0])
def test_gyp_crosscompile(self):
mbw = self.fake_mbw()
self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
mbw=mbw, ret=0)
self.assertTrue(mbw.cross_compile)
def test_gyp_gen(self):
self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
ret=0,
out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
"python build/gyp_chromium -G output_dir=out\n"))
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
mbw=mbw, ret=0,
out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
"python build\\gyp_chromium -G output_dir=out\n"))
def test_gyp_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
def test_gyp_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
"python build/gyp_chromium -G output_dir=_path_\n"))
def test_help(self):
orig_stdout = sys.stdout
try:
sys.stdout = StringIO.StringIO()
self.assertRaises(SystemExit, self.check, ['-h'])
self.assertRaises(SystemExit, self.check, ['help'])
self.assertRaises(SystemExit, self.check, ['help', 'gen'])
finally:
sys.stdout = orig_stdout
def test_multiple_phases(self):
# Check that not passing a --phase to a multi-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'],
ret=1)
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
'--phase', 'phase_1'], ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
# Check that passing a wrong phase key to a multi-phase builder fails.
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
'--phase', 'wrong_phase'], ret=1)
self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
# Check that passing a correct phase key to a multi-phase builder passes.
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
'--phase', 'phase_1'], ret=0)
self.assertIn('phase = 1', mbw.out)
mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
'--phase', 'phase_2'], ret=0)
self.assertIn('phase = 2', mbw.out)
def test_validate(self):
mbw = self.fake_mbw()
self.check(['validate'], mbw=mbw, ret=0)
def test_gyp_env_hacks(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
ret=0,
out=("GYP_DEFINES='foo=bar baz=1'\n"
"GYP_LINK_CONCURRENCY=1\n"
"LLVM_FORCE_HEAD_REVISION=1\n"
"python build/gyp_chromium -G output_dir=_path_\n"))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "09c741b510f91105c9fc6ca7b9bfcc9b",
"timestamp": "",
"source": "github",
"line_count": 874,
"max_line_length": 80,
"avg_line_length": 34.59038901601831,
"alnum_prop": 0.5381053188674253,
"repo_name": "koobonil/Boss2D",
"id": "dd475317ba4dba4ae68fc36cadfbda5d5ab5f226",
"size": "30644",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Boss2D/addon/_old/webrtc-qt5.11.2_for_boss/tools_webrtc/mb/mb_unittest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4820445"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "89930"
},
{
"name": "C",
"bytes": "119747922"
},
{
"name": "C#",
"bytes": "87505"
},
{
"name": "C++",
"bytes": "272329620"
},
{
"name": "CMake",
"bytes": "1199656"
},
{
"name": "CSS",
"bytes": "42679"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1651996"
},
{
"name": "DIGITAL Command Language",
"bytes": "239527"
},
{
"name": "Dockerfile",
"bytes": "9638"
},
{
"name": "Emacs Lisp",
"bytes": "15570"
},
{
"name": "Go",
"bytes": "858185"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "2958385"
},
{
"name": "Java",
"bytes": "2921052"
},
{
"name": "JavaScript",
"bytes": "178190"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "M4",
"bytes": "775724"
},
{
"name": "MATLAB",
"bytes": "74606"
},
{
"name": "Makefile",
"bytes": "3941551"
},
{
"name": "Meson",
"bytes": "2847"
},
{
"name": "Module Management System",
"bytes": "2626"
},
{
"name": "NSIS",
"bytes": "4505"
},
{
"name": "Objective-C",
"bytes": "4090702"
},
{
"name": "Objective-C++",
"bytes": "1702390"
},
{
"name": "PHP",
"bytes": "3530"
},
{
"name": "Perl",
"bytes": "11096338"
},
{
"name": "Perl 6",
"bytes": "11802"
},
{
"name": "PowerShell",
"bytes": "38571"
},
{
"name": "Python",
"bytes": "24123805"
},
{
"name": "QMake",
"bytes": "18188"
},
{
"name": "Roff",
"bytes": "1261269"
},
{
"name": "Ruby",
"bytes": "5890"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "2879948"
},
{
"name": "TeX",
"bytes": "243507"
},
{
"name": "TypeScript",
"bytes": "1593696"
},
{
"name": "Verilog",
"bytes": "1215"
},
{
"name": "Vim Script",
"bytes": "3759"
},
{
"name": "Visual Basic",
"bytes": "16186"
},
{
"name": "eC",
"bytes": "9705"
}
],
"symlink_target": ""
}
|
import os
import re
import sh
import pytest
from binaryornot.check import is_binary
PATTERN = r'{{(\s?cookiecutter)[.](.*?)}}'
RE_OBJ = re.compile(PATTERN)
@pytest.fixture
def context():
return {
'project_name': 'My Test Project',
'project_slug': 'my_test_project',
'author_name': 'Test Author',
'email': 'test@example.com',
'description': 'A short description of the project.',
'domain_name': 'example.com',
'version': '0.1.0',
'timezone': 'UTC',
'now': '2015/01/13',
'year': '2015'
}
def build_files_list(root_dir):
"""Build a list containing absolute paths to the generated files."""
return [
os.path.join(dirpath, file_path)
for dirpath, subdirs, files in os.walk(root_dir)
for file_path in files
]
def check_paths(paths):
"""Method to check all paths have correct substitutions,
used by other tests cases
"""
# Assert that no match is found in any of the files
for path in paths:
if is_binary(path):
continue
        with open(path, 'r') as f:
            for line in f:
                match = RE_OBJ.search(line)
                msg = 'cookiecutter variable not replaced in {}'
                assert match is None, msg.format(path)
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == context['project_slug']
assert result.project.isdir()
paths = build_files_list(str(result.project))
assert paths
check_paths(paths)
@pytest.fixture(params=['use_mailhog', 'use_celery', 'windows'])
def feature_context(request, context):
context.update({request.param: 'y'})
return context
def test_enabled_features(cookies, feature_context):
result = cookies.bake(extra_context=feature_context)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == feature_context['project_slug']
assert result.project.isdir()
paths = build_files_list(str(result.project))
assert paths
check_paths(paths)
def test_flake8_compliance(cookies):
"""generated project should pass flake8"""
result = cookies.bake()
try:
sh.flake8(str(result.project))
except sh.ErrorReturnCode as e:
        pytest.fail(str(e))
|
{
"content_hash": "2e68c0210e728a3c4f29d77ad23c9add",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 72,
"avg_line_length": 27.21590909090909,
"alnum_prop": 0.6413361169102296,
"repo_name": "aeikenberry/cookiecutter-django-rest-babel",
"id": "6815751a81b72cb7d016832b1dc68ac5b89e4b6e",
"size": "2420",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_cookiecutter_generation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5146"
},
{
"name": "CSS",
"bytes": "1775"
},
{
"name": "HTML",
"bytes": "21110"
},
{
"name": "JavaScript",
"bytes": "3681"
},
{
"name": "Makefile",
"bytes": "5664"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "66827"
},
{
"name": "Shell",
"bytes": "8430"
}
],
"symlink_target": ""
}
|
'''
roverbattery.py Check battery on Brookstone Rover 2.0.
Copyright (C) 2014 Simon D. Levy
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import rover
rover = rover.Rover()
print('Battery at %d%%' % rover.getBatteryPercentage())
rover.close()
|
{
"content_hash": "1da7f006f0cd1dffd55f82ebe0edb007",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 26,
"alnum_prop": 0.7606837606837606,
"repo_name": "GearsAD/semisorted_arnerve",
"id": "cc6cc8cdaa27e10526612931261208ddf53465b7",
"size": "725",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "arnerve_bot/roverpylot/roverbattery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "912872"
},
{
"name": "C#",
"bytes": "66637"
},
{
"name": "Java",
"bytes": "30714"
},
{
"name": "Python",
"bytes": "354774"
}
],
"symlink_target": ""
}
|
import re
import sys
import hashlib
from .. import DistData, SEPRTR, W_BADPRICE
# Distributors definitions.
from .distributor import distributor_class
from .log__ import debug_overview, debug_obsessive, warning
__all__ = ['dist_local_template']
if sys.version_info[0] < 3:
from urlparse import urlsplit, urlunsplit
def to_bytes(val):
return val
else:
from urllib.parse import urlsplit, urlunsplit
def to_bytes(val):
return val.encode('utf-8')
unique_catalogs = {}
def make_unique_catalog_number(p, dist):
FIELDS_MANFCAT = ([d + '#' for d in distributor_class.get_distributors_iter()] + ['manf#'])
FIELDS_NOT_HASH = (['manf#_qty', 'manf'] + FIELDS_MANFCAT + [d + '#_qty' for d in distributor_class.get_distributors_iter()])
# TODO unify the `FIELDS_NOT_HASH` configuration (used also in `edas/tools.py`).
hash_fields = {k: p.fields[k] for k in p.fields if k not in FIELDS_NOT_HASH}
hash_fields['dist'] = dist
id = hashlib.md5(to_bytes(str(tuple(sorted(hash_fields.items()))))).hexdigest()
num = unique_catalogs.get(id)
if num is None:
num = len(unique_catalogs) + 1
unique_catalogs[id] = num
return '#NO_CATALOG%04d' % num
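# Behaviour sketch (illustrative): parts whose non-excluded fields hash to
# the same digest share one placeholder, e.g. '#NO_CATALOG0001'; any field
# difference (or a different `dist`) yields the next number in sequence.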
class dist_local_template(distributor_class):
name = 'Local'
type = 'local'
enabled = True
url = None
# We don't add distributors here, they are collected in query_part_info
api_distributors = []
@staticmethod
def configure(ops):
for k, v in ops.items():
if k == 'enable':
dist_local_template.enabled = v
debug_obsessive('Local API configured to enabled {}'.format(dist_local_template.enabled))
@staticmethod
def update_distributors(parts, distributors):
""" Looks for user defined distributors """
# This loops through all the parts and finds any that are sourced from
# local distributors that are not normally searched and places them into
        # the distributor dictionary.
for part in parts:
# Find the various distributors for this part by
# looking for leading fields terminated by SEPRTR.
for key in part.fields:
try:
dist = key[:key.index(SEPRTR)]
except ValueError:
continue
# If the distributor is not in the list of web-scrapable distributors,
# then it's a local distributor. Copy the local distributor template
# and add it to the table of distributors.
                # Note: If the user excludes a web-scrapable distributor (using --exclude)
# and then adds it as a local distributor (using fields) it will be added.
if dist not in distributors:
debug_overview('Creating \'{}\' local distributor profile...'.format(dist))
new_dist = distributor_class.get_distributor_template('local_template')
new_dist.label.name = dist # Set dist name for spreadsheet header.
distributor_class.add_distributor(dist, new_dist)
distributors.append(dist)
dist_local_template.api_distributors.append(dist)
@staticmethod
def query_part_info(parts, distributors, currency):
""" Fill-in part information for locally-sourced parts not handled by Octopart. """
solved = set()
# Loop through the parts looking for those sourced by local distributors
# that won't be found online. Place any user-added info for these parts
# (such as pricing) into the part dictionary.
for p in parts:
# Find the manufacturer's part number if it exists.
pn = p.fields.get('manf#') # Returns None if no manf# field.
# Now look for catalog number, price list and webpage link for this part.
for dist in distributors:
cat_num = p.fields.get(dist + ':cat#')
pricing = p.fields.get(dist + ':pricing')
link = p.fields.get(dist + ':link')
avail = p.fields.get(dist + ':avail')
if cat_num is None and pricing is None and link is None:
continue
cat_num = cat_num or pn or make_unique_catalog_number(p, dist)
p.fields[dist + ':cat#'] = cat_num # Store generated cat#.
# Get the DistData for this distributor
dd = p.dd.get(dist, DistData())
dd.part_num = cat_num
if link:
url_parts = list(urlsplit(link))
if url_parts[0] == '':
url_parts[0] = u'http'
link = urlunsplit(url_parts)
else:
# This happens when no part URL is found.
debug_obsessive('No part URL found for local \'{}\' distributor!'.format(dist))
dd.url = link
price_tiers = {}
try:
local_currency = re.findall('[a-zA-Z]{3}', pricing)[0].upper()
except Exception:
local_currency = currency
old_pricing = pricing
pricing = re.sub('[^0-9.;:]', '', pricing) # Keep only digits, decimals, delimiters.
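# Worked example (hypothetical field value, assuming SEPRTR == ':'):
#   "1:0.50; 10:0.45; 100:0.40"  ->  cleaned to "1:0.50;10:0.45;100:0.40"
# and the loop below fills price_tiers == {1: 0.5, 10: 0.45, 100: 0.4}.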
for qty_price in pricing.split(';'):
splitted = qty_price.split(SEPRTR)
if len(splitted) == 2:
qty, price = splitted
if local_currency:
dd.currency = local_currency
try:
price_tiers[int(qty)] = float(price)
except ValueError:
warning(W_BADPRICE, 'Malformed pricing number: `{}` at {}'.format(old_pricing, p.refs))
else:
warning(W_BADPRICE, 'Malformed pricing entry: `{}` at {}'.format(qty_price, p.refs))
# dd.moq = min(price_tiers.keys())
if not price_tiers:
# This happens when no pricing info is found.
debug_obsessive('No pricing information found for local \'{}\' distributor!'.format(dist))
dd.price_tiers = price_tiers
# Availability
if avail is not None:
dd.qty_avail = avail
# Update the DistData for this distributor
p.dd[dist] = dd
# We have data for this distributor; mark it solved only if it is a purely
# local one, so regular API distributors are still queried.
if dist in dist_local_template.api_distributors:
solved.add(dist)
return solved
distributor_class.register(dist_local_template, 100)
|
{
"content_hash": "5566f9cdaa3c5aa70ffe6f5c042b479d",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 129,
"avg_line_length": 43.43312101910828,
"alnum_prop": 0.5590262501833113,
"repo_name": "hildogjr/KiCost",
"id": "ab7df11e63320254ddc20da0465e1afded746f6a",
"size": "8010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kicost/distributors/dist_local_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1112"
},
{
"name": "HTML",
"bytes": "595"
},
{
"name": "Makefile",
"bytes": "2836"
},
{
"name": "Python",
"bytes": "554207"
},
{
"name": "Shell",
"bytes": "11027"
}
],
"symlink_target": ""
}
|
import setuptools
# In Python < 2.7.4, lazy loading of the `pbr` package breaks setuptools
# if other modules have registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=[],
pbr=False)
|
{
"content_hash": "ece2eab58f4d0d0bbda24a18f386ea84",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.7315634218289085,
"repo_name": "galthaus/setup-kubernetes",
"id": "43c5ca1b4969930cb18b03c78528e2e99ec147d2",
"size": "1019",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "contrib/inventory_builder/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "3088"
},
{
"name": "HCL",
"bytes": "25635"
},
{
"name": "Python",
"bytes": "1570796"
},
{
"name": "Shell",
"bytes": "51465"
},
{
"name": "Smarty",
"bytes": "328"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="layout.ternary.baxis", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs,
)
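# Usage sketch (assumes the validate_coerce API of plotly's base validators):
#   v = TicklenValidator()
#   v.validate_coerce(5)    # -> 5, accepted (number, min=0)
#   v.validate_coerce(-1)   # expected to raise ValueError via NumberValidator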
|
{
"content_hash": "177c29694a3124b9b605e3eff70d1964",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 33,
"alnum_prop": 0.5930735930735931,
"repo_name": "plotly/plotly.py",
"id": "be3d81e24e9c26f2cdbb0ea12da70e56af698def",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/ternary/baxis/_ticklen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_threat_indicator
OBJECT = {
"name": "My_Indicator",
"observables": [
{
"name": "My_Observable",
"mail-to": "someone@somewhere.com",
"confidence": "medium",
"severity": "low",
"product": "AV"
}
],
"action": "Inactive",
"profile_overrides": [
{
"profile": "My_Profile",
"action": "detect"
}
],
"ignore_warnings": True
}
CREATE_PAYLOAD = {
"name": "My_Indicator",
"observables": [
{
"name": "My_Observable",
"mail-to": "someone@somewhere.com",
"confidence": "medium",
"severity": "low",
"product": "AV"
}
],
"action": "Inactive",
"profile_overrides": [
{
"profile": "My_Profile",
"action": "detect"
}
],
"ignore_warnings": True
}
UPDATE_PAYLOAD = {
"name": "My_Indicator",
"action": "Inactive",
"ignore_warnings": True
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": "My_Indicator",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_threat_indicator.api_call'
api_call_object = 'threat-indicator'
class TestCheckpointThreatIndicator(object):
module = cp_mgmt_threat_indicator
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_create(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert OBJECT.items() == result[api_call_object].items()
def test_create_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
def test_update_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert not result['changed']
def test_delete(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True}
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False}
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
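# Harness sketch for the class above: set_module_args() seeds the module
# input, the patched exit_json raises AnsibleExitJson instead of exiting,
# and _run_module() returns that exception's payload as the module result.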
|
{
"content_hash": "c10b02df9204b2e74866259bc8ff81d1",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 109,
"avg_line_length": 30.92248062015504,
"alnum_prop": 0.6237152168463274,
"repo_name": "thaim/ansible",
"id": "6cb952f4999ec0bbd718143c887a02c41f439687",
"size": "4669",
"binary": false,
"copies": "18",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/check_point/test_cp_mgmt_threat_indicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
"""
ConvertCSV
@author: phoexer
This is a lovely little script that converts my stanchart account statement to
a YNAB-compatible format.
"""
#%reset -f
import os
import sys
import argparse
import csv
import pandas as pd
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class AccountImport(object):
account_name = 'Default Name'
table = pd.DataFrame(columns = ("account","date","payee","memo","outflow","inflow"))
ind = 1
def read_csv(self, filepath):
"""Reads a book and returns string"""
s = []
with open(filepath, "r") as f:
reader = csv.reader(f)
for row in reader:
if(len(row) > 0):
s.append(row)
return s
def process_header(self, lst):
if lst[0] == "Account Name":
self.account_name = lst[1]
def process_lines(self, lines):
for line in lines:
l = len(line)
if(l == 2):
self.process_header(line)
elif (l > 5) and (line[0] != "Date"):
memo = line[1].strip()
self.table.loc[self.ind] = self.account_name,line[0].strip(),"",memo,line[4].strip(),line[3].strip()
self.ind += 1
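# Example (hypothetical Stanchart row, layout assumed from the code above):
#   ["01/02/2020", " POS PURCHASE ", "", "100.00", "12.50", ""]
# becomes the table row
#   (account_name, "01/02/2020", "", "POS PURCHASE", "12.50", "100.00")
# i.e. column 4 is the outflow and column 3 the inflow.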
def runImport(self, filename_in,filename_out):
data = self.read_csv(filename_in)
self.process_lines(data)
logging.debug("Head")
logging.debug(self.table.head())
self.table.to_csv(filename_out, sep=',')
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file, CSV from Standchart please.")
parser.add_argument('outfile', help="Output file; if it already exists it will be overwritten, you were warned.")
args = parser.parse_args(arguments)
logging.debug("Lets get this party started.")
logging.debug("Input file:" + args.infile)
logging.debug("Output file:" + args.outfile)
ai = AccountImport()
ai.runImport(args.infile, args.outfile)
if __name__ == '__main__':
main(sys.argv[1:])
#sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "0e0657ad2feb3635c1b120d2ee57665f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 129,
"avg_line_length": 29.088607594936708,
"alnum_prop": 0.5870322019147084,
"repo_name": "phoexer/Kelly",
"id": "7f1a79982bad2c6f7bf9a72790e452c6c817f03d",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/convertCSV/convertCSV.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "307"
},
{
"name": "Python",
"bytes": "24604"
}
],
"symlink_target": ""
}
|
from bson.json_util import object_hook
import json
from mongodisco.mongo_util import get_collection
'''
File: mongodb_input.py
Description: Disco input reader that wraps a MongoDB cursor for map tasks.
'''
def _open(input_description, task=None):
"""Return a :class:`~mongodisco.mongodb_input.MongoWrapper`
which wraps a cursor selecting just those documents relevant
to one particular map operation. `input_description` is
a JSON string describing the documents to find, and looks like::
{ "inputURI": "mongodb://discomaster.zeroclues.net:27017/test.twitter",
"keyField": null,
"query": {
"$query": {},
"$min": {"_id": {"$oid": "4fae7a97fa22c41aeb5d78f8"}},
"$max": {"_id": {"$oid": "4fae7b27fa22c41aeb5d96b5"}}},
"fields": null,
"sort": null,
"limit": 0,
"skip": 0,
"timeout": false }
"""
parsed = json.loads(input_description, object_hook=object_hook)
collection = get_collection(parsed['inputURI'])
return MongoWrapper(collection.find(
spec=parsed['query'],
fields=parsed['fields'],
skip=parsed['skip'],
limit=parsed['limit'],
sort=parsed['sort'],
timeout=parsed['timeout'],
slave_okay=parsed['slave_ok']
))
class MongoWrapper(object):
"""Want to wrap the cursor in an object that
supports the following operations: """
def __init__(self, cursor):
self.cursor = cursor
self.offset = 0
def __iter__(self):
# most important method
return self.cursor
def __len__(self):
# may need to do this more dynamically (see lib/disco/comm.py ln 163)
# may want to cache this
return self.cursor.count()
def close(self):
self.cursor.close()
def read(self, size=-1):
# raise an error to see if this ever pops up
raise Exception("read is not implemented - investigate why this was called")
def input_stream(stream, size, url, params):
# This looks like a mistake, but it is intentional.
# Due to the way that Disco imports and uses this
# function, we must re-import the module here.
from mongodisco.mongodb_input import _open
return _open(url)
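# Usage sketch (hypothetical URI, mirroring the _open docstring above):
#   desc = '{"inputURI": "mongodb://localhost:27017/test.twitter", ...}'
#   wrapper = _open(desc)
#   for doc in wrapper:   # iterates the wrapped pymongo cursor
#       handle(doc)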
|
{
"content_hash": "88fc3cb67e1a363fed6fca58fc58dac2",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 83,
"avg_line_length": 30.684931506849313,
"alnum_prop": 0.615625,
"repo_name": "johntut/MongoDisco",
"id": "b8f0faf7b30a137dcdc6ac429ba19d7d355af839",
"size": "2834",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mongodisco/mongodb_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62930"
}
],
"symlink_target": ""
}
|
import simplegui
import random
# helper function to initialize globals
def new_game():
global cards, exposed, state, counter
exposed = []
state = 0
counter = 0
cards = range(8)
cards.extend(range(8))
random.shuffle(cards)
for card in cards:
exposed.append(False)
# define event handlers
def mouseclick(pos):
# add game state logic here
global state, prev_index, cur_index, counter
card_index = pos[0]/50
if exposed[card_index] == False:
exposed[card_index] = True
if state == 0:
state = 1
prev_index = card_index
elif state == 1:
state = 2
cur_index = card_index
counter += 1
label.set_text("Turns = " + str(counter))
else: #state == 2
state = 1
if cards[cur_index] != cards[prev_index]:
exposed[cur_index] = False
exposed[prev_index] = False
prev_index = card_index
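# State machine sketch for the handler above: state 0 = no card up,
# 1 = one card up, 2 = a pair up. Each second flip costs a turn, and a
# mismatched pair is hidden again on the click that follows it.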
# cards are logically 50x100 pixels in size
def draw(canvas):
start_point = 0
for i in range(len(cards)):
if exposed[i] == True:
canvas.draw_polygon([[start_point,0],[start_point+50,0],[start_point+50,100],[start_point,100]],1,'Red','Black')
canvas.draw_text(str(cards[i]),[start_point+18,60],30,'White')
else:
canvas.draw_polygon([[start_point,0],[start_point+50,0],[start_point+50,100],[start_point,100]],1,'Red','Green')
start_point += 50
# create frame and add a button and labels
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
# get things rolling
new_game()
frame.start()
# Always remember to review the grading rubric
|
{
"content_hash": "6b92553a0943f88b639c2b99e2f9c0a9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 124,
"avg_line_length": 29.71875,
"alnum_prop": 0.594111461619348,
"repo_name": "anyaelise/interactive-python",
"id": "ce1e71c578e5b7d4f21e90fab0639109e18bad81",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "P5 - MEMORY/memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37672"
}
],
"symlink_target": ""
}
|
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import warnings
import mne
from mne import compute_covariance
from mne.datasets import testing
from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
from mne.beamformer._lcmv import _lcmv_source_power
from mne.externals.six import advance_iterator
from mne.utils import run_tests_if_main, slow_test
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
label = 'Aud-lh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
warnings.simplefilter('always') # enable b/c these tests throw warnings
def read_forward_solution_meg(*args, **kwargs):
fwd = mne.read_forward_solution(*args, **kwargs)
return mne.pick_types_forward(fwd, meg=True, eeg=False)
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
epochs_preload=True, data_cov=True):
"""Read in data used in tests
"""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)
raw = mne.io.Raw(fname_raw, preload=True)
forward = mne.read_forward_solution(fname_fwd)
if all_forward:
forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
surf_ori=True)
forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)
else:
forward_surf_ori = None
forward_fixed = None
forward_vol = None
event_id, tmin, tmax = 1, tmin, tmax
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
if epochs:
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, ref_meg=False, exclude='bads',
selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0),
preload=epochs_preload,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
if epochs_preload:
epochs.resample(200, npad=0, n_jobs=2)
evoked = epochs.average()
info = evoked.info
else:
epochs = None
evoked = None
info = raw.info
noise_cov = mne.read_cov(fname_cov)
noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
eeg=0.1, proj=True)
if data_cov:
with warnings.catch_warnings(record=True):
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
else:
data_cov = None
return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol
@slow_test
@testing.requires_testing_data
def test_lcmv():
"""Test LCMV with evoked data and single trials
"""
raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
for fwd in [forward, forward_vol]:
stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
stc.crop(0.02, None)
stc_pow = np.sum(stc.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc.data[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.09 < tmax < 0.105, tmax)
assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))
if fwd is forward:
# Test picking normal orientation (surface source space only)
stc_normal = lcmv(evoked, forward_surf_ori, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
stc_normal.crop(0.02, None)
stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_normal.data[idx]
tmax = stc_normal.times[np.argmax(max_stc)]
assert_true(0.04 < tmax < 0.11, tmax)
assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))
# The amplitude of normal orientation results should always be
# smaller than free orientation results
assert_true((np.abs(stc_normal.data) <= stc.data).all())
# Test picking source orientation maximizing output source power
stc_max_power = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01,
pick_ori="max-power")
stc_max_power.crop(0.02, None)
stc_pow = np.sum(stc_max_power.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_max_power.data[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.09 < tmax < 0.11, tmax)
assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))
# Maximum output source power orientation results should be similar to
# free orientation results
assert_true((stc_max_power.data - stc.data < 1).all())
# Test if fixed forward operator is detected when picking normal or
# max-power orientation
assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
reg=0.01, pick_ori="max-power")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, lcmv, evoked, forward, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, lcmv, evoked, forward_vol, noise_cov, data_cov,
reg=0.01, pick_ori="normal")
# Now test single trial using fixed orientation forward solution
# so we can compare it to the evoked solution
stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01,
return_generator=True)
assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
epochs.drop_bad_epochs()
assert_true(len(epochs.events) == len(stcs))
# average the single trial estimates
stc_avg = np.zeros_like(stcs[0].data)
for this_stc in stcs:
stc_avg += this_stc.data
stc_avg /= len(stcs)
# compare it to the solution using evoked with fixed orientation
stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
assert_array_almost_equal(stc_avg, stc_fixed.data)
# use a label so we have few source vertices and delayed computation is
# not used
stcs_label = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,
reg=0.01, label=label)
assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
@testing.requires_testing_data
def test_lcmv_raw():
"""Test LCMV with raw data
"""
raw, _, _, _, noise_cov, label, forward, _, _, _ =\
_get_data(all_forward=False, epochs=False, data_cov=False)
tmin, tmax = 0, 20
start, stop = raw.time_as_index([tmin, tmax])
# use only the left-temporal MEG channels for LCMV
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, exclude='bads',
selection=left_temporal_channels)
data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)
stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label,
start=start, stop=stop, picks=picks)
assert_array_almost_equal(np.array([tmin, tmax]),
np.array([stc.times[0], stc.times[-1]]),
decimal=2)
# make sure we get an stc with vertices only in the lh
vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]
assert_true(len(stc.vertices[0]) == len(np.intersect1d(vertno[0],
label.vertices)))
assert_true(len(stc.vertices[1]) == 0)
@testing.requires_testing_data
def test_lcmv_source_power():
"""Test LCMV source power computation
"""
raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
stc_source_power = _lcmv_source_power(epochs.info, forward, noise_cov,
data_cov, label=label)
max_source_idx = np.argmax(stc_source_power.data)
max_source_power = np.max(stc_source_power.data)
assert_true(max_source_idx == 0, max_source_idx)
assert_true(0.4 < max_source_power < 2.4, max_source_power)
# Test picking normal orientation
stc_normal = _lcmv_source_power(epochs.info, forward_surf_ori, noise_cov,
data_cov, pick_ori="normal", label=label)
# The normal orientation results should always be smaller than free
# orientation results
assert_true((np.abs(stc_normal.data[:, 0]) <=
stc_source_power.data[:, 0]).all())
# Test if fixed forward operator is detected when picking normal
# orientation
assert_raises(ValueError, _lcmv_source_power, raw.info, forward_fixed,
noise_cov, data_cov, pick_ori="normal")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, _lcmv_source_power, raw.info, forward, noise_cov,
data_cov, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, _lcmv_source_power, epochs.info, forward_vol,
noise_cov, data_cov, pick_ori="normal")
@testing.requires_testing_data
def test_tf_lcmv():
"""Test TF beamforming based on LCMV
"""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)
raw = mne.io.Raw(fname_raw, preload=True)
forward = mne.read_forward_solution(fname_fwd)
event_id, tmin, tmax = 1, -0.2, 0.2
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False,
stim=True, eog=True, exclude='bads',
selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=False,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
epochs.drop_bad_epochs()
freq_bins = [(4, 12), (15, 40)]
time_windows = [(-0.1, 0.1), (0.0, 0.2)]
win_lengths = [0.2, 0.2]
tstep = 0.1
reg = 0.05
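# Window sketch: with tmin=-0.2, tmax=0.2, tstep=0.1 and win_lengths of 0.2 s,
# four overlapping analysis windows fit per frequency band, matching the
# assert on stcs[0].shape[1] == 4 further below.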
source_power = []
noise_covs = []
for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
raw_band = raw.copy()
raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
epochs_band = mne.Epochs(raw_band, epochs.events, epochs.event_id,
tmin=tmin, tmax=tmax, baseline=None,
proj=True, picks=picks)
with warnings.catch_warnings(record=True): # not enough samples
noise_cov = compute_covariance(epochs_band, tmin=tmin, tmax=tmin +
win_length)
noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=reg,
grad=reg, eeg=reg, proj=True)
noise_covs.append(noise_cov)
del raw_band # to save memory
# Manually calculate source power in one frequency band and several
# time windows to compare to tf_lcmv results and test overlapping
if (l_freq, h_freq) == freq_bins[0]:
for time_window in time_windows:
with warnings.catch_warnings(record=True):
data_cov = compute_covariance(epochs_band,
tmin=time_window[0],
tmax=time_window[1])
stc_source_power = _lcmv_source_power(epochs.info, forward,
noise_cov, data_cov,
reg=reg, label=label)
source_power.append(stc_source_power.data)
with warnings.catch_warnings(record=True):
stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep,
win_lengths, freq_bins, reg=reg, label=label)
assert_true(len(stcs) == len(freq_bins))
assert_true(stcs[0].shape[1] == 4)
# Averaging all time windows that overlap the time period 0 to 100 ms
source_power = np.mean(source_power, axis=0)
# Selecting the first frequency bin in tf_lcmv results
stc = stcs[0]
# Comparing tf_lcmv results with _lcmv_source_power results
assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])
# Test if using unsupported max-power orientation is detected
assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
tstep, win_lengths, freq_bins=freq_bins,
pick_ori='max-power')
# Test if an incorrect number of noise covariances is detected
assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
tmax, tstep, win_lengths, freq_bins)
# Test if freq_bins and win_lengths incompatibility is detected
assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)
# Test if time step exceeding window lengths is detected
assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)
# Test correct detection of preloaded epochs objects that do not contain
# the underlying raw object
epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
baseline=(None, 0), preload=True)
with warnings.catch_warnings(record=True): # not enough samples
assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,
noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)
with warnings.catch_warnings(record=True): # not enough samples
# Pass only one epoch to test if subtracting evoked
# responses yields zeros
stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep,
win_lengths, freq_bins, subtract_evoked=True, reg=reg,
label=label)
assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
run_tests_if_main()
|
{
"content_hash": "01e9603ee21efeb939e875c926c33506",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 79,
"avg_line_length": 41.84391534391534,
"alnum_prop": 0.6046026427261807,
"repo_name": "yousrabk/mne-python",
"id": "d92c60a111d198c7d7599f37e231b1b58f80d992",
"size": "15817",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mne/beamformer/tests/test_lcmv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3171"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4489354"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s02_1.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "4af3bd573ad882712f4f86e450a712f5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 94,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.7055214723926381,
"repo_name": "anhstudios/swganh",
"id": "ea53693eb123975a42ceac89d08ffc219a7af44a",
"size": "471",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/attachment/weapon/shared_xwing_weapon1_pos_s02_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
include NATLayer_rpc.repy
# This test connects a server to a forwarder and uses waitforconn
# Then it is tested to make sure it works properly with 2 clients.
# However, after the first client a stopcomm is issued, so the 2nd client should not be able to connect
# Then numbers 1-50 are exchanged
# There is no expected output
serverMac = "FFFFFFFFFFFE"
# The test will be forced to exit after this many seconds
# This is necessary since the second client is expected to block indefinitely
TIME_LIMIT = 30
def new_client(remoteip, remoteport, socketlikeobj, commhandle, thisnatcon):
# Increment the client connected count
if remoteip == getmyip():
num = 1
while True:
# Check the client message
mesg = socketlikeobj.recv(1024)
if int(mesg) != num:
raise Exception, "Unexpected Message! Expected: " + str(num) + " Received: " + mesg
else:
num = num + 1
# Send a number to the server
socketlikeobj.send(str(num))
# Expect a larger response
num = num + 1
# If we are expecting 51, break
if num == 51:
sleep(1)
socketlikeobj.close()
break
else:
raise Exception, "Unexpected client connected! "+remoteip
# Bounces messages back and forth to the server, stopping when the count reaches "stop"
def client_message(socket, stop=50):
num = 1
first = True
serverMesg = "0"
# Loop until the magic number
while True:
# Check the server response (after the first time)
if not first:
serverMesg = socket.recv(1024)
if int(serverMesg) != num:
raise Exception, "Unexpected Message! Expected: " + str(num) + " Received: " + serverMesg
else:
num = num + 1
# Break at stop
if int(serverMesg) == stop:
break
# Send a number to the server
socket.send(str(num))
# Expect a larger number
num = num + 1
# The first exchange is done; check server responses from now on
first = False
# Close the socket
socket.close()
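# Protocol sketch: the client sends 1, expects 2 back, sends 3, and so on;
# both ends stop once the exchanged count reaches the magic number 50.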
# This is called if the test is taking too long
def long_execution():
print "Execution exceeded "+str(TIME_LIMIT)+" seconds!"
exitall()
if callfunc == "initialize":
# Create server connection, force local forwarder
whandle = nat_waitforconn(serverMac, 10000, new_client, getmyip(), 12345, 12345)
# Setup client sockets, force use of local forwarder for the tests
clientsock1 = nat_openconn(serverMac, 10000, forwarderIP=getmyip(),forwarderPort=12345)
# Setup timer to kill us if we exceed our time limit
handle = settimer(TIME_LIMIT, long_execution, ())
# Try to connect client 1+2
client_message(clientsock1, 50)
# do the stopcomm
nat_stopcomm(whandle)
# try a new connection
try:
clientsock2 = nat_openconn(serverMac, 10000, forwarderIP=getmyip(),forwarderPort=12345)
except:
canceltimer(handle)
clientsock1.close()
else:
clientsock2.close()
print 'ERROR: client was able to connect after stopcomm'
exitall()
|
{
"content_hash": "b97a8c5901a8a5d98e45860e1b8a37c7",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 103,
"avg_line_length": 26.614035087719298,
"alnum_prop": 0.6499670402109426,
"repo_name": "sburnett/seattle",
"id": "b49f814012c4dcceb5217bb0494d30c005a95d46",
"size": "3034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "natlayer/tests/test_natlayer_rpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85039"
},
{
"name": "CSS",
"bytes": "44140"
},
{
"name": "Java",
"bytes": "178864"
},
{
"name": "JavaScript",
"bytes": "791008"
},
{
"name": "Perl",
"bytes": "36791"
},
{
"name": "Python",
"bytes": "4683648"
},
{
"name": "Scala",
"bytes": "2587"
},
{
"name": "Shell",
"bytes": "87609"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.actions import do_rename_stream
from zerver.lib.management import ZulipBaseCommand
from zerver.models import get_stream
class Command(ZulipBaseCommand):
help = """Change the stream name for a realm."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('old_name', metavar='<old name>', type=str,
help='name of stream to be renamed')
parser.add_argument('new_name', metavar='<new name>', type=str,
help='new name to rename the stream to')
self.add_realm_args(parser, True)
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
old_name = options['old_name']
new_name = options['new_name']
stream = get_stream(old_name, realm)
do_rename_stream(stream, new_name, self.user_profile)
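# Invocation sketch (flag names assumed from ZulipBaseCommand.add_realm_args):
#   ./manage.py rename_stream 'old name' 'new name' -r <realm>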
|
{
"content_hash": "3032325f143dc77a1e67476ea3112122",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 39.03846153846154,
"alnum_prop": 0.645320197044335,
"repo_name": "shubhamdhama/zulip",
"id": "798de1c3c0fa2f032366bd29635b2b25333b3218",
"size": "1015",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zerver/management/commands/rename_stream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400387"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "721395"
},
{
"name": "JavaScript",
"bytes": "3095896"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71124"
},
{
"name": "Python",
"bytes": "6896725"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.admin.widgets import AdminFileWidget
from django.forms import (
HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput,
PasswordInput
)
from django.forms.widgets import CheckboxInput
from .bootstrap import (
get_bootstrap_setting, get_form_renderer, get_field_renderer,
get_formset_renderer
)
from .text import text_concat, text_value
from .exceptions import BootstrapError
from .utils import add_css_class, render_tag, split_css_classes
from .components import render_icon
FORM_GROUP_CLASS = 'form-group'
def render_formset(formset, **kwargs):
"""
Render a formset to a Bootstrap layout
"""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render()
def render_formset_errors(formset, **kwargs):
"""
Render formset errors to a Bootstrap layout
"""
renderer_cls = get_formset_renderer(**kwargs)
return renderer_cls(formset, **kwargs).render_errors()
def render_form(form, **kwargs):
"""
Render a form to a Bootstrap layout
"""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render()
def render_form_errors(form, type='all', **kwargs):
"""
Render form errors to a Bootstrap layout
"""
renderer_cls = get_form_renderer(**kwargs)
return renderer_cls(form, **kwargs).render_errors(type)
def render_field(field, **kwargs):
"""
Render a field to a Bootstrap layout
"""
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render()
def render_label(content, label_for=None, label_class=None, label_title=''):
"""
Render a label with content
"""
attrs = {}
if label_for:
attrs['for'] = label_for
if label_class:
attrs['class'] = label_class
if label_title:
attrs['title'] = label_title
return render_tag('label', attrs=attrs, content=content)
def render_button(
content, button_type=None, icon=None, button_class='', size='',
href='', name=None, value=None):
"""
Render a button with content
"""
attrs = {}
classes = add_css_class('btn', button_class)
size = text_value(size).lower().strip()
if size == 'xs':
classes = add_css_class(classes, 'btn-xs')
elif size == 'sm' or size == 'small':
classes = add_css_class(classes, 'btn-sm')
elif size == 'lg' or size == 'large':
classes = add_css_class(classes, 'btn-lg')
elif size == 'md' or size == 'medium':
pass
elif size:
raise BootstrapError(
'Parameter "size" should be "xs", "sm", "lg" or ' +
'empty ("{}" given).'.format(size))
if button_type:
if button_type == 'submit':
if not any([c.startswith('btn-') for c in split_css_classes(classes)]):
classes = add_css_class(classes, 'btn-primary')
elif button_type not in ('reset', 'button', 'link'):
raise BootstrapError(
'Parameter "button_type" should be "submit", "reset", ' +
'"button", "link" or empty ("{}" given).'.format(button_type))
attrs['type'] = button_type
attrs['class'] = classes
icon_content = render_icon(icon) if icon else ''
if href:
attrs['href'] = href
tag = 'a'
else:
tag = 'button'
if name:
attrs['name'] = name
if value:
attrs['value'] = value
return render_tag(
tag, attrs=attrs, content=text_concat(
icon_content, content, separator=' '))
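# Usage sketch (markup illustrative, attribute order not guaranteed):
#   render_button('Save', button_type='submit', size='sm')
#   # -> '<button class="btn btn-sm btn-primary" type="submit">Save</button>'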
def render_field_and_label(
field, label, field_class='', label_for=None, label_class='',
layout='', **kwargs):
"""
Render a field with its label
"""
if layout == 'horizontal':
if not label_class:
label_class = get_bootstrap_setting('horizontal_label_class')
if not field_class:
field_class = get_bootstrap_setting('horizontal_field_class')
if not label:
label = ' '
label_class = add_css_class(label_class, 'control-label')
html = field
if field_class:
html = '<div class="{klass}">{html}</div>'.format(
klass=field_class, html=html)
if label:
html = render_label(
label, label_for=label_for, label_class=label_class) + html
return html
def render_form_group(content, css_class=FORM_GROUP_CLASS):
"""
Render a Bootstrap form group
"""
return '<div class="{klass}">{content}</div>'.format(
klass=css_class,
content=content,
)
def is_widget_required_attribute(widget):
"""
Is this widget required?
"""
if not get_bootstrap_setting('set_required'):
return False
if not widget.is_required:
return False
if isinstance(
widget, (
AdminFileWidget, HiddenInput, FileInput,
CheckboxInput, CheckboxSelectMultiple)):
return False
return True
def is_widget_with_placeholder(widget):
"""
Is this a widget that should have a placeholder?
Only text, search, url, tel, e-mail, password, number have placeholders
These are all derived from TextInput, except for Textarea
"""
# PasswordInput inherits from Input in Django 1.4.
# It was changed to inherit from TextInput in 1.5.
return isinstance(widget, (TextInput, Textarea, PasswordInput))
|
{
"content_hash": "2138ea8b34896f2321f46f2d5decfa53",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 83,
"avg_line_length": 30.29281767955801,
"alnum_prop": 0.614079883275579,
"repo_name": "py-geek/City-Air",
"id": "989de25849cf3b4d958f6414a8608745eb7b9aa7",
"size": "5507",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/bootstrap3/forms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44421"
},
{
"name": "HTML",
"bytes": "124920"
},
{
"name": "JavaScript",
"bytes": "101247"
},
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "8742149"
},
{
"name": "Shell",
"bytes": "3801"
}
],
"symlink_target": ""
}
|
import argparse
import json
from pprint import pprint, pformat
import requests
def print_response(response):
pad = '==========================='
dpad = pad.replace('=', '-')
request = response.request
print("\n#===================================================" + pad)
print("# %s %s %s" % (response.status_code,
request.method, request.url))
if request.body:
print("#----------[ Request ]------------------------------" + dpad)
pprint(json.loads(request.body))
print("#----------[ Response: %s ]------------------------" %
response.status_code + dpad)
try:
print("JSON: %s" % pformat(response.json()))
except json.decoder.JSONDecodeError:
print("Text: %s" % pformat(response.text))
print("Headers: %s" % pformat(response.headers))
def get_color(color_name):
color_lookup = {
"green": "#36a64f",
"red": "#e6364f",
"yellow": "#efef4f",
}
if color_name in color_lookup:
return color_lookup[color_name]
else:
return color_lookup['green']
def run(channels, icon_url, text, title, link, color, hooks_url, username):
for channel in channels.split(','):
body = {
'channel': channel,
'username': username,
'icon_url': icon_url,
"attachments": [
{
"fallback": title + text,
"color": get_color(color),
"title": title,
"title_link": link,
"text": text,
}
]
}
headers = {'Content-Type': 'application/json'}
response = requests.post(hooks_url, data=json.dumps(body),
headers=headers)
print_response(response)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--channels', default='empi',
help='Name of the channel(s) to post notification to '
'(comma separated)')
parser.add_argument('--hooks-url', required=True,
help='The url for sending slack HTTP POST requests')
parser.add_argument('--icon_url',
default='https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcQ'
'54WMELxKASbPBiB4qhsNW2LgvlP2JA1DB_IMY-wK83pESM-o5',
help='A url to an image to use for the user')
parser.add_argument('--text', required=True,
help='Contents of message to post to slack channel')
parser.add_argument('--title', required=True,
help='Title of message to post to slack channel')
parser.add_argument('--link', required=True,
help='Link in message to post to slack channel')
parser.add_argument('--color', default='green',
help='Color of message to post to slack channel')
parser.add_argument('--username', default='CI',
help='Name of the user that shows up as having posted the message')
return parser.parse_args()
if __name__ == '__main__':
arguments = parse_args()
run(**vars(arguments))
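# Example invocation (hypothetical URL and values):
#   python notify-slack.py --hooks-url https://hooks.slack.com/services/XXX \
#       --title 'Build 42' --text 'tests passed' --link http://ci.example.com \
#       --channels empi,dev --color green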
|
{
"content_hash": "ba46dd5deb810ea011f837e8d12ee684",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 35.96511627906977,
"alnum_prop": 0.5386356288393146,
"repo_name": "davidlmorton/webapp-ci",
"id": "ad01414a31eee780214d55c18f49eb6e29d0efc3",
"size": "3116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notify-slack.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11576"
},
{
"name": "Shell",
"bytes": "6402"
}
],
"symlink_target": ""
}
|
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
"""Check that recommended disk space is available before a first-time install."""
name = "disk_availability"
tags = ["preflight"]
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
"masters": 40 * 10**9,
"nodes": 15 * 10**9,
"etcd": 20 * 10**9,
}
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have recommended disk space requirements."""
group_names = get_var(task_vars, "group_names", default=[])
has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
ansible_mounts = get_var(task_vars, "ansible_mounts")
free_bytes = self.openshift_available_disk(ansible_mounts)
recommended_min = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
configured_min = int(get_var(task_vars, "openshift_check_min_host_disk_gb", default=0)) * 10**9
min_free_bytes = configured_min or recommended_min
if free_bytes < min_free_bytes:
return {
'failed': True,
'msg': (
'Available disk space ({:.1f} GB) for the volume containing '
'"/var" is below minimum recommended space ({:.1f} GB)'
).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
}
return {}
@staticmethod
def openshift_available_disk(ansible_mounts):
"""Determine the available disk space for an OpenShift installation.
ansible_mounts should be a list of dicts like the 'setup' Ansible module
returns.
"""
# priority list in descending order
supported_mnt_paths = ["/var", "/"]
available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
try:
for path in supported_mnt_paths:
if path in available_mnts:
return available_mnts[path]["size_available"]
except KeyError:
pass
paths = ''.join(sorted(available_mnts)) or 'none'
msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
raise OpenShiftCheckException(msg)
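# Example: given ansible_mounts like
#   [{"mount": "/", "size_available": 50 * 10**9},
#    {"mount": "/var", "size_available": 10 * 10**9}]
# openshift_available_disk() returns the /var figure (10 GB), because
# "/var" outranks "/" in supported_mnt_paths.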
|
{
"content_hash": "0c82d0ffabd562ceaab0c01cd14e5bcb",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 109,
"avg_line_length": 41.74242424242424,
"alnum_prop": 0.6323049001814882,
"repo_name": "DG-i/openshift-ansible",
"id": "962148cb8b77546e4833aa1baeda1ed4dd857790",
"size": "2791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "roles/openshift_health_checker/openshift_checks/disk_availability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4999"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "Python",
"bytes": "3197455"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "80962"
}
],
"symlink_target": ""
}
|
from concurrent import futures
import grpc
import pytest
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.grpc_check import GrpcCheck
def create_insecure_grpc_server(expected_status=health_pb2.HealthCheckResponse.SERVING):
grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
health_servicer = health.HealthServicer()
health_servicer.set("grpc.test", expected_status)
health_pb2_grpc.add_HealthServicer_to_server(health_servicer, grpc_server)
grpc_server.add_insecure_port("localhost:50051")
return grpc_server
def create_secure_grpc_server(expected_status=health_pb2.HealthCheckResponse.SERVING):
grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
health_servicer = health.HealthServicer()
health_servicer.set("grpc.test", expected_status)
health_pb2_grpc.add_HealthServicer_to_server(health_servicer, grpc_server)
ca_cert = open("tests/fixtures/ca.pem", "rb").read()
private_key = open("tests/fixtures/server-key.pem", "rb").read()
certificate_chain = open("tests/fixtures/server.pem", "rb").read()
credentials = grpc.ssl_server_credentials(
[(private_key, certificate_chain)],
root_certificates=ca_cert,
require_client_auth=True,
)
grpc_server.add_secure_port("localhost:50052", credentials)
return grpc_server
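# Note: require_client_auth=True makes this a mutual-TLS server, so the
# secure tests below must present the client.pem/client-key.pem pair;
# a client without a certificate would be rejected during the handshake.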
def test_insecure_server_is_serving(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50051",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server()
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50051",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.OK,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_insecure_server_is_not_serving(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50051",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server(health_pb2.HealthCheckResponse.NOT_SERVING)
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50051",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_insecure_server_is_unknown(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50051",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server(health_pb2.HealthCheckResponse.UNKNOWN)
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50051",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_unavailable(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:80",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server()
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:80",
"status_code:UNAVAILABLE",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_timeout(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50051",
"timeout": 0.00001,
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server()
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50051",
"status_code:DEADLINE_EXCEEDED",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_not_found(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50051",
"grpc_server_service": "not_found",
"tags": ["tag_key1:value1", "tag_key2:value2"],
}
grpc_server = create_insecure_grpc_server()
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:not_found",
"grpc_server_address:localhost:50051",
"status_code:NOT_FOUND",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_secure_server_is_serving(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
"ca_cert": "tests/fixtures/ca.pem",
"client_cert": "tests/fixtures/client.pem",
"client_key": "tests/fixtures/client-key.pem",
}
grpc_server = create_secure_grpc_server()
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50052",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.OK,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_secure_server_is_not_serving(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
"ca_cert": "tests/fixtures/ca.pem",
"client_cert": "tests/fixtures/client.pem",
"client_key": "tests/fixtures/client-key.pem",
}
grpc_server = create_secure_grpc_server(health_pb2.HealthCheckResponse.NOT_SERVING)
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50052",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_secure_server_is_unknown(dd_run_check, aggregator):
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
"ca_cert": "tests/fixtures/ca.pem",
"client_cert": "tests/fixtures/client.pem",
"client_key": "tests/fixtures/client-key.pem",
}
grpc_server = create_secure_grpc_server(health_pb2.HealthCheckResponse.UNKNOWN)
grpc_server.start()
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
grpc_server.stop(None)
expected_tags = [
"grpc_server_service:grpc.test",
"grpc_server_address:localhost:50052",
"status_code:OK",
"tag_key1:value1",
"tag_key2:value2",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.CRITICAL,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
def test_ca_cert_missing():
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
# missing ca_cert
"client_cert": "tests/fixtures/client.pem",
"client_key": "tests/fixtures/client-key.pem",
}
with pytest.raises(
ConfigurationError,
match="^ca_cert, client_cert or client_key is missing$",
):
GrpcCheck("grpc_check", {}, [instance])
def test_client_cert_missing():
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
"ca_cert": "tests/fixtures/ca.pem",
# missing client_cert
"client_key": "tests/fixtures/client-key.pem",
}
with pytest.raises(
ConfigurationError,
match="^ca_cert, client_cert or client_key is missing$",
):
GrpcCheck("grpc_check", {}, [instance])
def test_client_key_missing():
instance = {
"grpc_server_address": "localhost:50052",
"grpc_server_service": "grpc.test",
"tags": ["tag_key1:value1", "tag_key2:value2"],
"ca_cert": "tests/fixtures/ca.pem",
"client_cert": "tests/fixtures/client.pem",
# missing client_key
}
with pytest.raises(
ConfigurationError,
match="^ca_cert, client_cert or client_key is missing$",
):
GrpcCheck("grpc_check", {}, [instance])
def test_empty_instance():
instance = {}
with pytest.raises(ConfigurationError, match="^grpc_server_address must be specified$"):
GrpcCheck("grpc_check", {}, [instance])
def test_timeout_zero():
instance = {"grpc_server_address": "localhost:50051", "timeout": 0}
with pytest.raises(ConfigurationError, match="^timeout must be greater than zero$"):
GrpcCheck("grpc_check", {}, [instance])
@pytest.mark.integration
@pytest.mark.usefixtures("dd_environment")
def test_check_integration(dd_run_check, aggregator, instance):
check = GrpcCheck("grpc_check", {}, [instance])
dd_run_check(check)
expected_tags = [
"grpc_server_service:",
"grpc_server_address:localhost:50051",
"status_code:OK",
]
aggregator.assert_metric(
"grpc_check.healthy",
value=1.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
"grpc_check.unhealthy",
value=0.0,
tags=expected_tags,
hostname="",
flush_first_value=False,
metric_type=aggregator.GAUGE,
)
aggregator.assert_service_check(
"grpc.healthy",
status=AgentCheck.OK,
tags=expected_tags,
count=1,
hostname="",
message="",
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
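# The create_insecure_grpc_server / create_secure_grpc_server helpers used above are
# defined elsewhere in this test suite. A minimal sketch of the insecure variant,
# built on the standard grpcio health-checking API (the body is an assumption; only
# the helper name, port and service name come from the tests above):
#
# from concurrent import futures
# import grpc
# from grpc_health.v1 import health, health_pb2, health_pb2_grpc
#
# def create_insecure_grpc_server(status=health_pb2.HealthCheckResponse.SERVING):
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
#     servicer = health.HealthServicer()
#     servicer.set("grpc.test", status)  # service name queried by the check
#     health_pb2_grpc.add_HealthServicer_to_server(servicer, server)
#     server.add_insecure_port("localhost:50051")
#     return server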
|
{
"content_hash": "8888b0d7bdcaa91b390b66a942ff38dd",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 92,
"avg_line_length": 29.064846416382252,
"alnum_prop": 0.6067989666510099,
"repo_name": "DataDog/integrations-extras",
"id": "9c1bc410fbea1b2b7f7078753cf6488c9feb77e8",
"size": "17032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grpc_check/tests/test_grpc_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4265"
},
{
"name": "Go",
"bytes": "4119"
},
{
"name": "PHP",
"bytes": "3192"
},
{
"name": "Python",
"bytes": "1219552"
},
{
"name": "Ruby",
"bytes": "8005"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
}
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:11000")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
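# The same change can be made from the command line against the daemon's JSON-RPC
# interface (daemon binary name assumed from the usual bitcoind-fork convention):
#   testcoind walletpassphrasechange "<oldpassphrase>" "<newpassphrase>"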
|
{
"content_hash": "bebee903bcffb31fdc24640e62bfb87c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 44.2,
"alnum_prop": 0.7601809954751131,
"repo_name": "gfneto/testcoin",
"id": "8b97813e89b473fd8b79e28ec4162cb8a1857d46",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletchangepass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "31208"
},
{
"name": "C++",
"bytes": "2496531"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18286"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "5101"
},
{
"name": "NSIS",
"bytes": "6100"
},
{
"name": "Objective-C",
"bytes": "858"
},
{
"name": "Objective-C++",
"bytes": "5711"
},
{
"name": "Python",
"bytes": "69715"
},
{
"name": "QMake",
"bytes": "14104"
},
{
"name": "Shell",
"bytes": "9737"
}
],
"symlink_target": ""
}
|
import unittest
import azure.mgmt.relay
from devtools_testutils import AzureMgmtRecordedTestCase, ResourceGroupPreparer, recorded_by_proxy
class TestMgmtRelay(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.relay_client = self.create_mgmt_client(
azure.mgmt.relay.RelayAPI
)
@recorded_by_proxy
def test_operations_list(self):
# List the operations supported by the resource provider
result = self.relay_client.operations.list()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9c3c69ee72684576c48f1a3c7bbb299a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 98,
"avg_line_length": 27.863636363636363,
"alnum_prop": 0.6068515497553018,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9ae17d3f2e13ce5f7ba74a9582ee88c9642f7ae7",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/relay/azure-mgmt-relay/tests/disable_test_azure_mgmt_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
virtstrap.commands.info
-----------------------
The 'info' command
"""
import os
import shutil
from virtstrap import commands
from virtstrap import constants
class InfoCommand(commands.ProjectCommand):
"""Displays information about the current project
This excludes VEfile and requirements.lock
"""
name = 'info'
description = 'Displays information about current project'
def run(self, project, options, **kwargs):
self.display_info(project)
def display_info(self, project):
self.logger.info('*******************%s INFO*******************\n' % project.name)
self.logger.info('Project Path: %s' % project.path())
self.logger.info('Project Environment Path: %s' % project.env_path())
self.logger.info('Project Bin Path: %s' % project.bin_path())
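# Illustrative invocation from inside a project directory (assuming virtstrap's
# usual `vstrap` entry point):
#   $ vstrap info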
|
{
"content_hash": "fe4069d50e0df0a453e57340c030147b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 90,
"avg_line_length": 29.5,
"alnum_prop": 0.6319612590799032,
"repo_name": "ravenac95/virtstrap",
"id": "24068de19c9863ba505f5d316e5a4d1898b36117",
"size": "826",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "virtstrap-core/virtstrap/commands/info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "566172"
},
{
"name": "Ruby",
"bytes": "54"
},
{
"name": "Shell",
"bytes": "37428"
}
],
"symlink_target": ""
}
|
import re
from typing import Any, Dict, Optional
from django.http import HttpRequest
from django.views.debug import SafeExceptionReporterFilter
class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
def get_post_parameters(self, request: Optional[HttpRequest]) -> Dict[str, Any]:
post_data = SafeExceptionReporterFilter.get_post_parameters(self, request)
assert isinstance(post_data, dict)
filtered_post = post_data.copy()
filtered_vars = [
"content",
"secret",
"password",
"key",
"api-key",
"subject",
"stream",
"subscriptions",
"to",
"csrfmiddlewaretoken",
"api_key",
"realm_counts",
"installation_counts",
]
for var in filtered_vars:
if var in filtered_post:
filtered_post[var] = "**********"
return filtered_post
def clean_data_from_query_parameters(val: str) -> str:
return re.sub(r"([a-z_-]+=)([^&]+)([&]|$)", r"\1******\3", val)
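# Minimal usage sketch (added for illustration; not part of the original module):
# parameter values are masked while the parameter names stay readable.
if __name__ == "__main__":
    sample = "api_key=1234&stream=general"
    assert clean_data_from_query_parameters(sample) == "api_key=******&stream=******"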
|
{
"content_hash": "f15dbfc5a175229281cc50e344a652c3",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 84,
"avg_line_length": 30.555555555555557,
"alnum_prop": 0.5581818181818182,
"repo_name": "kou/zulip",
"id": "09d50e583e25a24fcfc6487ef4937b833db38b97",
"size": "1100",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "zerver/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433376"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "635452"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3361648"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79932"
},
{
"name": "Python",
"bytes": "8142846"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "134587"
},
{
"name": "TypeScript",
"bytes": "20233"
}
],
"symlink_target": ""
}
|
import os
import shutil
import subprocess
import sys
# Dictionary of settings that control Java source compilation
CONFIG = {
'out' : 'bin',
'src' : [ 'src', 'test' ],
'libraries' : [
'third_party/junit4-4.11.jar',
'third_party/hamcrest-core-1.3.jar',
'third_party/gson-2.8.2.jar'
],
'separators' : {
'nt' : ';',
'posix' : ':'
}
}
# CLEAN
#
# Remove all files from the build output directory.
#
def clean(config) :
out = config['out']
for entry in [ os.path.join(out, name) for name in os.listdir(out) ] :
if os.path.isdir(entry) :
shutil.rmtree(entry)
else :
os.remove(entry)
print('Clean PASSED')
# BUILD
#
# Build the project defined by the config object. This will find all source
# files in the source directories, link all specified libraries, and write
# all output to the out directory.
#
def build(config) :
libraries = config['libraries']
out = config['out']
separator = config['separators'][os.name]
src = config['src']
# Find all the java source files in the given source directories.
# Non-java source files are ignored.
src_files = [ ]
for src_path in src :
for root, dirs, files in os.walk(src_path) :
src_files += [ os.path.join(root, file) for file in files if file.endswith('.java') ]
# Take everything so far and construct a single command to build the project.
command = [ ]
command += [ 'javac' ]
command += [ '-d', out ]
command += [ '-cp', separator.join([ out ] + libraries) ]
command += [ '-Xlint' ]
command += src_files
print('running : %s' % command)
print('Build %s' % ('PASSED' if subprocess.call(command) == 0 else 'FAILED'))
# RUN
#
# Run a class from within the project.
#
def run(config, start_class_path, arguments):
libraries = config['libraries']
out = config['out']
separator = config['separators'][os.name]
command = [ ]
command += [ 'java' ]
command += [ '-cp', separator.join([ out ] + libraries) ]
command += [ start_class_path ]
command += arguments
print('Running: [ %s ]' % ' '.join(command))
print('Run %s' % ('PASSED' if subprocess.call(command) == 0 else 'FAILED'))
# USAGE
#
# Print basic usage info.
#
def usage() :
print('Usage: python build.py clean | build | rebuild | run | help')
print(' clean : Remove all files in the output directory.')
print(' This does not remove the root of the output tree.')
print(' build : Build the full project. This will build all java files')
print(' found in all of the src directories.')
print(' rebuild : perform clean followed by build.')
print(' run <class path> [ arguments ... ] : Run the specified class.')
print(' All arguments after the class path will be passed to')
print(' the java class when it runs.')
print(' help : Print this helpful message.')
# MAIN
def main(args) :
if len(args) > 1 :
command = args[1]
if 'help' == command :
usage()
elif 'clean' == command :
clean(CONFIG)
elif 'build' == command :
build(CONFIG)
elif 'rebuild' == command :
clean(CONFIG)
build(CONFIG)
elif 'run' == command :
if len(args) > 2 :
java_class = args[2]
java_params = args[3:]
run(CONFIG, java_class, java_params)
else :
print('Run command requires a java class to run.')
usage()
else :
print('Unknown command: [ %s ]' % ' '.join(args))
usage()
else :
print('No parameters provided.')
usage()
if __name__ == '__main__':
main(sys.argv)
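# Example invocations (the class path is illustrative):
#   python build.py rebuild
#   python build.py run org.example.Main arg1 arg2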
|
{
"content_hash": "83f8b2ac716bda4638a41155adbda8f9",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 91,
"avg_line_length": 25.570422535211268,
"alnum_prop": 0.6053428807491049,
"repo_name": "lee-tammy/CodeU-Summer-2017",
"id": "0292b4cf48ab62b6fa24d6caf8d2486ec124fdbd",
"size": "5178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "269740"
},
{
"name": "Python",
"bytes": "5178"
}
],
"symlink_target": ""
}
|
"""Add RTPLaunchRecord table
Revision ID: bad90ab035ba
Revises: 77c082c87844
Create Date: 2021-03-08 19:16:44.611253+00:00
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "bad90ab035ba"
down_revision = "77c082c87844"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"rtp_launch_record",
sa.Column("obsid", sa.BigInteger(), nullable=False),
sa.Column("submitted_time", sa.BigInteger(), nullable=True),
sa.Column("rtp_attempts", sa.BigInteger(), nullable=False),
sa.Column("jd", sa.BigInteger(), nullable=False),
sa.Column("obs_tag", sa.String(length=128), nullable=False),
sa.Column("filename", sa.String(length=128), nullable=False),
sa.Column("prefix", sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(
["obsid"],
["hera_obs.obsid"],
),
sa.PrimaryKeyConstraint("obsid"),
)
def downgrade():
op.drop_table("rtp_launch_record")
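# This revision is applied and rolled back with the standard Alembic CLI:
#   alembic upgrade bad90ab035ba
#   alembic downgrade 77c082c87844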
|
{
"content_hash": "447dc1c00dc3768cb5f6b21c6a7449d9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 69,
"avg_line_length": 28.736842105263158,
"alnum_prop": 0.6575091575091575,
"repo_name": "HERA-Team/hera_mc",
"id": "808ea86744da0958223a5746293ef4c67aa9f6e7",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "alembic/versions/bad90ab035ba_add_rtplaunchrecord_table.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1238267"
},
{
"name": "Shell",
"bytes": "458"
}
],
"symlink_target": ""
}
|
"""This example demonstrates the flow for retrieving a refresh token.
In order for this example to work your application's redirect URI must be set
to http://localhost:8080.
This tool can be used to conveniently create refresh tokens for later use with
your web application OAuth2 credentials.
"""
import praw
import random
import socket
import sys
def receive_connection():
"""Wait for and then return a connected socket..
Opens a TCP connection on port 8080, and waits for a single client.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('localhost', 8080))
server.listen(1)
client = server.accept()[0]
server.close()
return client
def send_message(client, message):
"""Send message to client and close the connection."""
print(message)
client.send('HTTP/1.1 200 OK\r\n\r\n{}'.format(message).encode('utf-8'))
client.close()
def main():
"""Provide the program's entry point when directly executed."""
if len(sys.argv) < 2:
print('Usage: {} SCOPE...'.format(sys.argv[0]))
return 1
reddit = praw.Reddit(client_id='YOUR_CLIENT_ID',
client_secret='YOUR_CLIENT_SECRET',
redirect_uri='http://localhost:8080',
user_agent='praw_refresh_token_example')
state = str(random.randint(0, 65000))
url = reddit.auth.url(sys.argv[1:], state, 'permanent')
print(url)
client = receive_connection()
data = client.recv(1024).decode('utf-8')
param_tokens = data.split(' ', 2)[1].split('?', 1)[1].split('&')
params = {key: value for (key, value) in [token.split('=')
for token in param_tokens]}
if state != params['state']:
send_message(client, 'State mismatch. Expected: {} Received: {}'
.format(state, params['state']))
return 1
elif 'error' in params:
send_message(client, params['error'])
return 1
refresh_token = reddit.auth.authorize(params['code'])
send_message(client, 'Refresh token: {}'.format(refresh_token))
return 0
if __name__ == '__main__':
sys.exit(main())
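# Once printed, the refresh token can be fed back to PRAW to build an authorized
# instance without repeating the browser flow (sketch with placeholder credentials):
#
# reddit = praw.Reddit(client_id='YOUR_CLIENT_ID',
#                      client_secret='YOUR_CLIENT_SECRET',
#                      refresh_token='YOUR_REFRESH_TOKEN',
#                      user_agent='praw_refresh_token_example')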
|
{
"content_hash": "282cfefe873124a2a130f55ffab7dd19",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 31.5,
"alnum_prop": 0.6199294532627866,
"repo_name": "RGood/praw",
"id": "fb12386da2f293ab56a67cf71e5fd20d19afe120",
"size": "2291",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/examples/obtain_refresh_token.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "323946"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
from pyqrllib.pyqrllib import bin2hstr
from qrl.core.State import State
from qrl.core.StateContainer import StateContainer
from qrl.core.misc import logger
from qrl.core.txs.Transaction import Transaction
from qrl.generated.qrl_pb2 import LatticePKMetadata
class LatticeTransaction(Transaction):
def __init__(self, protobuf_transaction=None):
super(LatticeTransaction, self).__init__(protobuf_transaction)
@property
def pk1(self): # kyber_pk
return self._data.latticePK.pk1
@property
def pk2(self): # dilithium_pk
return self._data.latticePK.pk2
@property
def pk3(self): # ecdsa_pk
return self._data.latticePK.pk3
def get_data_bytes(self):
return self.master_addr + \
self.fee.to_bytes(8, byteorder='big', signed=False) + \
self.pk1 + \
self.pk2 + \
self.pk3
@staticmethod
def create(pk1: bytes, pk2: bytes, pk3: bytes, fee: int, xmss_pk: bytes, master_addr: bytes = None):
transaction = LatticeTransaction()
if master_addr:
transaction._data.master_addr = master_addr
transaction._data.fee = fee
transaction._data.public_key = xmss_pk
transaction._data.latticePK.pk1 = bytes(pk1)
transaction._data.latticePK.pk2 = bytes(pk2)
transaction._data.latticePK.pk3 = bytes(pk3)
transaction.validate_or_raise(verify_signature=False)
return transaction
def _validate_custom(self) -> bool:
if self.fee < 0:
logger.info('State validation failed for %s because: Negative send', bin2hstr(self.txhash))
return False
return True
def _validate_extended(self, state_container: StateContainer) -> bool:
if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
logger.warning("[LatticeTransaction] Hard Fork Feature not yet activated")
return False
dev_config = state_container.current_dev_config
if len(self.pk1) > dev_config.lattice_pk1_max_length: # TODO: to fix kyber pk value
logger.warning('Kyber PK length cannot be more than %s bytes', dev_config.lattice_pk1_max_length)
logger.warning('Found length %s', len(self.pk1))
return False
if len(self.pk2) > dev_config.lattice_pk2_max_length: # TODO: to fix dilithium pk value
logger.warning('Dilithium PK length cannot be more than %s bytes', dev_config.lattice_pk2_max_length)
logger.warning('Found length %s', len(self.pk2))
return False
if len(self.pk3) > dev_config.lattice_pk3_max_length: # TODO: to fix ecdsa pk value
logger.warning('ECDSA PK length cannot be more than %s bytes', dev_config.lattice_pk3_max_length)
logger.warning('Found length %s', len(self.pk3))
return False
tx_balance = state_container.addresses_state[self.addr_from].balance
if tx_balance < self.fee:
logger.info('State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
logger.info('balance: %s, amount: %s', tx_balance, self.fee)
return False
if (self.addr_from, self.pk1, self.pk2, self.pk3) in state_container.lattice_pk.data:
logger.info('State validation failed for %s because: Lattice PKs already exists for this address',
bin2hstr(self.txhash))
return False
return True
def set_affected_address(self, addresses_set: set):
super().set_affected_address(addresses_set)
def apply(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee, subtract=True)
state_container.paginated_lattice_pk.insert(address_state, self.txhash)
state_container.paginated_tx_hash.insert(address_state, self.txhash)
state_container.lattice_pk.data[(self.addr_from,
self.pk1, self.pk2, self.pk3)] = LatticePKMetadata(enabled=True)
return self._apply_state_changes_for_PK(state_container)
def revert(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee)
state_container.paginated_lattice_pk.remove(address_state, self.txhash)
state_container.paginated_tx_hash.remove(address_state, self.txhash)
state_container.lattice_pk.data[(self.addr_from,
self.pk1, self.pk2, self.pk3)] = LatticePKMetadata(enabled=False)
return self._revert_state_changes_for_PK(state_container)
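# Construction sketch (placeholder keys; real values come from the Kyber, Dilithium
# and ECDSA key generators, and xmss_pk from the signing XMSS tree):
#
# tx = LatticeTransaction.create(pk1=kyber_pk, pk2=dilithium_pk, pk3=ecdsa_pk,
#                                fee=0, xmss_pk=xmss.pk)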
|
{
"content_hash": "af2ea58e56d3308365916377e3f92831",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 113,
"avg_line_length": 40.71900826446281,
"alnum_prop": 0.6444083620864623,
"repo_name": "cyyber/QRL",
"id": "cd5921a41cafa9a533ca1656e38589e084af19a9",
"size": "4927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/qrl/core/txs/LatticeTransaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "185833"
},
{
"name": "Python",
"bytes": "1929465"
},
{
"name": "Shell",
"bytes": "2126"
}
],
"symlink_target": ""
}
|
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
init_app(set_backends=True, routes=False)
populate_conferences()
MEETING_DATA = {
'spsp2014': {
'name': 'Society for Personality and Social Psychology 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'asb2014': {
'name': 'Association of Southeastern Biologists 2014',
'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'aps2014': {
'name': 'Association for Psychological Science 2014',
'info_url': 'http://centerforopenscience.org/aps/',
'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'annopeer2014': {
'name': '#annopeer',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'cpa2014': {
'name': 'Canadian Psychological Association 2014',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'filaments2014': {
'name': 'National Radio Astronomy Observatory Filaments 2014',
'info_url': None,
'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
'filamentary-structure/images/filaments2014_660x178.png',
'active': False,
'admins': [
'lvonschi@nrao.edu',
# 'Dkim@nrao.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'bitss2014': {
'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
'info_url': None,
'logo_url': os.path.join(
settings.STATIC_URL_PATH,
'img',
'conferences',
'bitss.jpg',
),
'active': False,
'admins': [
'gkroll@berkeley.edu',
'awais@berkeley.edu',
],
'public_projects': True,
'poster': False,
'talk': True,
},
'spsp2015': {
'name': 'Society for Personality and Social Psychology 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'meetings@spsp.org',
],
'poster': True,
'talk': True,
},
'aps2015': {
'name': 'Association for Psychological Science 2015',
'info_url': None,
'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
'active': True,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'icps2015': {
'name': 'International Convention of Psychological Science 2015',
'info_url': None,
'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'mpa2015': {
'name': 'Midwestern Psychological Association 2015',
'info_url': None,
'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
'active': True,
'admins': [
'mpa@kent.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'NCCC2015': {
'name': 'North Carolina Cognition Conference 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
'aoverman@elon.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VPRSF2015': {
'name': 'Virginia Piedmont Regional Science Fair 2015',
'info_url': None,
'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
'active': False,
'admins': [
'director@vprsf.org',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'APRS2015': {
'name': 'UVA Annual Postdoctoral Research Symposium 2015',
'info_url': None,
'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
'active': False,
'admins': [
'mhurst@virginia.edu',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'ASB2015': {
'name': 'Association of Southeastern Biologists 2015',
'info_url': None,
'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
'active': False,
'admins': [
'amorris.mtsu@gmail.com',
],
'public_projects': True,
'poster': True,
'talk': True,
},
'TeaP2015': {
'name': 'Tagung experimentell arbeitender Psychologen 2015',
'info_url': None,
'logo_url': None,
'active': False,
'admins': [
],
'public_projects': True,
'poster': True,
'talk': True,
},
'VSSEF2015': {
'name': 'Virginia State Science and Engineering Fair 2015',
'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2015': {
'name': 'Rocky Mountain Psychological Association 2015',
'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARP2015': {
'name': 'Association for Research in Personality 2015',
'info_url': 'http://www.personality-arp.org/conference/',
'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEP2015': {
'name': 'Society of Experimental Psychologists Meeting 2015',
'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Reid2015': {
'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NEEPS2015': {
'name': 'Northeastern Evolutionary Psychology Conference 2015',
'info_url': 'http://neeps2015.weebly.com/',
'logo_url': None,
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'VaACS2015': {
'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
'info_url': 'http://virginia.sites.acs.org/',
'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
'active': False,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'MADSSCi2015': {
'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
'info_url': 'http://madssci.abrf.org',
'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NRAO2015': {
'name': 'National Radio Astronomy Observatory Accretion 2015',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ARCS2015': {
'name': 'Advancing Research Communication and Scholarship 2015',
'info_url': 'http://commons.pacificu.edu/arcs/',
'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'singlecasedesigns2015': {
'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice',
'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OSFM2015': {
'name': 'OSF for Meetings 2015',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'JSSP2015': {
'name': 'Japanese Society of Social Psychology 2015',
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'4S2015': {
'name': 'Society for Social Studies of Science 2015',
'info_url': 'http://www.4sonline.org/meeting',
'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IARR2016': {
'name': 'International Association for Relationship Research 2016',
'info_url': 'http://iarr.psych.utoronto.ca/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'IA2015': {
'name': 'Inclusive Astronomy 2015',
'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015',
'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'PsiChiRepository': {
'name': 'Psi Chi Repository',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'R2RC': {
'name': 'Right to Research Coalition',
'info_url': None,
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'OpenCon2015': {
'name': 'OpenCon2015',
'info_url': 'http://opencon2015.org/',
'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2015': {
'name': 'Earth Science Information Partners 2015',
'info_url': 'http://esipfed.org/',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SPSP2016': {
'name': 'Society for Personality and Social Psychology 2016 ',
'info_url': 'http://meeting.spsp.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'NACIII': {
'name': '2015 National Astronomy Consortium (NAC) III Workshop',
'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CDS2015': {
'name': 'Cognitive Development Society 2015',
'info_url': 'http://meetings.cogdevsoc.org/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'SEASR2016': {
'name': 'Southeastern Association of Shared Resources 2016',
'info_url': 'http://seasr.abrf.org',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'Accretion2015': {
'name': 'Observational Evidence of Gas Accretion onto Galaxies?',
'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'2020Futures': {
'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s',
'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'RMPA2016': {
'name': 'Rocky Mountain Psychological Association 2016',
'info_url': 'http://www.rockymountainpsych.org/convention-info.html',
'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'CNI2015': {
'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015',
'info_url': 'https://www.cni.org/events/membership-meetings/upcoming-meeting/fall-2015/',
'logo_url': None,
'active': True,
'admins': [],
'public_projects': True,
'poster': False,
'talk': True,
},
'SWPA2016': {
'name': 'Southwestern Psychological Association Convention 2016',
'info_url': 'https://www.swpsych.org/conv_dates.php',
'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
'ESIP2016W': {
'name': 'Earth Science Information Partners Winter Meeting 2016',
'info_url': 'http://commons.esipfed.org/2016WinterMeeting',
'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png',
'active': True,
'admins': [],
'public_projects': True,
'poster': True,
'talk': True,
},
}
def populate_conferences():
for meeting, attrs in MEETING_DATA.iteritems():
meeting = meeting.strip()
admin_emails = attrs.pop('admins')
admin_objs = []
for email in admin_emails:
try:
user = User.find_one(Q('username', 'iexact', email))
admin_objs.append(user)
except ModularOdmException:
raise RuntimeError('Username {0!r} is not registered.'.format(email))
conf = Conference(
endpoint=meeting, admins=admin_objs, **attrs
)
try:
conf.save()
except ModularOdmException:
conf = Conference.find_one(Q('endpoint', 'eq', meeting))
for key, value in attrs.items():
setattr(conf, key, value)
conf.admins = admin_objs
changed_fields = conf.save()
if changed_fields:
print('Updated {}: {}'.format(meeting, changed_fields))
else:
print('Added new Conference: {}'.format(meeting))
if __name__ == '__main__':
main()
|
{
"content_hash": "edccede2edb27c7692a6451a84b281f2",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 156,
"avg_line_length": 31.945996275605214,
"alnum_prop": 0.5316817254444768,
"repo_name": "njantrania/osf.io",
"id": "3c4c0c262f41f269a5844350d61b5590514dc7d7",
"size": "17196",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/populate_conferences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119424"
},
{
"name": "HTML",
"bytes": "31299"
},
{
"name": "JavaScript",
"bytes": "1175450"
},
{
"name": "Mako",
"bytes": "537851"
},
{
"name": "Python",
"bytes": "3844872"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
}
|
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import sys
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print '===================='
print 'Iteration %03d of %03d' % (it, max_it)
print '===================='
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print "benching lars_path (with Gram):",
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print "%0.3fs" % delta
lars_gram[i_f, i_s] = delta
gc.collect()
print "benching lars_path (without Gram):",
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print "%0.3fs" % delta
lars[i_f, i_s] = delta
gc.collect()
print "benching orthogonal_mp (with Gram):",
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print "%0.3fs" % delta
omp_gram[i_f, i_s] = delta
gc.collect()
print "benching orthogonal_mp (without Gram):",
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print "%0.3fs" % delta
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.itervalues())
import pylab as pl
fig = pl.figure()
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
|
{
"content_hash": "47be96e6ace8f2674102020ee3ff5f46",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 76,
"avg_line_length": 35.70247933884298,
"alnum_prop": 0.5243055555555556,
"repo_name": "sgenoud/scikit-learn",
"id": "7341c892911bc8fd0e2cf7fa22d0bf217cc1e8a6",
"size": "4320",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "benchmarks/bench_plot_omp_lars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7396960"
},
{
"name": "C++",
"bytes": "408753"
},
{
"name": "JavaScript",
"bytes": "4736"
},
{
"name": "Objective-C",
"bytes": "4595"
},
{
"name": "Python",
"bytes": "3013862"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
}
|
from __future__ import division
import numpy as np
import logging
np.seterr(divide='warn')
import seaborn as sns
import scipy.stats as pyst
import scipy.signal as ssig
import scipy.optimize as sopt
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
import os
import json
import mc_functions as mcf
import vc_gen_class as vgc
import plot_scripts as plt_s
import imp
import pymc as pm
import pandas as pd
CONFIG_DIR = os.path.join("/".join(imp.find_module("invivoinfer")[1].split('/')[:-1]), 'config')
DATA_DIR = os.path.join("/".join(imp.find_module("invivoinfer")[1].split('/')[:-1]), 'data')
class VCInfer(object):
# ---------------- CONSTRUCTOR
def __init__(self, trace, dt, n_moments=4, params=None, figures_folder='figure_output', config=None):
'''
This class wraps a raw trace and its timestep; the remaining constructor
arguments tune the inference (number of moments, parameter overrides, config file).
'''
self.raw_trace = trace
self.dt = dt
self.descr_stat = {}
self.descr_ps = {}
self.n_moments = n_moments
self.distType = None
logging.info('Trace length: {} sec'.format(len(trace) * dt))
if np.mean(self.raw_trace) < 0.:
self.raw_trace = -self.raw_trace
logging.info('Negative trace detected; flipping it to be positive.')
if config is None:
self.params = self.default_params
else:
self.params = self.get_params_from_config(config)
if params is not None:
self.params.update(params)
self.figure_dir = os.path.join(DATA_DIR, figures_folder)
self.param_priors = dict()
self.inference_results = {}
self.figures = {}
@property
def default_params(self):
config_file = os.path.join(CONFIG_DIR, 'config.json')
with open(config_file, 'r') as fp:
out = json.load(fp)
return out
def get_params_from_config(self, config):
config_file = os.path.join(CONFIG_DIR, config)
with open(config_file, 'r') as fp:
out = json.load(fp)
return out
@property
def weightType(self):
# While for LogNormal the parameters denote the moments, for the other two distributions, they are
# actually the parameters of the function
if self.distType is None:
return None
elif self.distType == 'LogNormal':
return 'Moments'
elif self.distType == 'TruncNormal':
return 'Parameters'
elif self.distType == 'Exponential':
return 'Parameters'
raise Exception('distType {} is not recognized'.format(self.distType))
# @property
# def init_param_priors(self):
# param_priors = {'tau1': np.zeros(2), 'tau2': np.zeros(2), 'constant': np.zeros(2), 'pw0': np.zeros(2),
# 'pw1': np.zeros(2), 'freq': np.zeros(2)} # for each name, pos0: mean, pos1:std
#
# for i in np.arange(self.n_moments):
# param_priors['obs_bias_{}'.format(i)] = [self.obs_bias['mean'][i], self.obs_bias['std'][i]]
#
# return param_priors
# ---------------- CALCULATING BASIC QUANTITIES
def momenta(self):
'''
Calculates mean, std, skew and kurtosis of the raw trace
(no baseline or other corrections).
Results are stored in self.descr_stat.
'''
self.descr_stat['mean'] = np.mean(self.raw_trace)
self.descr_stat['std'] = np.std(self.raw_trace)
self.descr_stat['skew'] = pyst.skew(self.raw_trace)
self.descr_stat['kurtosis'] = pyst.kurtosis(self.raw_trace)
logging.info(' ------ Summary of the statistics - RAW TRACE: \n {}'.format(self.descr_stat))
def psd_calc(self, verbose=0, windowl=1., overlap_perc=0.75):
'''
Calculates the raw power spectrum of the trace
and stores it in self.descr_ps.
'''
self.psd_param = {'windowl': windowl, 'overlap_perc': overlap_perc}
# using welch method
window_bin = nextpow2(windowl / self.dt)
overlap_bin = int(window_bin * overlap_perc)
f, Pxx_den = ssig.welch(self.raw_trace, fs=1 / self.dt,
window='hanning', nperseg=window_bin, noverlap=overlap_bin, detrend='constant',
scaling='density')
# estimate of the error (normal assumption)
Pxx_den_std = Pxx_den * (window_bin / self.raw_trace.size * 11 / 9) ** 0.5
self.descr_ps['psd_x'] = Pxx_den
self.descr_ps['psd_x_std'] = Pxx_den_std
self.descr_ps['psd_x_freq'] = f
def estimate_taus(self, verbose=0, plot=0):
'''
From the power spectrum, estimate the taus and the constant in Ps(nu)=constant*(tau2**2)/(.......).
Run self.taus_init() beforehand.
'''
taus_estimation_par = self.params['taus_estimation_par']
logging.info('---------- Estimating taus from the powerspectrum ----------- ')
# GOAL: smooth the uncertainty of the power spectrum for the fit
logging.info('* now smoothing the uncertainty on the power spectrum')
logic_unc_fit = np.logical_and(self.descr_ps['psd_x_freq'] > taus_estimation_par['lim_fit_std'][0],
self.descr_ps['psd_x_freq'] < taus_estimation_par['lim_fit_std'][1])
fr_unc_fit = self.descr_ps['psd_x_freq'][logic_unc_fit]
pow_std_unc_fit = self.descr_ps['psd_x_std'][logic_unc_fit]
p_pot, dumb = sopt.curve_fit(exp_1, fr_unc_fit, pow_std_unc_fit, maxfev=fr_unc_fit.shape[0] * 1000)
self.descr_ps['psd_x_std_smooth'] = exp_1(self.descr_ps['psd_x_freq'], p_pot[0], p_pot[1], p_pot[2])
# using the datapoints only in the right range
logging.info('* now preparing for the powerspectrum fit')
logic_unc_mcmc_lim = np.logical_and(self.descr_ps['psd_x_freq'] > taus_estimation_par['lim_fit_psd'][0],
self.descr_ps['psd_x_freq'] < taus_estimation_par['lim_fit_psd'][1])
f = self.descr_ps['psd_x_freq'][logic_unc_mcmc_lim]
data = self.descr_ps['psd_x'][logic_unc_mcmc_lim]
data_err = self.descr_ps['psd_x_std_smooth'][logic_unc_mcmc_lim]
# setting the starting points of the fit
p0_tau1 = np.mean(taus_estimation_par['tau1_bound'])
p0_tau2 = np.mean(taus_estimation_par['tau2_bound'])
p0_const = data[0] / p0_tau2 ** 2
p0 = [p0_const, p0_tau2, p0_tau1]
fg_tau_res = 10 ** 20 * np.ones([taus_estimation_par['n_iter_tau']])
fg_tau_data = {}
fg_tau_data['constant'] = np.zeros(taus_estimation_par['n_iter_tau'])
fg_tau_data['tau2'] = np.zeros(taus_estimation_par['n_iter_tau'])
fg_tau_data['tau1'] = np.zeros(taus_estimation_par['n_iter_tau'])
fun_tau = lambda x: f_tau_min(x, f, data_err, data)
k = 0
for kk in np.arange(taus_estimation_par['n_iter_tau']):
logging.info('** Performing {} / {} optimisations'.format(kk + 1, taus_estimation_par['n_iter_tau']))
p0_r = (np.random.rand(3) - 0.5) * p0 * 0.5 + p0
if kk == 0:
opt_par, dumb = sopt.curve_fit(mcf.power_spectrum_tau, f, data, p0=p0_r, sigma=data_err,
maxfev=f.shape[0] * 1000)
res_obj = sopt.minimize(fun_tau, x0=p0_r, method='Powell', options={'maxiter': 1000}) # Powell 'SLSQP'
if (res_obj.success == True):
fg_tau_data['constant'][kk] = res_obj.x[0]
fg_tau_data['tau2'][kk] = res_obj.x[1]
fg_tau_data['tau1'][kk] = res_obj.x[2]
fg_tau_res[kk] = res_obj.fun
k += 1
logging.info('{}/{} optimisations converged'.format(k, kk + 1))
fg_tau_data = pd.DataFrame(fg_tau_data)
self.param_priors['tau1'] = [fg_tau_data['tau1'][np.argmin(fg_tau_res)], np.sqrt(dumb[2, 2])]
self.param_priors['tau2'] = [fg_tau_data['tau2'][np.argmin(fg_tau_res)], np.sqrt(dumb[1, 1])]
self.param_priors['constant'] = [fg_tau_data['constant'][np.argmin(fg_tau_res)], np.sqrt(dumb[0, 0])]
logging.info('-------------- TAU ESTIMATION RESULTS -----------')
logging.info('\nTau_1: {}+-{}, Tau_2: {}+-{}'.format(self.param_priors['tau1'][0], self.param_priors['tau1'][1],
self.param_priors['tau2'][0],
self.param_priors['tau2'][1]))
logging.info('Constant: {}+-{}\n'.format(self.param_priors['constant'][0], self.param_priors['constant'][1]))
def plot_tau_estimation(self, save_pdf=False):
# Zoom on the low frequency part
sns.set(style="ticks", context='paper')
fig = plt.figure()
plt.fill_between(self.descr_ps['psd_x_freq'], self.descr_ps['psd_x'] - self.descr_ps['psd_x_std'],
self.descr_ps['psd_x'] + self.descr_ps['psd_x_std'], facecolor='grey', edgecolor='grey')
plt.plot(self.descr_ps['psd_x_freq'], self.descr_ps['psd_x'])
plt.plot(self.descr_ps['psd_x_freq'],
mcf.power_spectrum_tau(self.descr_ps['psd_x_freq'], self.param_priors['constant'][0],
self.param_priors['tau2'][0], self.param_priors['tau1'][0]), color='red',
label='fit')
xlimit = 30
ax = plt.gca()
ax.set_xlim([0, xlimit])
ax.set_ylim([np.min(
self.descr_ps['psd_x'][self.descr_ps['psd_x_freq'] < xlimit] - self.descr_ps['psd_x_std'][
self.descr_ps['psd_x_freq'] < xlimit]),
np.max(self.descr_ps['psd_x'] + self.descr_ps['psd_x_std'])])
ax.set_yscale("log", nonposx='clip')
sns.despine()
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V$^2$/Hz]')
plt.tight_layout(pad=2)
if save_pdf:
plt_s.save_plot(os.path.join(self.figure_dir, 'psp_lf_zoom.pdf'), fig_size=[8, 5], file_format='pdf')
self.figures['psp_lf_zoom'] = fig
# All powerspectrum
fig = plt.figure()
sns.set(style="ticks", context='paper')
plt.fill_between(self.descr_ps['psd_x_freq'], self.descr_ps['psd_x'] - self.descr_ps['psd_x_std'],
self.descr_ps['psd_x'] + self.descr_ps['psd_x_std'], facecolor='grey', edgecolor='grey')
plt.plot(self.descr_ps['psd_x_freq'], self.descr_ps['psd_x'])
plt.plot(self.descr_ps['psd_x_freq'],
mcf.power_spectrum_tau(self.descr_ps['psd_x_freq'], self.param_priors['constant'][0],
self.param_priors['tau2'][0], self.param_priors['tau1'][0]), color='red',
label='fit')
xlimit = 1000
ax = plt.gca()
ax.set_xlim([0, xlimit])
ax.set_ylim([np.min(
self.descr_ps['psd_x'][self.descr_ps['psd_x_freq'] < xlimit] - self.descr_ps['psd_x_std'][
self.descr_ps['psd_x_freq'] < xlimit]),
np.max(self.descr_ps['psd_x'] + self.descr_ps['psd_x_std'])])
ax.set_yscale("log", nonposx='clip')
sns.despine()
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V$^2$/Hz]')
plt.tight_layout(pad=2)
if save_pdf:
plt_s.save_plot(os.path.join(self.figure_dir, 'psp_all.pdf'), fig_size=[8, 5], file_format='pdf')
self.figures['psp_all'] = fig
# --------------------- SETTING PRIORS ON DATA MEASUREMENT BIASES, namely P(Dobs|Dtrue)
def reset_obs_unc(self):
logging.info('*** Resetting observations bias and uncertainties')
self.obs_bias = {'mean': np.zeros(self.n_moments),
'std': 1. * 10 ** (-1) * np.ones(self.n_moments)} # from position 0..4: mean,std,skew,kurt
logging.info('Obs uncertainties: {}+-{}'.format(self.obs_bias['mean'], self.obs_bias['std']))
def set_obs_unc_mean(self):
'''
Sets the bias and uncertainty of the recording's mean (namely the baseline).
'''
base_line_force = self.params['baseline_corr']
if base_line_force['ToUse'] == True:
logging.info('\n --------------- Assigning bias and uncertainty of the mean -----------')
logging.info('Baseline: {}+-{}\n'.format(base_line_force['average'], base_line_force['uncertainty']))
self.obs_bias['mean'][0] = base_line_force['average']
self.obs_bias['std'][0] = base_line_force['uncertainty']
def plot_mean_offset(self, save_pdf=False):
base_line_force = self.params['baseline_corr']
fig = plt.figure()
t_plot = np.linspace(self.dt, self.dt * self.raw_trace.shape[0], self.raw_trace.shape[0])
plt.plot(t_plot, self.raw_trace)
plt.plot(t_plot, np.ones(len(t_plot)) * base_line_force['average'])
plt.plot(t_plot,
np.ones(len(t_plot)) * (base_line_force['average'] - base_line_force['uncertainty']))
plt.plot(t_plot,
np.ones(len(t_plot)) * (base_line_force['average'] + base_line_force['uncertainty']))
plt.ylim([0, 2 * base_line_force['average']])
plt.tight_layout(pad=2)
if save_pdf:
plt_s.save_plot(os.path.join(self.figure_dir, 'baseline.pdf'), fig_size=[8, 5], file_format='pdf')
self.figures['baseline'] = fig
def set_obs_unc_std(self):
'''
Sets the high-frequency noise bias and uncertainty on the std (generally easy to measure directly),
and estimates the contribution of low-frequency oscillations to the power spectrum
(given the cutoff threshold). verbose: 1 prints, 2 plots.
'''
hf_std_corr = self.params['hf_std_corr']
lf_std_corr = self.params['lf_std_corr']
logging.info('\n -------- Measuring the bias of the std --------')
# HIGH FREQUENCY
if hf_std_corr['ToUse'] == True:
hfnoise_mean = hf_std_corr['average']
hfnoise_std = hf_std_corr['uncertainty']
else:
hfnoise_mean = 0
hfnoise_std = 0
logging.info('High frequency noise: {}+-{}'.format(hfnoise_mean, hfnoise_std))
# LOW FREQUENCY
if lf_std_corr['ToUse'] == True:
# predicted_std is the area under the fitted curve used to get the tau
predicted_std = np.sqrt(self.param_priors['constant'][0] * self.param_priors['tau2'][0] ** 3 / 4 / (
self.param_priors['tau2'][0] + self.param_priors['tau1'][0]) / (
(self.param_priors['tau2'][0] + 2 * self.param_priors['tau1'][0])))
if self.descr_stat['std'] ** 2 - predicted_std ** 2 > 0:
lfnoise_mean = np.sqrt(self.descr_stat['std'] ** 2 - predicted_std ** 2)
else:
lfnoise_mean = 0
lfnoise_std = 0
else:
lfnoise_mean = 0
lfnoise_std = 0
logging.info('Low frequency noise: {}+-{}'.format(lfnoise_mean, lfnoise_std))
if hf_std_corr['ToUse'] == True or lf_std_corr['ToUse'] == True:
self.obs_bias['mean'][1] = self.descr_stat['std'] - np.sqrt(
self.descr_stat['std'] ** 2 - (hfnoise_mean ** 2 + lfnoise_mean ** 2))
self.obs_bias['std'][1] = np.sqrt(hfnoise_std ** 2 + lfnoise_std ** 2)
# HACK TO IMPROVE KURT AND SKEW:
# self.obs_bias['mean'][2] = -self.obs_bias['mean'][1] / 10
# self.obs_bias['mean'][3] = -self.obs_bias['mean'][1] / 10
logging.info('Total noise contribution to std:{}+-{}'.format(self.obs_bias['mean'][1], self.obs_bias['std'][1]))
def first_guess(self, distType='LogNormal'):
'''
Given the estimated taus, calculates the parameters A, stdA and freq from a least-squares fit.
It also estimates their uncertainties by restarting the fit from different starting points.
The results are stored in self.param_priors.
'''
self.distType = distType
first_guess_par = self.params['first_guess_par'][distType]
pw0_bound = first_guess_par['pw0_bound']
pw1_bound = first_guess_par['pw1_bound']
freq_bound = first_guess_par['freq_bound']
n_iter_par = first_guess_par['n_iter_par']
logging.info('\n --START OF THE FIRST GUESS OPTIMIZATION.....')
first_guess_s = {}
# preparing the minimization
t1_fix = self.param_priors['tau1'][0]
t2_fix = self.param_priors['tau2'][0]
if self.n_moments == 4:
y_target = np.array([self.descr_stat['mean'] - self.obs_bias['mean'][0],
self.descr_stat['std'] - self.obs_bias['mean'][1],
self.descr_stat['skew'] - self.obs_bias['mean'][2],
self.descr_stat['kurtosis'] - self.obs_bias['mean'][3]])
else:
y_target = np.array([self.descr_stat['mean'] - self.obs_bias['mean'][0],
self.descr_stat['std'] - self.obs_bias['mean'][1],
self.descr_stat['skew'] - self.obs_bias['mean'][2],
])
fun = lambda x: f_like(x, t1_fix, t2_fix, distType, y_target, n_moments=self.n_moments)
first_guess_s['pw0'] = np.zeros(n_iter_par)
first_guess_s['pw1'] = np.zeros(n_iter_par)
first_guess_s['freq'] = np.zeros(n_iter_par)
residuals = 10 ** 5 * np.ones(n_iter_par) # This is the default residuals if fail to converge (very high!)
bnds = ((pw0_bound), (pw1_bound), (freq_bound))
k = 0
for jj in np.arange(n_iter_par):
x0 = ((np.random.rand() - 0.5) * np.diff(pw0_bound) + np.mean(pw0_bound),
(np.random.rand() - 0.5) * np.diff(pw1_bound) + np.mean(pw1_bound),
(np.random.rand() - 0.5) * np.diff(freq_bound) + np.mean(freq_bound))
res_obj = sopt.minimize(fun, x0, method='SLSQP', bounds=bnds,
options={'maxiter': 1000}) # fitting the first four momenta
if (res_obj.success == True):
first_guess_s['pw0'][jj] = res_obj.x[0]
first_guess_s['pw1'][jj] = res_obj.x[1]
first_guess_s['freq'][jj] = res_obj.x[2]
residuals[jj] = res_obj.fun
k = k + 1
        logging.info('...and... {}/{} optimisations converged'.format(k, jj + 1))
        for val in first_guess_s:
m_val = first_guess_s[val][np.argmin(residuals)]
# The next is just a reasonable estimate of the variability of the first guess estimation
unc_val = np.std(first_guess_s[val][first_guess_s[val] != 0])
self.param_priors[val] = np.array([m_val, unc_val])
logging.info('Values for the {} of the {} weights distribution:'.format(self.weightType, distType))
logging.info('PW0: {}+-{}, PW1: {}+-{}, freq: {}+-{}\n'.format(self.param_priors['pw0'][0],
self.param_priors['pw0'][1],
self.param_priors['pw1'][0],
self.param_priors['pw1'][1],
self.param_priors['freq'][0],
self.param_priors['freq'][1]))
if self.weightType == 'Parameters':
m_w, s_w = mcf.par2mom(self.param_priors['pw0'][0], self.param_priors['pw1'][0], distType=distType)
else:
m_w, s_w = self.param_priors['pw0'][0], self.param_priors['pw1'][0]
        logging.info('Moments of the weights distribution:')
logging.info('Mean: {}, Std: {}\n'.format(m_w, s_w))
logging.info('First guess theoretical moments: {}'.format(
mcf.inputs2momenta_full(self.param_priors['freq'][0], self.param_priors['pw0'][0],
self.param_priors['pw1'][0], self.param_priors['tau2'][0],
self.param_priors['tau1'][0], np.array([1, 2, 3, 4]), dist_type=distType,
weightType=self.weightType))
)
logging.info('Target experimental moments: {}'.format(y_target))
logging.info('\n --END OF THE FIRST GUESS OPTIMIZATION ..... \n')
def optimize_likelihood(self):
'''
        To estimate the likelihood function, I run nsamples simulations from the first guess
        The output is the std/mean of the moments and tau
'''
logging.info('\n -- START OF THE ESTIMATION OF THE LIKELIHOOD FUNCTION ...')
sk_ku_corr = self.params['sk_ku_corr']
likelihood_par = self.params['likelihood_par']
kernel_par = self.params['kernel_par']
sim_param = {'time': self.dt * self.raw_trace.shape[0], 'dt': self.dt}
if sk_ku_corr['ToUse'] == True:
sk_ku_corr['lf_noise'] = np.sqrt(
self.obs_bias['mean'][1] * (self.descr_stat['std'] - self.obs_bias['mean'][1]))
else:
sk_ku_corr['lf_noise'] = 0
kde_ll, mom_sim, mom_bias = self.likelihood_est_f(likelihood_par['nsamples'],
self.param_priors,
sim_param,
self.distType, self.weightType,
kernel_par=kernel_par,
sk_ku_corr=sk_ku_corr,
n_moments=self.n_moments)
self.likelihood_estim = {'kde_ll': kde_ll, 'moments_sim': mom_sim}
if sk_ku_corr['ToUse'] == True:
            # These are the uncertainties, which are now larger, as shown by the variability of the simulations
self.obs_bias['std'] = np.sqrt(
self.obs_bias['std'] ** 2 + mom_bias['std'] ** 2)
            # Since the biases of the mean and std have already been accounted for, we correct only skew and kurt
self.obs_bias['mean'][2] = self.obs_bias['mean'][2] + mom_bias['mean'][2] # bias of the skew
if self.n_moments == 4:
self.obs_bias['mean'][3] = self.obs_bias['mean'][3] + mom_bias['mean'][3] # bias of the kurtosis
logging.info('New biases: {}+-{}'.format(self.obs_bias['mean'], self.obs_bias['std']))
logging.info('\n -- END OF OPTIMIZATION OF THE LIKELIHOOD...')
def likelihood_est_f(self, nsamples, param_priors, sim_param, distType, weightType,
kernel_par={'Type': 'exponential', 'BW': 1},
sk_ku_corr={'ToUse': False, 'lf_noise': 0},
n_moments=4
):
# -------------------- SET UP THE INPUTS TO THE SIMULATION
Syn_exc_par = {}
Syn_inh_par = {}
Noise_par = {}
Noise_par['ampli'] = 0. # std of noise amplitude in pA
Noise_par['LF_ampli'] = sk_ku_corr['lf_noise']
Noise_par['LF_cutoff'] = 2.5
Fr_par = {}
Init_par = {}
Sim_par = {}
# --------- Excitatory
Syn_exc_par['tau1'] = param_priors['tau1'][0] # seconds
Syn_exc_par['tau2'] = param_priors['tau2'][0]
Syn_exc_par['A'] = param_priors['pw0'][0] # in pA
Syn_exc_par['stdA'] = param_priors['pw1'][0]
Syn_exc_par['WeightDist'] = distType # TruncNormal/LogNormal/Exponential
Syn_exc_par['WeightType'] = weightType
Syn_exc_par['freq'] = param_priors['freq'][0]
Init_par['Ioffset'] = 0.
Sim_par['duration'] = sim_param['time']
Sim_par['dt'] = sim_param['dt']
# Calculate theoretical moments (without LF noise!)
moments = mcf.inputs2momenta_full(Syn_exc_par['freq'], Syn_exc_par['A'], Syn_exc_par['stdA'],
Syn_exc_par['tau2'],
Syn_exc_par['tau1'], dist_type=Syn_exc_par['WeightDist'],
weightType=Syn_exc_par['WeightType'])
logging.info('********************** Likelihood function shape calculation - running...')
logging.info('{} of the {} weight distribution'.format(Syn_exc_par['WeightType'], Syn_exc_par['WeightDist']))
logging.info('pw0: {}, pw1: {}, freq: {}, tau1: {}, tau2: {}'.format(
Syn_exc_par['A'], Syn_exc_par['stdA'], Syn_exc_par['freq'], Syn_exc_par['tau1'], Syn_exc_par['tau2'])
)
logging.info('...Starting simulations to find the uncertainties on the likelihood...')
# initialize the arrays
momenta_sample = np.zeros([nsamples, n_moments])
momenta_sample_noise = np.zeros([nsamples, n_moments])
for kk in np.arange(nsamples):
v1 = vgc.VCGeneration(Syn_exc_par, Syn_inh_par, Noise_par, Fr_par, Init_par, Sim_par)
v1.reset()
v1.run()
momenta_sample[kk, :] = mcf.empirical_moments(v1.I, n_moments=n_moments)
if sk_ku_corr['ToUse'] == True:
if Noise_par['LF_ampli'] > 0:
v1.add_OU_noise(0, Noise_par['LF_ampli'], Noise_par['LF_cutoff'])
momenta_sample_noise[kk, :] = mcf.empirical_moments(v1.I, n_moments=n_moments)
else:
momenta_sample_noise[kk, :] = momenta_sample[kk, :]
if kk % 10 == 0:
logging.info('Simulated {} / {}'.format(kk, nsamples))
else:
logging.debug('Simulated {} / {}'.format(kk, nsamples))
if n_moments == 4:
col_names = ['mean', 'std', 'skew', 'kurtosis']
else:
col_names = ['mean', 'std', 'skew']
self.likelihood_samples = pd.DataFrame(momenta_sample, columns=col_names)
mom_sim = {'mean': np.mean(momenta_sample, axis=0), 'std': np.std(momenta_sample, axis=0)}
momenta_standard = (momenta_sample - mom_sim['mean']) / mom_sim['std']
kde_ll = KernelDensity(kernel=kernel_par['Type'], bandwidth=kernel_par['BW'])
kde_ll.fit(momenta_standard)
# biases calculation
momenta_bias_sample = momenta_sample_noise - momenta_sample
mom_bias = {'mean': np.mean(momenta_bias_sample, axis=0), 'std': np.std(momenta_bias_sample, axis=0)}
if n_moments == 4:
logging.info('Theoretical moments. mean: {}, std: {}, skew: {}, kurt: {}'.format(
moments[0], moments[1], moments[2], moments[3])
)
logging.info(
            'Measured moments. mean: {}+-{}, std: {}+-{}, skew: {}+-{}, kurt: {}+-{}'.format(mom_sim['mean'][0],
mom_sim['std'][0],
mom_sim['mean'][1],
mom_sim['std'][1],
mom_sim['mean'][2],
mom_sim['std'][2],
mom_sim['mean'][3],
mom_sim['std'][3])
)
logging.info(
            'Measured bias. mean: {}+-{}, std: {}+-{}, skew: {}+-{}, kurt: {}+-{}'.format(mom_bias['mean'][0],
mom_bias['std'][0],
mom_bias['mean'][1],
mom_bias['std'][1],
mom_bias['mean'][2],
mom_bias['std'][2],
mom_bias['mean'][3],
mom_bias['std'][3])
)
else:
logging.info('Theoretical moments. mean: {}, std: {}, skew: {}'.format(
moments[0], moments[1], moments[2])
)
        logging.info('Measured moments. mean: {}+-{}, std: {}+-{}, skew: {}+-{}'.format(mom_sim['mean'][0],
mom_sim['std'][0],
mom_sim['mean'][1],
mom_sim['std'][1],
mom_sim['mean'][2],
mom_sim['std'][2],
)
)
        logging.info('Measured bias. mean: {}+-{}, std: {}+-{}, skew: {}+-{}'.format(mom_bias['mean'][0],
mom_bias['std'][0],
mom_bias['mean'][1],
mom_bias['std'][1],
mom_bias['mean'][2],
mom_bias['std'][2],
)
)
return kde_ll, mom_sim, mom_bias
def plot_likelihood(self, save_pdf=False):
fig = plt.figure()
g = sns.PairGrid(self.likelihood_samples)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_diag(sns.kdeplot, lw=3, legend=False)
plt.tight_layout(pad=2)
if save_pdf:
plt_s.save_plot(os.path.join(self.figure_dir, 'likelihood.pdf'), fig_size=[8, 5], file_format='pdf')
self.figures['likelihood'] = fig
    def create_model(self):
        '''
        Build the PyMC model: priors on freq, pw0 and pw1, the deterministic
        moment predictions, the observation biases and the KDE-based likelihood.
        '''
# SETTING UP THE PRIORS
model_param = self.params['prior_par'][self.distType]
freq_prior = model_param['freq']
if freq_prior[0] == 'Uniform':
freq = pm.Uniform("freq", freq_prior[1], freq_prior[2])
elif freq_prior[0] == 'Normal':
freq = pm.TruncatedNormal("freq", mu=freq_prior[1], tau=1 / freq_prior[2] ** 2, a=freq_prior[3],
b=freq_prior[4])
else:
            raise Exception('Prior for freq must be in [Uniform, Normal]')
pw0_prior = model_param['pw0']
if pw0_prior[0] == 'Uniform':
pw0 = pm.Uniform("pw0", pw0_prior[1], pw0_prior[2])
elif pw0_prior[0] == 'Normal':
pw0 = pm.TruncatedNormal("pw0", mu=pw0_prior[1], tau=1 / pw0_prior[2] ** 2, a=pw0_prior[3], b=pw0_prior[4])
else:
            raise Exception('Prior for pw0 must be in [Uniform, Normal]')
        pw1_prior = model_param['pw1']
        if pw1_prior[0] == 'Uniform':
            pw1 = pm.Uniform("pw1", pw1_prior[1], pw1_prior[2])
        elif pw1_prior[0] == 'Normal':
            pw1 = pm.TruncatedNormal("pw1", mu=pw1_prior[1], tau=1 / (pw1_prior[2]) ** 2, a=pw1_prior[3],
                                     b=pw1_prior[4])
        elif pw1_prior[0] == 'Exponential':
            pw1 = pm.Exponential("pw1", beta=1. / (pw1_prior[1]))
        else:
            raise Exception('Prior for pw1 must be in [Uniform, Normal, Exponential]')
tau2 = self.param_priors['tau2'][0]
tau1 = self.param_priors['tau1'][0]
# ------ SETTING UP THE LIKELIHOOD INGREDIENTS
# ---- P(mean(Dtrue)|model)
@pm.deterministic
        def mean_moments(freq=freq, A=pw0, stdA=pw1, tau2=tau2, tau1=tau1, dist_type=self.distType,
                         n_moments=self.n_moments):
            # Analytical calculation of the mean of the first 4 moments of the
            # distribution, given the parameters (extending the mom_n array
            # yields predictions for the higher moments).
mom_n = np.arange(1, 4 + 1)
k_i_m = np.zeros(mom_n.shape)
k_i_m[0] = tau2 ** 2 / (tau1 + tau2)
k_i_m[1] = tau2 ** 3 / 2 / (tau1 + tau2) / (2 * tau1 + tau2)
k_i_m[2] = 2 * tau2 ** 4 / 3 / (tau1 + tau2) / (3 * tau1 + 2 * tau2) / (3 * tau1 + tau2)
k_i_m[3] = 3 * tau2 ** 5 / 4 / (tau1 + tau2) / (4 * tau1 + 3 * tau2) / (4 * tau1 + tau2) / (2 * tau1 + tau2)
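            # These coefficients are, presumably, the integrals of the n-th
            # power of the normalised double-exponential synaptic kernel
            # (rise tau1, decay tau2), as in Campbell's theorem for the
            # cumulants of shot noise.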
if dist_type == 'LogNormal':
mu = np.log((A ** 2) / np.sqrt(stdA ** 2 + A ** 2))
sigma = np.sqrt(np.log(stdA ** 2 / A ** 2 + 1))
mom_mult = np.exp(mom_n * mu + 0.5 * mom_n ** 2 * sigma ** 2)
elif dist_type == 'TruncNormal':
mom_mult = mcf.moments_truncgaussian2(A, stdA)
elif dist_type == 'Exponential':
stdA = np.amin([stdA, 12.])
mom_mult = mcf.mom_s_exp(A, stdA)
else:
raise Exception('Wrong type of distribution! {}'.format(dist_type))
            cumulants = freq * mom_mult * k_i_m  # these are the cumulants, or semi-invariants (Rice, secs. 1.5-2)
            c_mom = cumulants
c_mom[mom_n == 4] = c_mom[mom_n == 4] + 3 * (c_mom[mom_n == 2] ** 2)
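            # The central moments equal the cumulants up to order 3; the fourth
            # central moment is k4 + 3*k2**2 (added above), and subtracting 3
            # after standardisation (below) restores the excess kurtosis.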
# now we standardize
c_std = c_mom
c_std[mom_n > 2] = c_std[mom_n > 2] / (c_mom[mom_n == 2] ** (mom_n[mom_n > 2] / 2))
out = c_std
out[mom_n == 2] = np.sqrt(out[mom_n == 2])
out[mom_n == 4] = out[mom_n == 4] - 3
out[np.isfinite(out) == False] = 0.
return out[:n_moments]
# ---- P(Dobs|Dtrue) probab of observed data given th true ones (corrupted by biases and uncertainties due to baseline, hf/lf noise
        self.obs_bias['std'][self.obs_bias['std'] == 0] = 10 ** -2  # guard against zero stds: the likelihood would blow up
obs_bias_0 = pm.Normal('obs_bias_0', mu=self.obs_bias['mean'][0], tau=1 / (self.obs_bias['std'][0]) ** 2)
obs_bias_1 = pm.Normal('obs_bias_1', mu=self.obs_bias['mean'][1], tau=1 / (self.obs_bias['std'][1]) ** 2)
obs_bias_2 = pm.Normal('obs_bias_2', mu=self.obs_bias['mean'][2], tau=1 / (self.obs_bias['std'][2]) ** 2)
if self.n_moments == 4:
obs_bias_3 = pm.Normal('obs_bias_3', mu=self.obs_bias['mean'][3], tau=1 / (self.obs_bias['std'][3]) ** 2)
logging.info('OBSERVATION BIASES ---- P(Dmeas|Dtrue)')
for i, val in enumerate(self.obs_bias['mean']):
logging.info('{} +- {}'.format(self.obs_bias['mean'][i], self.obs_bias['std'][i]))
for i in np.arange(self.n_moments): # PASS the obs_bias to the parameters values
self.param_priors['obs_bias_{}'.format(i)] = [self.obs_bias['mean'][i], self.obs_bias['std'][i]]
if self.n_moments == 4:
@pm.deterministic
def obs_data(mean_moments=mean_moments,
obs_bias_0=obs_bias_0,
obs_bias_1=obs_bias_1,
obs_bias_2=obs_bias_2,
obs_bias_3=obs_bias_3):
obs_bias = np.array([obs_bias_0, obs_bias_1, obs_bias_2, obs_bias_3])
return mean_moments + obs_bias
else:
@pm.deterministic
def obs_data(mean_moments=mean_moments,
obs_bias_0=obs_bias_0,
obs_bias_1=obs_bias_1,
obs_bias_2=obs_bias_2,
):
obs_bias = np.array([obs_bias_0, obs_bias_1, obs_bias_2])
return mean_moments + obs_bias
# -------------- DATA OBSERVED
if self.n_moments == 4:
data_observed = np.array(
[self.descr_stat['mean'], self.descr_stat['std'], self.descr_stat['skew'], self.descr_stat['kurtosis']])
else:
data_observed = np.array(
[self.descr_stat['mean'], self.descr_stat['std'], self.descr_stat['skew']])
logging.info('MEASURED DATA {}'.format(data_observed))
# -------------- LIKELIHOOD FUNCTION P(Dobs|param)
@pm.stochastic(observed=True)
def obs_like(value=data_observed, obs_data=obs_data,
ll_kernel=self.likelihood_estim['kde_ll'],
ll_old_std=self.likelihood_estim['moments_sim']['std'],
):
            # Normalise the differences so that the standardised kernel can be used
            target_new_std = (value - obs_data) / ll_old_std
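            # KernelDensity.score returns the total log-likelihood of the
            # given sample(s) under the fitted kernel density estimate.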
if np.isfinite(ll_kernel.score(target_new_std.reshape(-1, len(target_new_std)))) == False:
return -10. ** 10.
else:
return ll_kernel.score(target_new_std.reshape(-1, len(target_new_std)))
if self.n_moments == 4:
self.model = pm.Model(
[obs_like, pw0, pw1, freq, tau2, tau1, obs_bias_0, obs_bias_1, obs_bias_2, obs_bias_3])
else:
self.model = pm.Model([obs_like, pw0, pw1, freq, tau2, tau1, obs_bias_0, obs_bias_1, obs_bias_2])
def run_sampler(self, sampler_type='mh'):
'''
run the sampler on the model created.
'sampler_type': 'mh' for metropolis hastings (only this supported so far)
'''
# -------------------- CALCULATING THE MAP
self.inference_results['map'] = MAP_estimate(self.model, self.param_priors, self.obs_bias,
n_iter=self.params['map_par']['n_samples'],
start_std=self.params['map_par']['start_std'],
distType=self.distType)
# ------ PRINT MAP PREDICTION
if self.n_moments == 4:
y_target = np.array([self.descr_stat['mean'] - self.obs_bias['mean'][0],
self.descr_stat['std'] - self.obs_bias['mean'][1],
self.descr_stat['skew'] - self.obs_bias['mean'][2],
self.descr_stat['kurtosis'] - self.obs_bias['mean'][3]])
else:
y_target = np.array([self.descr_stat['mean'] - self.obs_bias['mean'][0],
self.descr_stat['std'] - self.obs_bias['mean'][1],
self.descr_stat['skew'] - self.obs_bias['mean'][2]]
)
logging.info('Measured (corrected) moments: {}'.format(y_target))
logging.info('MAP predicted moments: {}'.format(
mcf.inputs2momenta_full(self.inference_results['map']['parameters']['freq'].values,
self.inference_results['map']['parameters']['pw0'].values,
self.inference_results['map']['parameters']['pw1'].values,
self.param_priors['tau2'][0],
self.param_priors['tau1'][0],
np.array([1, 2, 3, 4]),
dist_type=self.distType,
weightType=self.weightType,
n_moments=self.n_moments)))
# -------------------- RUN THE SAMPLER
sampler_param = self.params['mc_par']
if sampler_type == 'mh':
self.inference_results['mh'] = run_mh(self.model, self.inference_results['map'],
sampler_param['mh'], distType=self.distType,
)
def plot_mc_results(self, save_pdf=False):
figures = _plot_mc_results(self.inference_results['map'], self.inference_results['mh'], save_pdf, self.figure_dir)
self.figures['mh'] = figures['mh']
# --------------- plot summary results
def _plot_mc_results(map_data, mh_data, save_pdf=False, figure_dir=None):
figures = {}
logging.info('START PLOTTING FIGURES')
fig = plt.figure()
g = sns.PairGrid(mh_data['weights'])
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_diag(sns.kdeplot, lw=3)
plt.tight_layout(pad=2)
if save_pdf:
plt_s.save_plot(os.path.join(figure_dir, 'mh_plot.pdf'), fig_size=[8, 5], file_format='pdf')
figures['mh'] = fig
return figures
# --------------- LIKELIHOOD ESTIMATION FUNCTION
# ---------------------- MAP
def MAP_estimate(model, param_priors, obs_bias, n_iter=50, start_std=0.2, distType='LogNormal'):
    # ------------------ FIND MAP given a model (the first guess must have been calculated beforehand!)
logging.info('\n\nCalculating the MAP')
map_ = pm.MAP(model)
map_sum = {}
    map_bic = np.zeros(2 * n_iter)
    map_aic = np.zeros(2 * n_iter)
logging.info('Starting condition close to first guess')
for l in np.arange(n_iter):
for i, var in enumerate(model.stochastics):
var.value = param_priors[str(var)][0] + start_std * param_priors[str(var)][0] * np.random.randn()
# I start looking for the MAP from the first-guess results!
if l == 0:
map_sum[str(var)] = []
try:
map_.fit()
map_aic[l] = map_.AIC
map_bic[l] = map_.BIC
        except Exception as e:
            logging.warning('MAP fit failed: {}'.format(e))
for i, var in enumerate(model.stochastics):
map_sum[str(var)] = np.hstack((map_sum[str(var)], var.value))
logging.info('Starting condition uniform random')
    for l in np.arange(n_iter, 2 * n_iter):
model.draw_from_prior()
try:
map_.fit()
map_aic[l] = map_.AIC
map_bic[l] = map_.BIC
        except Exception as e:
            logging.warning('MAP fit failed: {}'.format(e))
for i, var in enumerate(model.stochastics):
map_sum[str(var)] = np.hstack((map_sum[str(var)], var.value))
map_sum = pd.DataFrame.from_dict(map_sum)
if distType == 'TruncNormal':
mu_sum, sigma_sum = mcf.par2mom(map_sum['pw0'].values, map_sum['pw1'].values, distType=distType)
map_sum_w = pd.DataFrame(np.array([mu_sum, sigma_sum]).T, columns=['A', 'stdA'])
map_sum_w['freq'] = map_sum['freq']
elif distType == 'Exponential':
mu_sum = np.zeros(np.shape(map_sum['pw0'].values))
sigma_sum = np.zeros(np.shape(map_sum['pw0'].values))
for i, val in enumerate(map_sum['pw0'].values):
mu_sum[i], sigma_sum[i] = mcf.par2mom(map_sum['pw0'].values[i], map_sum['pw1'].values[i], distType=distType)
map_sum_w = pd.DataFrame(np.array([mu_sum, sigma_sum]).T, columns=['A', 'stdA'])
map_sum_w['freq'] = map_sum['freq']
elif distType == 'LogNormal':
map_sum_w = map_sum[['pw0', 'pw1', 'freq']]
map_sum_w.columns = ['A', 'stdA', 'freq']
else:
raise Exception('distType {} not supported'.format(distType))
mb_value = np.min(map_bic[map_bic > 0.])
ma_value = np.min(map_aic[map_aic > 0.])
map_max = map_sum.iloc[np.where(map_bic == mb_value)[0]]
map_std = map_sum.std()
map_w_max = map_sum_w.iloc[np.where(map_bic == mb_value)[0]]
map_w_std = map_sum_w.std()
map_ms = {'bic': mb_value, 'aic': ma_value}
out = {'weights': map_w_max, 'parameters': map_max, 'metrics': map_ms}
logging.info('\nLIST OF MEDIAN MAP VARIABLES for the {} param distribution'.format(distType))
for var, val in map_max.iteritems():
logging.info('MAP {}: {}+-{} (max +- std)'.format(var, val.values, map_std[var]))
logging.info('WEIGHTS MOMENTS')
for var, val in map_w_max.iteritems():
logging.info('MAP {}: {}+-{} (max +- std)'.format(var, val.values, map_w_std[var]))
logging.info('BIC: {} , AIC: {} '.format(map_ms['bic'], map_ms['aic']))
return out
# -------------- RUN MH
def run_mh(model, map_inference,
sampler_param={'n_samples': 150000, 'burn_in': 50000, 'thin': 100,
'remove_outliers': False},
distType='LogNormal'):
logging.info('-----------------MH SAMPLER IN ACTION')
mcmc = pm.MCMC(model)
logging.info('Starting values of the MCMC - MAPS')
for i, var in enumerate(model.stochastics):
var.value = map_inference['parameters'][str(var)].values[0]
mcmc.sample(sampler_param['n_samples'], sampler_param['burn_in'], sampler_param['thin'])
    mean_deviance = np.mean(mcmc.db.trace('deviance')(), axis=0)  # used to compute the DIC
dic = 2 * mean_deviance
mcmc.stats()
mcmc_sum = {}
for i, var in enumerate(model.stochastics):
mcmc_sum[str(var)] = mcmc.trace(str(var))[:]
mcmc_sum = pd.DataFrame.from_dict(mcmc_sum)
mcmc_median = mcmc_sum.median()
mcmc_std = mcmc_sum.std()
if sampler_param['remove_outliers']:
mcmc_sum = remove_outliers_f(mcmc_sum)
if distType == 'TruncNormal':
mu_sum, sigma_sum = mcf.par2mom(mcmc_sum['pw0'].values, mcmc_sum['pw1'].values, distType=distType)
mcmc_sum_w = pd.DataFrame(np.array([mu_sum, sigma_sum]).T, columns=['A', 'stdA'])
mcmc_sum_w['freq'] = mcmc_sum['freq']
elif distType == 'Exponential':
mu_sum = np.zeros(np.shape(mcmc_sum['pw0'].values))
sigma_sum = np.zeros(np.shape(mcmc_sum['pw0'].values))
for i, val in enumerate(mcmc_sum['pw0'].values):
mu_sum[i], sigma_sum[i] = mcf.par2mom(mcmc_sum['pw0'].values[i], mcmc_sum['pw1'].values[i],
distType=distType)
mcmc_sum_w = pd.DataFrame(np.array([mu_sum, sigma_sum]).T, columns=['A', 'stdA'])
mcmc_sum_w['freq'] = mcmc_sum['freq']
elif distType == 'LogNormal':
mcmc_sum_w = mcmc_sum[['pw0', 'pw1', 'freq']]
mcmc_sum_w.columns = ['A', 'stdA', 'freq']
else:
raise Exception('distType {} not valid'.format(distType))
mcmc_w_median = mcmc_sum_w.median()
mcmc_w_std = mcmc_sum_w.std()
logging.info('\n SUMMARY OF MH MC - PARAMETERS - {} WEIGHTS DISTRIBUTION'.format(distType))
for var, val in mcmc_median.iteritems():
logging.info('MH {}: {}+-{} (median +- std)'.format(var, val, mcmc_std[var]))
logging.info('\n SUMMARY OF THE USEFUL INFO OF THE INFERENCE')
for var, val in mcmc_w_median.iteritems():
logging.info('MH {}: {}+-{} (median +- std)'.format(var, val, mcmc_w_std[var]))
logging.info('DIC: {}'.format(dic))
out = {'parameters': mcmc_sum, 'weights': mcmc_sum_w, 'dic': dic, 'mcmc_object': mcmc}
return out
# ---------------- OTHER FUNCTIONS
def nextpow2(i):
n = 1
while n < i: n *= 2
return n
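# e.g. nextpow2(100) -> 128 and nextpow2(1) -> 1 (smallest power of two >= i)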
def exp_1(x, A, tau, c):
A = np.abs(A)
tau = np.abs(tau)
c = np.abs(c)
return A * np.exp(-x / tau) + c
def f_like(x, t1_fix, t2_fix, distType, y_target, n_moments=4):
A = x[0]
stdA = x[1]
nu = x[2]
if A < 0 or stdA < 0:
return 10 ** 20
    if distType == 'LogNormal':
        weightType = 'Moments'
    elif distType in ('TruncNormal', 'Exponential'):
        weightType = 'Parameters'
    else:
        raise Exception('distType {} not supported'.format(distType))
y_theo = mcf.inputs2momenta_full(nu, A, stdA, t2_fix, t1_fix, dist_type=distType, weightType=weightType)
return np.sum((y_theo[:n_moments] - y_target[:n_moments]) ** 2)
def remove_outliers_f(mcmc_sum):
mcmc_quant_1 = mcmc_sum.quantile(0.02)
mcmc_quant_2 = mcmc_sum.quantile(0.98)
    for ii in mcmc_sum:  # I mask the top 2% and bottom 2% data of each column
mcmc_sum[ii] = mcmc_sum[ii].mask(mcmc_sum[ii] < mcmc_quant_1[ii])
mcmc_sum[ii] = mcmc_sum[ii].mask(mcmc_sum[ii] > mcmc_quant_2[ii])
return mcmc_sum.dropna()
def lognorm_param(mean, std):
    return np.log((mean ** 2) / np.sqrt(std ** 2 + mean ** 2)), np.sqrt(np.log(std ** 2 / mean ** 2 + 1))
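# Worked example (illustrative numbers): a log-normal with mean 10 and std 5
# has mu = log(100 / sqrt(125)) ~ 2.19 and sigma = sqrt(log(1.25)) ~ 0.47:
#   mu, sigma = lognorm_param(10., 5.)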
def f_tau_min(x, freq, sigma, y_target):
constant = x[0]
tau2 = x[1]
tau1 = x[2]
if constant < 0.:
return 10 ** 20
if tau2 < 0.:
return 10 ** 20
if tau1 < 0.000:
return 10 ** 20
y_theo = mcf.power_spectrum_tau(freq, constant, tau2, tau1)
return np.sum((y_theo - y_target) ** 2 / sigma ** 2)
|
{
"content_hash": "e9bf9be41f9d46694eec7f67698588a4",
"timestamp": "",
"source": "github",
"line_count": 1088,
"max_line_length": 216,
"avg_line_length": 45.36580882352941,
"alnum_prop": 0.5043761902832368,
"repo_name": "ppuggioni/invivoinfer",
"id": "c638cf8bc864aae53d6b24f4e08b1567e910560f",
"size": "49358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invivoinfer/infer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "270112"
},
{
"name": "Python",
"bytes": "151203"
}
],
"symlink_target": ""
}
|
import functools
import logging
from html.parser import HTMLParser
from io import StringIO
from recipe_scrapers.settings import settings
from ._interface import PluginInterface
logging.basicConfig()
logger = logging.getLogger(__name__)
# Taken from @jksimoniii 's PR:
# - https://github.com/hhursev/recipe-scrapers/pull/346
# Modified to use the new "Plugin" system
# Taken from https://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False # this setting appears to do nothing
self.convert_charrefs = True
self.text = StringIO()
def handle_data(self, d):
self.text.write(d)
def get_data(self):
return self.text.getvalue()
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def stripper(string):
# Deal with HTML and HTML Encoded Characters
string = strip_tags(
f"<tag>{string}<tag>"
) # This is a workaround, since HTMLParser expects valid markup
string = strip_tags(
f"<tag>{string}<tag>"
) # This is another workaround, handles "&amp;"
return string
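# Illustrative example (hypothetical input) showing why two passes are needed:
# the first pass strips the tags and decodes one level of entities, the second
# decodes entities revealed by the first, e.g.
#   stripper("<b>1 cup</b> &amp;amp; sugar")  # -> "1 cup & sugar"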
class HTMLTagStripperPlugin(PluginInterface):
"""
Run the output from the methods listed through the stripper function
defined above.
It is intended to strip away <html><tags></tags></html> seen inside the strings.
We do not want em.
"""
decorate_hosts = ("*",)
run_on_methods = ("title", "instructions", "ingredients")
@classmethod
def run(cls, decorated):
@functools.wraps(decorated)
def decorated_method_wrapper(self, *args, **kwargs):
logger.setLevel(settings.LOG_LEVEL)
class_name = self.__class__.__name__
method_name = decorated.__name__
logger.debug(
f"Decorating: {class_name}.{method_name}() with HTMLTagStripperPlugin plugin."
)
decorated_func_result = decorated(self, *args, **kwargs)
if type(decorated_func_result) is list:
return [stripper(item) for item in decorated_func_result]
else:
return stripper(decorated_func_result)
return decorated_method_wrapper
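# A minimal usage sketch (hypothetical scraper class; in the real package the
# plugin loader applies `run` automatically based on decorate_hosts and
# run_on_methods):
#
#   class DemoScraper:
#       def title(self):
#           return "<b>Pancakes</b>"
#
#   DemoScraper.title = HTMLTagStripperPlugin.run(DemoScraper.title)
#   DemoScraper().title()  # -> "Pancakes"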
|
{
"content_hash": "035ab6ba1e985af327ece66ed7db229f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 94,
"avg_line_length": 28.617283950617285,
"alnum_prop": 0.640207075064711,
"repo_name": "hhursev/recipe-scraper",
"id": "2591815963c951933738b834988c3b81b1a35083",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recipe_scrapers/plugins/html_tags_stripper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
}
|
import json
from tests import BaseTestCase
from redash.models import ApiKey, Dashboard, AccessPermission
from redash.permissions import ACCESS_TYPE_MODIFY
class TestDashboardListResource(BaseTestCase):
def test_create_new_dashboard(self):
dashboard_name = 'Test Dashboard'
rv = self.make_request('post', '/api/dashboards', data={'name': dashboard_name})
self.assertEquals(rv.status_code, 200)
self.assertEquals(rv.json['name'], 'Test Dashboard')
self.assertEquals(rv.json['user_id'], self.factory.user.id)
self.assertEquals(rv.json['layout'], [])
class TestDashboardResourceGet(BaseTestCase):
def test_get_dashboard(self):
d1 = self.factory.create_dashboard()
rv = self.make_request('get', '/api/dashboards/{0}'.format(d1.slug))
self.assertEquals(rv.status_code, 200)
expected = d1.to_dict(with_widgets=True)
actual = json.loads(rv.data)
self.assertResponseEqual(expected, actual)
def test_get_dashboard_filters_unauthorized_widgets(self):
dashboard = self.factory.create_dashboard()
restricted_ds = self.factory.create_data_source(group=self.factory.create_group())
query = self.factory.create_query(data_source=restricted_ds)
vis = self.factory.create_visualization(query=query)
restricted_widget = self.factory.create_widget(visualization=vis, dashboard=dashboard)
widget = self.factory.create_widget(dashboard=dashboard)
dashboard.layout = '[[{}, {}]]'.format(widget.id, restricted_widget.id)
dashboard.save()
rv = self.make_request('get', '/api/dashboards/{0}'.format(dashboard.slug))
self.assertEquals(rv.status_code, 200)
self.assertTrue(rv.json['widgets'][0][1]['restricted'])
self.assertNotIn('restricted', rv.json['widgets'][0][0])
def test_get_non_existing_dashboard(self):
rv = self.make_request('get', '/api/dashboards/not_existing')
self.assertEquals(rv.status_code, 404)
class TestDashboardResourcePost(BaseTestCase):
def test_update_dashboard(self):
d = self.factory.create_dashboard()
new_name = 'New Name'
rv = self.make_request('post', '/api/dashboards/{0}'.format(d.id),
data={'name': new_name, 'layout': '[]'})
self.assertEquals(rv.status_code, 200)
self.assertEquals(rv.json['name'], new_name)
def test_raises_error_in_case_of_conflict(self):
d = self.factory.create_dashboard()
d.name = 'Updated'
d.save()
new_name = 'New Name'
rv = self.make_request('post', '/api/dashboards/{0}'.format(d.id),
data={'name': new_name, 'layout': '[]', 'version': d.version - 1})
self.assertEqual(rv.status_code, 409)
def test_overrides_existing_if_no_version_specified(self):
d = self.factory.create_dashboard()
d.name = 'Updated'
d.save()
new_name = 'New Name'
rv = self.make_request('post', '/api/dashboards/{0}'.format(d.id),
data={'name': new_name, 'layout': '[]'})
self.assertEqual(rv.status_code, 200)
def test_works_for_non_owner_with_permission(self):
d = self.factory.create_dashboard()
user = self.factory.create_user()
new_name = 'New Name'
rv = self.make_request('post', '/api/dashboards/{0}'.format(d.id),
data={'name': new_name, 'layout': '[]', 'version': d.version}, user=user)
self.assertEqual(rv.status_code, 403)
AccessPermission.grant(obj=d, access_type=ACCESS_TYPE_MODIFY, grantee=user, grantor=d.user)
rv = self.make_request('post', '/api/dashboards/{0}'.format(d.id),
data={'name': new_name, 'layout': '[]', 'version': d.version}, user=user)
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.json['name'], new_name)
class TestDashboardResourceDelete(BaseTestCase):
def test_delete_dashboard(self):
d = self.factory.create_dashboard()
rv = self.make_request('delete', '/api/dashboards/{0}'.format(d.slug))
self.assertEquals(rv.status_code, 200)
d = Dashboard.get_by_slug_and_org(d.slug, d.org)
self.assertTrue(d.is_archived)
class TestDashboardShareResourcePost(BaseTestCase):
def test_creates_api_key(self):
dashboard = self.factory.create_dashboard()
res = self.make_request('post', '/api/dashboards/{}/share'.format(dashboard.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.json['api_key'], ApiKey.get_by_object(dashboard).api_key)
def test_requires_admin_or_owner(self):
dashboard = self.factory.create_dashboard()
user = self.factory.create_user()
res = self.make_request('post', '/api/dashboards/{}/share'.format(dashboard.id), user=user)
self.assertEqual(res.status_code, 403)
user.groups.append(self.factory.org.admin_group.id)
user.save()
res = self.make_request('post', '/api/dashboards/{}/share'.format(dashboard.id), user=user)
self.assertEqual(res.status_code, 200)
class TestDashboardShareResourceDelete(BaseTestCase):
def test_disables_api_key(self):
dashboard = self.factory.create_dashboard()
ApiKey.create_for_object(dashboard, self.factory.user)
res = self.make_request('delete', '/api/dashboards/{}/share'.format(dashboard.id))
self.assertEqual(res.status_code, 200)
self.assertIsNone(ApiKey.get_by_object(dashboard))
def test_ignores_when_no_api_key_exists(self):
dashboard = self.factory.create_dashboard()
res = self.make_request('delete', '/api/dashboards/{}/share'.format(dashboard.id))
self.assertEqual(res.status_code, 200)
def test_requires_admin_or_owner(self):
dashboard = self.factory.create_dashboard()
user = self.factory.create_user()
res = self.make_request('delete', '/api/dashboards/{}/share'.format(dashboard.id), user=user)
self.assertEqual(res.status_code, 403)
user.groups.append(self.factory.org.admin_group.id)
user.save()
res = self.make_request('delete', '/api/dashboards/{}/share'.format(dashboard.id), user=user)
self.assertEqual(res.status_code, 200)
|
{
"content_hash": "f9ec1b1fe6c55d4af40815d895b2b987",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 104,
"avg_line_length": 40.41772151898734,
"alnum_prop": 0.6390541810209834,
"repo_name": "ninneko/redash",
"id": "e367094c7711863ae1af96ffb40636a8889ce45e",
"size": "6386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/handlers/test_dashboards.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "239558"
},
{
"name": "HTML",
"bytes": "122828"
},
{
"name": "JavaScript",
"bytes": "281755"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Nginx",
"bytes": "577"
},
{
"name": "Python",
"bytes": "528912"
},
{
"name": "Ruby",
"bytes": "709"
},
{
"name": "Shell",
"bytes": "43388"
}
],
"symlink_target": ""
}
|
from django.db import models
class Country(models.Model):
name = models.CharField(max_length=30)
class EUCountry(Country):
join_date = models.DateField()
class City(models.Model):
name = models.CharField(max_length=30)
country = models.ForeignKey(Country, models.CASCADE)
class EUCity(models.Model):
name = models.CharField(max_length=30)
country = models.ForeignKey(EUCountry, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=30)
born = models.ForeignKey(City, models.CASCADE, related_name='+')
died = models.ForeignKey(City, models.CASCADE, related_name='+')
class PersonProfile(models.Model):
person = models.OneToOneField(Person, models.CASCADE, related_name='profile')
|
{
"content_hash": "09ababaf73ded824d1921b21ba154a5d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 81,
"avg_line_length": 26.06896551724138,
"alnum_prop": 0.7275132275132276,
"repo_name": "simonw/django",
"id": "c84f9ad6b29a140b16951b306015740b60be9d1d",
"size": "756",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/select_for_update/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
"""
MultiRenderWidget
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkOpenGLGPUMultiVolumeRayCastMapper
from vtk import vtkRenderer
from vtk import vtkInteractorStyleTrackballCamera
from vtk import vtkImagePlaneWidget
from vtk import vtkVolume
from vtk import vtkImageData
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
from vtk import vtkVolumeProperty
from vtk import VTK_FLOAT
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtCore import Signal
from PySide.QtCore import Slot
from ui.transformations import TransformationList
from ui.transformations import ClippingBox
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from core.vtkDrawing import CreateBounds
from core.vtkDrawing import CreateOrientationGrid
class MultiRenderWidget(QWidget):
"""
MultiRenderWidget is a widget that can display two datasets: fixed and
moving dataset.
It uses the given volume property to derive how the volumes should be
displayed. This widget also has its own controls that define how the
volumes from the other widgets will be mixed into one visualization.
The hard thing is to find out how to share volumes / volume properties /
resources between widgets while still being linked together. So for
instance when a volume is clipped in one of the single views it should
be immediately visible in this widget. And the problem with the volume
properties is that the volume property for this widget should be linked
to the other widgets so that when they update their volume properties, this
volume property will also be updated. But it can't be the same...
There can be a few visualization modes:
* 'simple' mix mode
* colorized mix mode
Simple mix mode is a mode that displays both datasets in the same way as
they are visualized in the other views. Two controls are given to provide
a way of setting the opacity of both volumes so that the user can mix the
datasets to a nice visualization.
    Colorized mix mode makes grayscale visualizations of the two datasets and
    blends them in contrasting colors so that their overlap and differences
    stand out.
"""
dataChanged = Signal()
updated = Signal()
def __init__(self):
super(MultiRenderWidget, self).__init__()
# Default volume renderer
self.renderer = vtkRenderer()
self.renderer.SetBackground2(0.4, 0.4, 0.4)
self.renderer.SetBackground(0.1, 0.1, 0.1)
self.renderer.SetGradientBackground(True)
self.renderer.SetInteractive(1)
self.renderer.SetLayer(0)
# Overlay renderer which is synced with the default renderer
self.rendererOverlay = vtkRenderer()
self.rendererOverlay.SetLayer(1)
self.rendererOverlay.SetInteractive(0)
self.renderer.GetActiveCamera().AddObserver("ModifiedEvent", self._syncCameras)
self.rwi = QVTKRenderWindowInteractor(parent=self)
self.rwi.SetInteractorStyle(vtkInteractorStyleTrackballCamera())
self.rwi.GetRenderWindow().AddRenderer(self.renderer)
self.rwi.GetRenderWindow().AddRenderer(self.rendererOverlay)
self.rwi.GetRenderWindow().SetNumberOfLayers(2)
self.rwi.SetDesiredUpdateRate(0)
self._imagePlaneWidgets = [vtkImagePlaneWidget() for i in range(3)]
for index in range(3):
self._imagePlaneWidgets[index].DisplayTextOn()
self._imagePlaneWidgets[index].SetInteractor(self.rwi)
self.mapper = vtkOpenGLGPUMultiVolumeRayCastMapper()
self.mapper.SetBlendModeToComposite()
self.volume = vtkVolume()
self.volume.SetMapper(self.mapper)
self.renderer.AddViewProp(self.volume)
self.fixedGridItems = []
self.movingGridItems = []
self.orientationGridItems = []
# Create two empty datasets
self.fixedImageData = CreateEmptyImageData()
self.movingImageData = CreateEmptyImageData()
self.fixedVolumeProperty = vtkVolumeProperty()
self.movingVolumeProperty = vtkVolumeProperty()
color, opacityFunction = CreateEmptyFunctions()
self.fixedVolumeProperty.SetColor(color)
self.fixedVolumeProperty.SetScalarOpacity(opacityFunction)
self.movingVolumeProperty.SetColor(color)
self.movingVolumeProperty.SetScalarOpacity(opacityFunction)
self.visualization = None # MultiVolumeVisualization
self.clippingBox = ClippingBox()
self.clippingBox.setWidget(self)
self.mapper.SetInputData(0, self.fixedImageData)
self.mapper.SetInputData(1, self.movingImageData)
self._transformations = TransformationList()
self._transformations.transformationChanged.connect(self.updateTransformation)
self._shouldResetCamera = False
self.setMinimumWidth(340)
self.setMinimumHeight(340)
layout = QGridLayout(self)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.rwi, 0, 0)
self.setLayout(layout)
def render(self):
if self._shouldResetCamera:
self.renderer.ResetCamera()
self._shouldResetCamera = False
self.rwi.Render()
# Prevent warning messages on OSX by not asking to render
# when the render window has never rendered before
if not self.rwi.GetRenderWindow().GetNeverRendered():
self.rwi.GetRenderWindow().Render()
@Slot(object)
def setFixedData(self, imageData):
self._cleanUpGrids()
self.fixedImageData = imageData
if self.fixedImageData is None:
self.fixedImageData = CreateEmptyImageData()
if self.movingImageData is None:
self.movingImageData = CreateEmptyImageData()
self.mapper.SetInputData(0, self.fixedImageData)
self.mapper.SetInputData(1, self.movingImageData)
for index in range(3):
self._imagePlaneWidgets[index].SetInputData(self.fixedImageData)
self._imagePlaneWidgets[index].SetPlaneOrientation(index)
self._updateGrids()
self._createClippingBox()
self._shouldResetCamera = True
@Slot(object)
def setMovingData(self, imageData):
self._cleanUpGrids()
self.movingImageData = imageData
if self.movingImageData is None:
self.movingImageData = CreateEmptyImageData()
if self.fixedImageData is None:
self.fixedImageData = CreateEmptyImageData()
self.mapper.SetInputData(0, self.fixedImageData)
self.mapper.SetInputData(1, self.movingImageData)
self._updateGrids()
self._shouldResetCamera = True
def setVolumeVisualization(self, visualization):
self.visualization = visualization
if self.visualization is None:
color, opacityFunction = CreateEmptyFunctions()
self.fixedVolumeProperty = vtkVolumeProperty()
self.fixedVolumeProperty.SetColor(color)
self.fixedVolumeProperty.SetScalarOpacity(opacityFunction)
self.movingVolumeProperty = vtkVolumeProperty()
self.movingVolumeProperty.SetColor(color)
self.movingVolumeProperty.SetScalarOpacity(opacityFunction)
else:
self.fixedVolumeProperty = self.visualization.fixedVolProp
self.movingVolumeProperty = self.visualization.movingVolProp
self.visualization.setMapper(self.mapper)
if self.visualization.fixedVisualization:
self._updateMapper(self.visualization.fixedVisualization, 1)
if self.visualization.movingVisualization:
self._updateMapper(self.visualization.movingVisualization, 2)
self._updateVolumeProperties()
def _updateGrids(self):
if not self._hasImageData():
return
if self._hasMovingImageData():
self.movingGridItems = CreateBounds(self.movingImageData.GetBounds())
boundsFixed = self.fixedImageData.GetBounds()
boundsMoving = self.movingImageData.GetBounds()
        maxBounds = [max(x, y) for x, y in zip(boundsFixed, boundsMoving)]
self.orientationGridItems = CreateOrientationGrid(maxBounds, self.renderer.GetActiveCamera())
for item in (self.movingGridItems + self.fixedGridItems + self.orientationGridItems):
self.renderer.AddViewProp(item)
def _cleanUpGrids(self):
for item in (self.fixedGridItems + self.movingGridItems + self.orientationGridItems):
self.renderer.RemoveViewProp(item)
self.fixedGridItems = []
self.movingGridItems = []
self.orientationGridItems = []
def _createClippingBox(self):
if not self._hasImageData():
self.clippingBox.showClippingBox(False)
else:
if self._hasFixedImageData():
self.clippingBox.setImageData(self.fixedImageData)
elif self._hasMovingImageData():
self.clippingBox.setImageData(self.movingImageData)
else:
self.clippingBox.enable(False)
def _hasImageData(self):
return self._hasFixedImageData() or self._hasMovingImageData()
def _hasFixedImageData(self):
return self._isActualImageData(self.fixedImageData)
def _hasMovingImageData(self):
return self._isActualImageData(self.movingImageData)
def _isActualImageData(self, imageData):
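        # The dummy volumes produced by CreateEmptyImageData are exactly
        # 3x3x3, so any other shape is treated as real image data.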
dimensions = imageData.GetDimensions()
return dimensions != (3, 3, 3)
# Properties
@property
def transformations(self):
return self._transformations
@transformations.setter
def transformations(self, value):
self._transformations.copyFromTransformations(value)
# Slots
@Slot(object)
def setSlices(self, slices):
for sliceIndex in range(len(slices)):
if slices[sliceIndex]:
self._imagePlaneWidgets[sliceIndex].On()
else:
self._imagePlaneWidgets[sliceIndex].Off()
def showClippingBox(self, show):
self.clippingBox.showClippingBox(show)
self.render()
def showClippingPlanes(self, show):
self.clippingBox.showClippingPlanes(show)
self.render()
def resetClippingBox(self):
self.clippingBox.resetClippingBox()
self.render()
@Slot()
def updateTransformation(self):
transform = self._transformations.completeTransform()
self.mapper.SetSecondInputUserTransform(transform)
for item in self.movingGridItems:
item.SetUserTransform(transform)
self.render()
# Private methods
def _updateMapper(self, volVis, volNr):
shaderType = volVis.shaderType()
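        # For the MIP and MIDA shaders the window bounds are normalised to
        # [0, 1] over the data range before being handed to the mapper.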
if volNr == 1:
self.mapper.SetShaderType1(shaderType)
if shaderType == 2: # MIDA
lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
self.mapper.SetLowerBound1(lowerBound)
self.mapper.SetUpperBound1(upperBound)
self.mapper.SetBrightness1(volVis.brightness / 100.0)
if shaderType == 1: # MIP
lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
self.mapper.SetLowerBound1(lowerBound)
self.mapper.SetUpperBound1(upperBound)
else:
self.mapper.SetShaderType2(shaderType)
if shaderType == 2: # MIDA
lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
self.mapper.SetLowerBound2(lowerBound)
self.mapper.SetUpperBound2(upperBound)
self.mapper.SetBrightness2(volVis.brightness / 100.0)
if shaderType == 1: # MIP
lowerBound = (volVis.lowerBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
upperBound = (volVis.upperBound - volVis.minimum) / (volVis.maximum - volVis.minimum)
self.mapper.SetLowerBound2(lowerBound)
self.mapper.SetUpperBound2(upperBound)
def _updateVolumeProperties(self):
"""
Private method to update the volume properties.
"""
if self.volume.GetProperty() != self.fixedVolumeProperty:
self.volume.SetProperty(self.fixedVolumeProperty)
if self.mapper.GetProperty2() != self.movingVolumeProperty:
self.mapper.SetProperty2(self.movingVolumeProperty)
self.render()
def _syncCameras(self, camera, ev):
"""
Camera modified event callback. Copies the parameters of
the renderer camera into the camera of the overlay so they
stay synced at all times.
"""
self.rendererOverlay.GetActiveCamera().ShallowCopy(camera)
# Helper methods
def CreateEmptyImageData():
"""
Create an empty image data object. The multi volume mapper expects two
inputs, so if there is only one dataset loaded, a dummy dataset can be
created using this method. Be sure to also set a dummy volume property
(CreateVolumeVisualizationInvisible) so that the volume does not show up in
the renderer.
:rtype: vtkImageData
"""
dimensions = [3, 3, 3]
imageData = vtkImageData()
imageData.Initialize()
imageData.SetDimensions(dimensions)
imageData.SetSpacing(1, 1, 1)
imageData.SetOrigin(10, 10, 0)
imageData.AllocateScalars(VTK_FLOAT, 1)
    for z in xrange(dimensions[2]):
        for y in xrange(dimensions[1]):
            for x in xrange(dimensions[0]):
imageData.SetScalarComponentFromDouble(x, y, z, 0, 0.0)
return imageData
def CreateEmptyFunctions():
"""
:rtype: vtkColorTransferFunction, vtkPiecewiseFunction
"""
# Transfer functions and properties
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(-1000, 0.0, 0.0, 0.0)
colorFunction.AddRGBPoint(1000, 0.0, 0.0, 0.0)
opacityFunction = vtkPiecewiseFunction()
opacityFunction.AddPoint(-1000, 0.0)
opacityFunction.AddPoint(1000, 0.0)
return colorFunction, opacityFunction
|
{
"content_hash": "ba1f645ff7422f7d2a186e145f481a2d",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 95,
"avg_line_length": 34.23924731182796,
"alnum_prop": 0.7733375206092487,
"repo_name": "berendkleinhaneveld/Registrationshop",
"id": "2240dcbf505e5b25d952d2ed33ebf3ed4a2fae4d",
"size": "12737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/widgets/MultiRenderWidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "440232"
},
{
"name": "Shell",
"bytes": "880"
}
],
"symlink_target": ""
}
|
import csv # this was the solution from Question1
def open_with_csv(filename, d='\t'):
data = []
with open(filename, encoding='utf-8') as tsvin:
tsvin = csv.reader(tsvin, delimiter=d)
for row in tsvin:
data.append(row)
return data
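# Example usage (hypothetical file names):
#   rows = open_with_csv('data.tsv')         # tab-delimited by default
#   rows = open_with_csv('data.csv', d=',')  # comma-delimited
# Each element of `rows` is a list of the fields in one line.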
|
{
"content_hash": "eb0d5a2a2b266ec0d6c01c2bd86d8716",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6153846153846154,
"repo_name": "katychuang/python-data-sci-basics",
"id": "39f27148d19571ec6fd7cdf70a57c5a333fde072",
"size": "273",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "challenges/solutions/s2q2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35326"
}
],
"symlink_target": ""
}
|