id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9636533 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible_collections.netapp.ontap.tests.unit.compat import unittest
from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.modules.na_ontap_export_policy_rule \
import NetAppontapExportRule as policy_rule # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Serialize *args* so that AnsibleModule picks them up on instantiation."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    # AnsibleModule reads its input from this private module-level attribute.
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so the test case can intercept the result."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so the test case can intercept the failure."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.exit_json; packages the return data into an exception."""
    # Real exit_json guarantees a 'changed' key, so the stub does too.
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.fail_json; packages the failure data into an exception."""
    payload = dict(kwargs, failed=True)
    raise AnsibleFailJson(payload)
class MockONTAPConnection(object):
    """Mock of a server connection to an ONTAP host.

    Depending on ``kind`` ('rule', 'rules' or 'policy'), invoke_successfully
    returns canned XML instead of talking to a real filer.
    """

    def __init__(self, kind=None, data=None):
        """Remember which canned response to serve and the data to build it from."""
        self.kind = kind
        self.data = data
        self.xml_in = None
        self.xml_out = None

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        """Record the request and return fake XML selected by self.kind."""
        self.xml_in = xml
        if self.kind == 'rule':
            xml = self.build_policy_rule(self.data)
        elif self.kind == 'rules':
            xml = self.build_policy_rule(self.data, multiple=True)
        elif self.kind == 'policy':
            xml = self.build_policy()
        self.xml_out = xml
        return xml

    @staticmethod
    def build_policy_rule(policy, multiple=False):
        """Build fake XML for an export-rule-info response."""
        rule_info = {
            'policy-name': policy['name'],
            'client-match': policy['client_match'],
            'ro-rule': {'security-flavor': 'any'},
            'rw-rule': {'security-flavor': 'any'},
            'protocol': {'access-protocol': policy['protocol']},
            'super-user-security': {'security-flavor': 'any'},
            'is-allow-set-uid-enabled': 'false',
            'rule-index': policy['rule_index'],
            'anonymous-user-id': policy['anonymous_user_id'],
        }
        xml = netapp_utils.zapi.NaElement('xml')
        xml.translate_struct({
            'attributes-list': {'export-rule-info': rule_info},
            # Two records simulate an ambiguous match for the delete tests.
            'num-records': 2 if multiple is True else 1,
        })
        return xml

    @staticmethod
    def build_policy():
        """Build fake XML for an export-policy-get-iter response."""
        xml = netapp_utils.zapi.NaElement('xml')
        xml.translate_struct({'num-records': 1})
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Redirect exit_json/fail_json to the exception-raising stubs so each
        # test can assert on the module's result instead of exiting.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # Canonical rule attributes shared by all tests below.
        self.mock_rule = {
            'name': 'test',
            'protocol': 'nfs',
            'client_match': '172.16.17.32',
            'rule_index': 10,
            'anonymous_user_id': '65534'
        }

    def mock_rule_args(self):
        # Full module-argument dict derived from self.mock_rule plus
        # connection boilerplate (hostname/username/password).
        return {
            'name': self.mock_rule['name'],
            'client_match': self.mock_rule['client_match'],
            'vserver': 'test',
            'protocol': self.mock_rule['protocol'],
            'rule_index': self.mock_rule['rule_index'],
            'anonymous_user_id': self.mock_rule['anonymous_user_id'],
            'ro_rule': 'any',
            'rw_rule': 'any',
            'hostname': 'test',
            'username': 'test_user',
            'password': '<PASSWORD>!'
        }

    def get_mock_object(self, kind=None):
        """
        Helper method to return an na_ontap_firewall_policy object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_firewall_policy object
        """
        # NOTE(review): despite the docstring this returns the export-policy-rule
        # module object (policy_rule) — the docstring looks copy-pasted; confirm.
        obj = policy_rule()
        obj.autosupport_log = Mock(return_value=None)
        if kind is None:
            obj.server = MockONTAPConnection()
        else:
            obj.server = MockONTAPConnection(kind=kind, data=self.mock_rule_args())
        return obj

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            policy_rule()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_get_nonexistent_rule(self):
        ''' Test if get_export_policy_rule returns None for non-existent policy '''
        set_module_args(self.mock_rule_args())
        result = self.get_mock_object().get_export_policy_rule()
        assert result is None

    def test_get_nonexistent_policy(self):
        ''' Test if get_export_policy returns None for non-existent policy '''
        set_module_args(self.mock_rule_args())
        result = self.get_mock_object().get_export_policy()
        assert result is None

    def test_get_existing_rule(self):
        ''' Test if get_export_policy_rule returns rule details for existing policy '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('rule').get_export_policy_rule()
        assert result['name'] == data['name']
        assert result['client_match'] == data['client_match']
        assert result['ro_rule'] == ['any']  # from build_rule()

    def test_get_existing_policy(self):
        ''' Test if get_export_policy returns policy details for existing policy '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('policy').get_export_policy()
        assert result is not None

    def test_create_missing_param_error(self):
        ''' Test validation error from create '''
        data = self.mock_rule_args()
        del data['ro_rule']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_mock_object().apply()
        msg = 'Error: Missing required param for creating export policy rule ro_rule'
        assert exc.value.args[0]['msg'] == msg

    def test_successful_create(self):
        ''' Test successful create '''
        set_module_args(self.mock_rule_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object().apply()
        assert exc.value.args[0]['changed']

    def test_create_idempotency(self):
        ''' Test create idempotency '''
        set_module_args(self.mock_rule_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert not exc.value.args[0]['changed']

    def test_successful_delete_without_rule_index(self):
        ''' Test delete existing job '''
        data = self.mock_rule_args()
        data['state'] = 'absent'
        del data['rule_index']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert exc.value.args[0]['changed']

    def test_delete_idempotency(self):
        ''' Test delete idempotency '''
        data = self.mock_rule_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object().apply()
        assert not exc.value.args[0]['changed']

    def test_successful_modify(self):
        ''' Test successful modify protocol '''
        data = self.mock_rule_args()
        data['protocol'] = ['cifs']
        data['allow_suid'] = 'true'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_mock_object('rule').apply()
        assert exc.value.args[0]['changed']

    def test_error_on_ambiguous_delete(self):
        ''' Test error if multiple entries match for a delete '''
        # 'rules' serves num-records=2, which the module must refuse to delete
        # without an explicit rule_index.
        data = self.mock_rule_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_mock_object('rules').apply()
        msg = "Multiple export policy rules exist.Please specify a rule_index to delete"
        assert exc.value.args[0]['msg'] == msg

    def test_helper_query_parameters(self):
        ''' Test helper method set_query_parameters() '''
        data = self.mock_rule_args()
        set_module_args(data)
        result = self.get_mock_object('rule').set_query_parameters()
        print(str(result))
        assert 'query' in result
        assert 'export-rule-info' in result['query']
        assert result['query']['export-rule-info']['rule-index'] == data['rule_index']
| StarcoderdataPython |
5002816 | <gh_stars>1000+
"""
.. deprecated:: 1.20
*This module is deprecated. Instead of importing functions from*
``numpy.dual``, *the functions should be imported directly from NumPy
or SciPy*.
Aliases for functions which may be accelerated by SciPy.
SciPy_ can be built to use accelerated or otherwise improved libraries
for FFTs, linear algebra, and special functions. This module allows
developers to transparently support these accelerated functions when
SciPy is available but still support users who have only installed
NumPy.
.. _SciPy : https://www.scipy.org
"""
import warnings
warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
'use the functions directly from numpy or scipy.',
category=DeprecationWarning,
stacklevel=2)
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
# Usage --- from numpy.dual import fft, inv
__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
import numpy.linalg as linpkg
import numpy.fft as fftpkg
from numpy.lib import i0
import sys
fft = fftpkg.fft
ifft = fftpkg.ifft
fftn = fftpkg.fftn
ifftn = fftpkg.ifftn
fft2 = fftpkg.fft2
ifft2 = fftpkg.ifft2
norm = linpkg.norm
inv = linpkg.inv
svd = linpkg.svd
solve = linpkg.solve
det = linpkg.det
eig = linpkg.eig
eigvals = linpkg.eigvals
eigh = linpkg.eigh
eigvalsh = linpkg.eigvalsh
lstsq = linpkg.lstsq
pinv = linpkg.pinv
cholesky = linpkg.cholesky
_restore_dict = {}
def register_func(name, func):
    """Replace the dual function *name* with *func*, remembering the original."""
    if name not in __all__:
        raise ValueError("{} not a dual function.".format(name))
    module_globals = sys._getframe(0).f_globals
    # Save the current implementation so restore_func can undo this.
    _restore_dict[name] = module_globals[name]
    module_globals[name] = func
def restore_func(name):
    """Restore the original implementation of *name*, if one was saved."""
    if name not in __all__:
        raise ValueError("{} not a dual function.".format(name))
    # Silently do nothing when the name was never registered.
    if name in _restore_dict:
        sys._getframe(0).f_globals[name] = _restore_dict[name]
def restore_all():
    """Restore every function previously replaced through register_func."""
    for registered_name in list(_restore_dict):
        restore_func(registered_name)
| StarcoderdataPython |
3509115 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def load_data(rootdir='./'):
    """Load the training matrix and label vector from text files under *rootdir*.

    Expects ``x_train.txt`` (whitespace-separated floats) and ``y_train.txt``
    (integer labels) inside *rootdir*; returns them as NumPy arrays.
    """
    print('load data \n')
    features = np.loadtxt(rootdir + 'x_train.txt', dtype=str).astype(float)
    labels = np.loadtxt(rootdir + 'y_train.txt', dtype=str).astype(int)
    print('x_train: [%d, %d], y_train:[%d,]' % (
        features.shape[0], features.shape[1], labels.shape[0]))
    return features, labels
| StarcoderdataPython |
3483078 | #!/usr/bin/python
import json, math, sys, time, uuid
from random import randint, sample
def output_file(file):
    """Open *file* for writing and return the file object (caller must close it)."""
    return open(file, 'w')
def create_node(node, in_dict, out_dict, in_dict_attrs, out_dict_attrs, k, num_nodes, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max, num_node_attrs, num_edge_attrs, num_edges, mini, mino, maxi, maxo, fo):
    """Emit one GraphSON-style vertex line to *fo* and return updated counters.

    Python 2 code (uses xrange). The in/out dicts carry edges promised by
    earlier vertices so each edge appears on both endpoints.
    """
    num_attrs = randint(node_attrs_min, node_attrs_max)
    attrs_list = 0
    if node_attrs_max != 0:
        # Pool of distinct attribute-name suffixes to draw from.
        attrs_list = sample(xrange(node_attrs_max-1), node_attrs_max-1)
    # create the vertex
    node_str = "{\"_id\":"+str(node)
    i = 0
    while i < num_attrs:
        if i == 0:
            # First attribute is always "name"; the rest are typeN keys.
            node_str += ",\"name"+"\":\""+str(uuid.uuid1())+"\""
        else:
            node_str += ",\"type"+str(attrs_list[i-1])+"\":\""+str(uuid.uuid1())+"\""
        num_node_attrs += 1
        i += 1
    # create the in edges (note: in/out dict arguments are swapped on purpose,
    # since an out-edge recorded by a peer is this vertex's in-edge)
    node_str, num_edges, num_edge_attrs, out_dict, out_dict_attrs, in_dict, in_dict_attrs = add_edges("in", "out", out_dict, out_dict_attrs, in_dict, in_dict_attrs, node_str, node, k, num_nodes, num_edges, num_edge_attrs, edge_attrs_min, edge_attrs_max)
    # create the out edges
    node_str, num_edges, num_edge_attrs, in_dict, in_dict_attrs, out_dict, out_dict_attrs = add_edges("out", "in", in_dict, in_dict_attrs, out_dict, out_dict_attrs, node_str, node, k, num_nodes, num_edges, num_edge_attrs, edge_attrs_min, edge_attrs_max)
    node_str += "}\n"
    fo.write(node_str)
    return num_node_attrs, num_edge_attrs, num_edges, in_dict, out_dict, in_dict_attrs, out_dict_attrs
def add_edges(direction1, direction2, dict, dict_attrs, dict2, dict_attrs2, node_str, node, k, num_nodes, num_edges, num_edge_attrs, edge_attrs_min, edge_attrs_max):
    """Append the "_inE"/"_outE" JSON array for one direction onto *node_str*.

    Consumes edges previously promised to this node (dict2/dict_attrs2) and
    records the reverse half of each new edge in dict/dict_attrs for the
    target vertex to pick up later.

    NOTE(review): this function reads the module-level globals ``mini`` and
    ``maxi`` (set in __main__) rather than taking them as parameters — it will
    NameError if called before process_args has run. Also shadows the builtin
    ``dict``.
    """
    # !! NOTE hard coded list of labels (relationship types) for edges
    directed_edge_labels = ["knows", "contacted", "manager_of", "works_for"]
    i = flag = 0
    edges = randint(mini, maxi/2)
    if node in dict2:
        # Emit the edges other vertices already created toward this node.
        node_str += ",\"_"+direction1+"E\":["
        node_str += json.dumps(dict2[node])
        node_str = node_str[:-1]
        node_str += dict_attrs2[node]
        node_str += "}"
        i += len(dict2[node])
        if i >= edges:
            node_str += "]"
        del dict2[node]
        del dict_attrs2[node]
    while i < edges:
        flag = 1
        node_str += ","
        if i == 0:
            node_str += "\"_"+direction1+"E\":["
        num_attrs = randint(edge_attrs_min, edge_attrs_max)
        attrs_list = 0
        if edge_attrs_max != 0:
            attrs_list = sample(xrange(edge_attrs_max-1), edge_attrs_max-1)
        # !! TODO next three lines need to be fixed
        #directionV = node
        #while directionV == node:
        #    directionV = randint(0,num_nodes-1)
        # Target stays within the current cluster [node+1, k-1]; a node at the
        # cluster edge points at itself (self-loop).
        if node+1 >= k-1:
            directionV = node
        else:
            directionV = randint(node+1, k-1)
        random_label = str(directed_edge_labels[randint(0, len(directed_edge_labels)-1)])
        node_str += "{\"_label\":\""+random_label+"\",\"_id\":"+str(num_nodes+num_edges)+",\"_"+direction2+"V\":"+str(directionV)
        j = 0
        attr_str = ""
        while j < num_attrs:
            if j == 0:
                attr_str += ",\"type-"+str(attrs_list[j-1])+"\":\""+str(uuid.uuid1())+"\""
            else:
                attr_str += ",\"type-"+str(attrs_list[j-1]+1)+"\":\""+str(uuid.uuid1())+"\""
            num_edge_attrs += 1
            j += 1
        node_str += attr_str
        node_str += "}"
        num_edges += 1
        # Record the mirror edge for the target vertex.
        dV = "_"+direction1+"V"
        dict[directionV] = {"_label":random_label,"_id":str(num_nodes+num_edges),dV:node}
        dict_attrs[directionV] = attr_str
        # NOTE(review): num_edges is incremented twice per created edge
        # (here and three lines above) — confirm whether that is intended.
        num_edges += 1
        i += 1
    if flag == 1:
        node_str += "]"
    return node_str, num_edges, num_edge_attrs, dict, dict_attrs, dict2, dict_attrs2
def generate_graph(clusters, file, num_nodes, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max, mini, mino, maxi, maxo):
    """Write the whole graph to *file*, one vertex per line, cluster by cluster.

    Returns total node-attribute, edge-attribute and edge counters.
    Python 2 code (print statements, integer division for cluster bounds).
    """
    fo = output_file(file)
    num_node_attrs = num_edge_attrs = num_edges = 0
    i = 0
    print "Creating nodes . . ."
    prev = -1
    for n in range(clusters):
        # Pending-edge bookkeeping is reset per cluster, so edges never cross
        # cluster boundaries.
        in_dict = {}
        out_dict = {}
        in_dict_attrs = {}
        out_dict_attrs = {}
        j = i
        k = (num_nodes/clusters)*(n+1)
        for node in range(j, k):
            num_node_attrs, num_edge_attrs, num_edges, in_dict, out_dict, in_dict_attrs, out_dict_attrs = create_node(node, in_dict, out_dict, in_dict_attrs, out_dict_attrs, k, num_nodes, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max, num_node_attrs, num_edge_attrs, num_edges, mini, mino, maxi, maxo, fo)
            # Progress report every 10%.
            percent_complete = percentage(i, num_nodes)
            if percent_complete % 10 == 0 and prev != percent_complete:
                prev = percent_complete
                print str(percent_complete)+"% finished"
            i += 1
    return num_node_attrs, num_edge_attrs, num_edges
def percentage(part, whole):
    """Return how many whole percent *part* is of *whole* (truncated toward zero)."""
    scaled = 100 * float(part)
    return int(scaled / float(whole))
def print_help():
    """Print usage for every command-line flag, then exit with status 0."""
    print "\n-n \t<num of nodes> (default is 1000, must be greater than 0)\n"
    print "-c \t<number of clusters> (must be less than or equal to the number of nodes)\n"
    print "-maxi \t<max in degree of nodes> (only used with directed flag, default is 10)\n"
    print "-maxo \t<max out degree of nodes> (only used with directed flag, default is 10)\n"
    print "-mini \t<min in degree of nodes> (only used with directed flag, default is 1)\n"
    print "-mino \t<min out degree of nodes> (only used with directed flag, default is 1)\n"
    print "-minna \t<min num of node attributes> (default is 2, must be at least 1)\n"
    print "-maxna \t<max num of node attributes> (default is 2)\n"
    print "-minea \t<min num of edge attributes> (default is 0)\n"
    print "-maxea \t<max num of edge attributes> (default is 0)\n"
    print "-o \t<path to output file> (default is 'graph')\n"
    print "-h \thelp\n"
    sys.exit(0)
def check_attrs(arg, args, i, flag):
    """Parse args[i+1] as an integer attribute count, enforcing minimum *flag*.

    Returns the parsed value; any parse failure or out-of-range value triggers
    print_help(), which exits the program.
    """
    try:
        arg = int(args[i+1])
        if arg < flag:
            print_help()
    except:
        # NOTE: the bare except also catches the SystemExit raised by the
        # print_help() call just above, so on a range violation help is
        # printed twice before the program finally exits.
        print_help()
    return arg
def process_args(args):
    """Parse the raw argv list into generator settings, validating as it goes.

    Any malformed value or inconsistent combination calls print_help(), which
    exits. Returns the full settings tuple consumed by __main__.
    """
    # default initialization
    num_nodes = 1000
    mini = mino = 1
    maxi = maxo = 10
    node_attrs_min = 2
    node_attrs_max = 2
    edge_attrs_min = 0
    edge_attrs_max = 0
    clusters = 1
    output = "graph"
    # process rest of the args (flags are assumed to come in "-flag value"
    # pairs, hence the i += 2 stride at the bottom)
    i = 0
    while i < len(args):
        if args[i] == "-n":
            try:
                num_nodes = int(args[i+1])
                if num_nodes < 1:
                    print_help()
            except:
                print_help()
        elif args[i] == "-minna":
            node_attrs_min = check_attrs(node_attrs_min, args, i, 1)
        elif args[i] == "-maxna":
            node_attrs_max = check_attrs(node_attrs_max, args, i, node_attrs_min)
        elif args[i] == "-minea":
            edge_attrs_min = check_attrs(edge_attrs_min, args, i, 0)
        elif args[i] == "-maxea":
            edge_attrs_max = check_attrs(edge_attrs_max, args, i, edge_attrs_min)
        elif args[i] == "-mini":
            try:
                mini = int(args[i+1])
            except:
                print_help()
        elif args[i] == "-mino":
            try:
                mino = int(args[i+1])
            except:
                print_help()
        elif args[i] == "-maxi":
            try:
                maxi = int(args[i+1])
            except:
                print_help()
        elif args[i] == "-maxo":
            try:
                maxo = int(args[i+1])
            except:
                print_help()
        elif args[i] == "-c":
            try:
                clusters = int(args[i+1])
            except:
                print_help()
        elif args[i] == "-o":
            try:
                output = args[i+1]
                # Touch the file early so an unwritable path fails fast.
                f = open(output, 'w')
                f.close()
            except:
                print_help()
        else:
            print_help()
        i += 2
    # Cross-option consistency checks.
    if clusters >= num_nodes:
        print_help()
    if maxi < mini or maxo < mino:
        print_help()
    if node_attrs_max < node_attrs_min or edge_attrs_max < edge_attrs_min:
        print_help()
    return clusters, output, num_nodes, mini, mino, maxi, maxo, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max
def get_args():
    """Return the command-line arguments without the program name.

    Replaces the original copy-loop-then-slice with the idiomatic slice;
    sys.argv[0] is the script path, everything after it is a user argument.
    """
    return sys.argv[1:]
if __name__ == "__main__":
start_time = time.time()
args = get_args()
clusters, output, num_nodes, mini, mino, maxi, maxo, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max = process_args(args)
print "Generating the following graph:"
print "\tOutput File: \t\t\t",output
print "\tNodes: \t\t\t\t",num_nodes
print "\tClusters: \t\t\t",clusters
print "\tMinimum In Degree: \t\t",mini
print "\tMaximum In Degree: \t\t",maxi
print "\tMinimum Out Degree: \t\t",mino
print "\tMaximum Out Degree: \t\t",maxo
print "\tMinimum Node Attributes: \t",node_attrs_min
print "\tMaximum Node Attributes: \t",node_attrs_max
print "\tMinimum Edge Attributes: \t",edge_attrs_min
print "\tMaximum Edge Attributes: \t",edge_attrs_max
num_node_attrs, num_edge_attrs, num_edges = generate_graph(clusters, output, num_nodes, node_attrs_min, node_attrs_max, edge_attrs_min, edge_attrs_max, mini, mino, maxi, maxo)
print "Number of edges created =",num_edges
print "Average number of node attributes =",num_node_attrs/num_nodes
if num_edges != 0:
print "Average number of edge attributes =",num_edge_attrs/num_edges
else:
print "Average number of edge attributes = 0"
print "Took",time.time() - start_time,"seconds to complete."
| StarcoderdataPython |
6612310 | import pubchempy as pcp
import logging
from src import setting
# Setting up log file: route this module's messages to the run-specific log
# file configured by the project `setting` module, appending across runs.
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh = logging.FileHandler(setting.run_specific_log, mode='a')
fh.setFormatter(fmt=formatter)
# Module logger — functions below should log through this, not the root logger.
logger = logging.getLogger("Processing chemicals")
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
def smile2ichikey(smile):
    """Resolve a SMILES string to its InChIKey via the PubChem API.

    Returns a single InChIKey string when PubChem matches exactly one
    compound, a list of InChIKeys when several compounds match, or None
    when the lookup fails (network error, invalid SMILES, ...).
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchikey
        # Fix: the original called module-level logging.info, bypassing the
        # file handler configured on `logger` at the top of this module.
        logger.info("Found more than one inchikey")
        return [x.inchikey for x in compounds]
    except Exception:
        # Narrowed from a bare except; callers treat None as "lookup failed".
        logger.exception("InChIKey lookup failed for SMILES %s", smile)
        return None
def smile2ichi(smile):
    """Resolve a SMILES string to its InChI via the PubChem API.

    Returns a single InChI string when PubChem matches exactly one compound,
    a list of InChI strings when several compounds match, or None when the
    lookup fails.
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchi
        # Bug fix: the original returned `x.inchikey` in this branch (copy-paste
        # from smile2ichikey) even though this function is supposed to return
        # InChI values. Also log through the configured module `logger`.
        logger.info("Found more than one inchi")
        return [x.inchi for x in compounds]
    except Exception:
        # Narrowed from a bare except; callers treat None as "lookup failed".
        logger.exception("InChI lookup failed for SMILES %s", smile)
        return None
| StarcoderdataPython |
1764637 | # import the necessary packages
from imutils.video import VideoStream
import argparse
import numpy as np, cv2
import imutils, time
# import the necessary packages
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
class CentroidTracker():
    """Greedy centroid-based multi-object tracker.

    Assigns stable integer IDs to detections by matching each frame's box
    centroids to the previous frame's tracked centroids via pairwise
    Euclidean distance. Note the local default maxDisappeared=3 (objects are
    dropped after 3 consecutive missed frames).
    """

    def __init__(self, maxDisappeared=3):
        # initialize the next unique object ID along with two ordered
        # dictionaries used to keep track of mapping a given object
        # ID to its centroid and number of consecutive frames it has
        # been marked as "disappeared", respectively
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        # store the number of maximum consecutive frames a given
        # object is allowed to be marked as "disappeared" until we
        # need to deregister the object from tracking
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        # when registering an object we use the next available object
        # ID to store the centroid
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        # to deregister an object ID we delete the object ID from
        # both of our respective dictionaries
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        # rects: iterable of (startX, startY, endX, endY) bounding boxes for
        # the current frame. Returns the {objectID: centroid} mapping.
        # check to see if the list of input bounding box rectangles
        # is empty
        if len(rects) == 0:
            # loop over any existing tracked objects and mark them
            # as disappeared
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                # if we have reached a maximum number of consecutive
                # frames where a given object has been marked as
                # missing, deregister it
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            # return early as there are no centroids or tracking info
            # to update
            return self.objects
        # initialize an array of input centroids for the current frame
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        # loop over the bounding box rectangles
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            # use the bounding box coordinates to derive the centroid
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)
        # if we are currently not tracking any objects take the input
        # centroids and register each of them
        if len(self.objects) == 0:
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])
        # otherwise, are are currently tracking objects so we need to
        # try to match the input centroids to existing object
        # centroids
        else:
            # grab the set of object IDs and corresponding centroids
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())
            # compute the distance between each pair of object
            # centroids and input centroids, respectively -- our
            # goal will be to match an input centroid to an existing
            # object centroid
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value as at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()
            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]
            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the rows and column indexes we have already examined
            usedRows = set()
            usedCols = set()
            # loop over the combination of the (row, column) index
            # tuples — greedy matching: closest pairs claim each other
            # first; no distance threshold is applied.
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, ignore it
                # val
                if row in usedRows or col in usedCols:
                    continue
                # otherwise, grab the object ID for the current row,
                # set its new centroid, and reset the disappeared
                # counter
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)
            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            # in the event that the number of object centroids is
            # equal or greater than the number of input centroids
            # we need to check and see if some of these objects have
            # potentially disappeared
            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            # otherwise, if the number of input centroids is greater
            # than the number of existing object centroids we need to
            # register each new input centroid as a trackable object
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])
        # return the set of trackable objects
        return self.objects
# initialize our centroid tracker and frame dimensions
ct = CentroidTracker(); maxobjectID = 0;
###############################
from imutils.video import VideoStream
import argparse
import imutils, numpy as np
import time, cv2

# Hard-coded script configuration: input clip and OpenCV tracker algorithm.
args = {
    'video': "stable_Baltimore & Charles - AM (1)_TrimEnd.mp4", #"../overpass.mp4",
    'tracker':'kcf'
}
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}
ROI_CAPTURED = False; refPt = []
# NOTE(review): video_dir is assigned twice; only the last assignment wins.
video_dir = "Guilford & Madison - AM.avi" #
video_dir = "stable_Baltimore & Charles - AM (1)_TrimEnd.mp4"
#video_dir = "Guilford & LexingtonFayette - AM.avi"
backSub = cv2.createBackgroundSubtractorMOG2()
trackers = cv2.MultiTracker_create() # Create Multi-Tracker Object
vs = cv2.VideoCapture(video_dir) # Load video
print("[INFO] video path loaded..")
state = [0, 1] # track states
print("PRESS 's' to select anchor points.")
# loop over frames from the video stream until the user selects anchor points
# ('s'), resets the trackers ('r'), or quits ('q').
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    _, frame_o = vs.read()
    # check to see if we have reached the end of the stream
    if frame_o is None:
        break
    # resize the frame (so we can process it faster)
    frame = imutils.resize(frame_o, width=1000)
    # grab the updated bounding box coordinates (if any) for each
    # object that is being tracked
    (success, boxes) = trackers.update(frame)
    # loop over the bounding boxes and draw then on the frame
    for box in boxes:
        (x, y, w, h) = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # Restart the video so anchors are selected on the first frame.
        vs = cv2.VideoCapture(video_dir) # Load video
        _, frame_o = vs.read()
        # resize the frame (so we can process it faster)
        frame = imutils.resize(frame_o, width=1000)
        for i in state:
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            box = cv2.selectROI("Frame", frame, fromCenter=False,
                                showCrosshair=True)
            # create a new object tracker for the bounding box and add it
            # to our multi-object tracker
            tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
            trackers.add(tracker, frame, box)
        # Record the anchor centroids for the strip drawn later.
        strip = [];
        (success, boxes) = trackers.update(frame)
        for box in boxes:
            (x, y, w, h) = [int(v) for v in box]
            strip.append((x+w//2, y+h//2))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        #break out of loop
        break
    # press `r` to reset
    elif key == ord("r"):
        trackers = cv2.MultiTracker_create() # reset multi-object tracker
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        exit()
cv2.destroyAllWindows()
print("\nGOING THROUGH THE VIDEO USING THE ANCHOR TAGS\n")
print("PRESS 'l' to draw strip.")
print("\nGOING THROUGH THE VIDEO USING THE ANCHOR TAGS\n")
print("PRESS 'l' to draw strip.")
#Get the y-value from a line using two points on the line
def getyfrom(y, point1, point2):
(x1, y1) = point1
(x2, y2) = point2
m = (y2-y1) / (x2-x1)
return int(y1 + m*(x-x1) )
#Add method for cropping and rotating roi
def crop_rect(img, rect):
# get the parameter of the small rectangle
center, size, angle = rect[0], rect[1], rect[2]
center, size = tuple(map(int, center)), tuple(map(int, size))
# get row and col num in img
height, width = img.shape[0], img.shape[1]
# calculate the rotation matrix
M = cv2.getRotationMatrix2D(center, angle, 1)
# rotate the original image
img_rot = cv2.warpAffine(img, M, (width, height))
# now rotated rectangle becomes vertical, and we crop it
img_crop = cv2.getRectSubPix(img_rot, size, center)
return img_crop, img_rot
def click_and_crop(event, x, y, flags, param):
    """Mouse callback for the "Draw R-O-I" window: collects clicked points
    into the global ``refPt`` polygon and draws connecting line segments.

    NOTE(review): also reads the module-level ``clone`` frame; both globals
    must exist before the callback fires.
    """
    # grab references to the global variables
    global refPt, cropping
    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt.append((x, y))
        cropping = True
    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False
        # draw a rectangle around the region of interest
        cv2.line(clone, refPt[-2], refPt[-1], (0, 255, 0), 2)
        cv2.imshow("Draw R-O-I", clone)
        #cv2.imshow("mask", mask)
# Get regions of Interest: build a 4-point "strip" quadrilateral from the two
# anchor centroids, let the user free-draw an ROI polygon, then derive the
# ROI's offsets from the first anchor so it can follow the anchors per frame.
strip = []; (success, boxes) = trackers.update(frame)
for box in boxes:
    (x, y, w, h) = [int(v) for v in box]
    strip.append((x+w//2, y+h//2))
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
((x1, y1), (x2, y2)) = (strip[0], strip[1])
# Third and fourth corners: the anchor centroids shifted 20 px down.
strip.append((x2, y2+20)); strip.append((x1, y1+20))
cv2.line(frame, strip[0], strip[1], (15,15,15), 1)
cv2.line(frame, strip[1], strip[2], (15,15,15), 1)
cv2.line(frame, strip[2], strip[3], (15,15,15), 1)
cv2.line(frame, strip[3], strip[0], (15,15,15), 1)
# load the image, clone it, and setup the mouse callback function
clone = frame.copy()
cv2.namedWindow("Draw R-O-I")
cv2.setMouseCallback("Draw R-O-I", click_and_crop)
# keep looping until the 'q' key is pressed
while True:
    # display the image and wait for a keypress
    cv2.imshow("Draw R-O-I", clone)
    key = cv2.waitKey(1) & 0xFF
    # if the 'r' key is pressed, reset the cropping region
    if key == ord("r"):
        refPt = []; cropping = False
        clone = frame.copy()
    # if the 'c' key is pressed, break from the loop
    elif key == ord("c"):
        break
#refPt = [(484, 423), (591, 508), (591, 508), (594, 496), (594, 496), (495, 418), (495, 418), (486, 418)]
# if there are two reference points, then crop the region of interest
if len(refPt) >= 3:
    roi_corners = np.array([refPt], dtype=np.int32)
    (x1, y1) = strip[0]
    # Per-vertex offsets of the drawn polygon relative to the first anchor.
    free_draw_dist = [(x1-x2, y1-y2) for (x2,y2) in refPt]
cv2.destroyWindow("Draw R-O-I"); cv2.destroyWindow("Masked Image");
#free_draw_dist = [(x1-x2, y1-y2) for ((x1,y1), (x2, y2)) in zip(strip, refPt)]
# APPLY ROI ROTATION AND CROP
# NOTE(review): roi_corners is only defined when len(refPt) >= 3; the lines
# below NameError if the user drew fewer than three points.
rect = cv2.minAreaRect(roi_corners) #print("rect: {}".format(rect))
box = cv2.boxPoints(rect); box = np.int0(box)
# img_crop will the cropped rectangle, img_rot is the rotated image
img_crop, img_rot = crop_rect(frame, rect)
gray = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY)
images = [gray]; cur_images = [gray];
roi = [];
''' PROCESS VIDEO USING MY BACKGROUND SUBTRACTION METHOD'''
# Warm-up: read the first N-1 frames to prime the rolling grayscale buffers
# (images / cur_images) used by the background-subtraction stage.
WIDTH = 1000; N = 10; cur_ind = N-1
# NOTE(review): fourcc is assigned twice; the MJPG codec wins.
fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
#width = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
##height = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = vs.get(cv2.CAP_PROP_FPS)
size = (frame.shape[1], frame.shape[0])
out = cv2.VideoWriter("video_output/video1.avi", fourcc, fps, size)
#out = cv2.VideoWriter('video_output/1.avi', fourcc, fps, (1000, 562))
for i in range(N-1):
    # VideoStream or VideoCapture object
    _, frame_o = vs.read()
    # check to see if we have reached the end of the stream
    if frame_o is None:
        break
    # resize the frame (so we can process it faster)
    frame = imutils.resize(frame_o, width=WIDTH)
    # grab the updated bounding box coordinates
    (success, boxes) = trackers.update(frame)
    # loop over the anchor bounding boxes and draw them on the frame
    strip = []
    for box in boxes:
        (x, y, w, h) = [int(v) for v in box]
        strip.append((x+w//2, y+h//2))
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    ((x1, y1), (x2, y2)) = (strip[0], strip[1])
    strip.append((x2, y2+20)); strip.append((x1, y1+20))
    # if there are two reference points, then crop the region of interest
    # from teh image and display it
    if len(refPt) >= 3:
        # Re-anchor the drawn ROI polygon to the current anchor position.
        (x1, y1) = strip[0]
        refPt = [(x1-x2, y1-y2) for (x2,y2) in free_draw_dist]
        roi_corners = np.array([refPt], dtype=np.int32)
    # APPLY ROI ROTATION AND CROP
    rect = cv2.minAreaRect(roi_corners) #print("rect: {}".format(rect))
    box = cv2.boxPoints(rect); box = np.int0(box)
    # img_crop will the cropped rectangle, img_rot is the rotated image
    img_crop, img_rot = crop_rect(frame, rect)
    cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
    gray = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY)
    #gray = cv2.GaussianBlur(gray, (3, 3), 0)
    gray = cv2.blur(gray,(3,3))
    # Append this frame's blurred crop to the rolling buffers.
    images = np.append(images, [gray], axis=0)
    cur_images = np.append(cur_images, [gray], axis=0)
while True: # loop over frames from the video stream
_, frame_o = vs.read()
# check to see if we have reached the end of the stream
if frame_o is None:
break
# resize the frame (so we can process it faster)
frame = imutils.resize(frame_o, width=1000)
# grab the updated bounding box coordinates
(success, boxes) = trackers.update(frame)
# loop over the anchor bounding boxes and draw them on the frame
strip = []
for box in boxes:
(x, y, w, h) = [int(v) for v in box]
strip.append((x+w//2, y+h//2))
#cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
((x1, y1), (x2, y2)) = (strip[0], strip[1])
strip.append((x2, y2+20)); strip.append((x1, y1+20))
# if there are two reference points, then crop the region of interest
# from teh image and display it
if len(refPt) >= 3:
(x1, y1) = strip[0]
refPt = [(x1-x2, y1-y2) for (x2,y2) in free_draw_dist]
roi_corners = np.array([refPt], dtype=np.int32)
# APPLY ROI ROTATION AND CROP
rect = cv2.minAreaRect(roi_corners) #print("rect: {}".format(rect))
box = cv2.boxPoints(rect); box = np.int0(box)
# img_crop will the cropped rectangle, img_rot is the rotated image
img_crop, img_rot = crop_rect(frame, rect)
cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
# Perform background subtraction
gray = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY) #convert to grayscale
#gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.blur(gray,(3,3))
avg_frame = np.mean(images, axis=0)
diff = cv2.convertScaleAbs(gray - avg_frame)
images[cur_ind] = gray;
cur_ind = cur_ind + 1 if cur_ind < N-1 else 0
#diff = cv2.GaussianBlur(diff, (5, 5), 0) #Blur to minimize
alpha, thresh = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
thresh = cv2.erode(thresh, np.ones((5,3), np.uint8), iterations = 1)
thresh = cv2.dilate(thresh, np.ones((7,5), np.uint8), iterations = 2)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts) # loop over the contours
rects = []
# loop over the detections
for index, c in enumerate(cnts):
# if the contour is too small, ignore it
# compute the (x, y)-coordinates of the bounding box for
# the object, then update the bounding box rectangles list
(x, y, w, h) = cv2.boundingRect(c)
if h > 20 and cv2.contourArea(c) > 150:
rects.append([x, y, x + w, y + h])
cv2.rectangle(img_crop, (x, y), (x + w, y + h), (0, 0, 255), 2)
#cv2.imshow("thresh", thresh); cv2.imshow("diff", diff)
#cv2.imshow("back-sub video", img_crop)
white = np.zeros(img_crop.shape, dtype=np.uint8); white[: :]=255
concat_img = cv2.hconcat([cv2.cvtColor(gray,cv2.COLOR_GRAY2RGB), white,
cv2.cvtColor(diff,cv2.COLOR_GRAY2RGB), white,
cv2.cvtColor(thresh,cv2.COLOR_GRAY2RGB), white,
img_crop])
cv2.imshow("diff thresh img_crop", concat_img)
objects = ct.update(rects) # send detections to centroid tracker
k=0; text = "New detections:"
for (objectID, centroid) in objects.items():
# draw both the ID of the object and the centroid of the
# object on the output frame
if objectID+1 > maxobjectID:
maxobjectID = objectID+1
text = text + " {}".format(objectID); k = k+1
cv2.rectangle(frame, (0, 0), (200, 30), (255, 255, 255), -1) #(100, 100, 255)
#cv2.putText(frame, text, (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(frame, "Total detected: {}".format(maxobjectID), (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 150, 150), 2)
#draw anchors
for box in boxes:
(x, y, w, h) = [int(v) for v in box]
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# show the output frame
cv2.imshow("Frame", frame)
out.write(frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
vs.release(); out.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
3291319 | <filename>tests/test_api.py
import asyncio
import functools
import pytest
import http3
def threadpool(func):
    """
    Run a synchronous test function on the default executor so it does not
    block the event loop that the uvicorn test server runs on.

    The wrapped coroutine is marked with ``pytest.mark.asyncio`` so pytest
    schedules it on the event loop.
    """
    @functools.wraps(func)
    async def wrapped(*args, **kwargs):
        loop = asyncio.get_event_loop()
        # Bind kwargs into a fresh callable per call. The original rebound the
        # enclosing `func` via `nonlocal`, so kwargs from one invocation were
        # permanently baked into every later invocation of the decorated test.
        call = functools.partial(func, **kwargs) if kwargs else func
        await loop.run_in_executor(None, call, *args)
    return pytest.mark.asyncio(wrapped)
@threadpool
def test_get(server):
    """GET / answers 200 OK with the greeting body."""
    resp = http3.get("http://127.0.0.1:8000/")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
    assert resp.text == "Hello, world!"
@threadpool
def test_post(server):
    """POST with a bytes body answers 200 OK."""
    resp = http3.post("http://127.0.0.1:8000/", data=b"Hello, world!")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_post_byte_iterator(server):
    """POST with a generator body streams the chunks and answers 200 OK."""
    def chunks():
        yield b"Hello"
        yield b", "
        yield b"world!"
    resp = http3.post("http://127.0.0.1:8000/", data=chunks())
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_options(server):
    """OPTIONS / answers 200 OK."""
    resp = http3.options("http://127.0.0.1:8000/")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_head(server):
    """HEAD / answers 200 OK."""
    resp = http3.head("http://127.0.0.1:8000/")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_put(server):
    """PUT with a bytes body answers 200 OK."""
    resp = http3.put("http://127.0.0.1:8000/", data=b"Hello, world!")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_patch(server):
    """PATCH with a bytes body answers 200 OK."""
    resp = http3.patch("http://127.0.0.1:8000/", data=b"Hello, world!")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
@threadpool
def test_delete(server):
    """DELETE / answers 200 OK."""
    resp = http3.delete("http://127.0.0.1:8000/")
    assert resp.status_code == 200
    assert resp.reason_phrase == "OK"
| StarcoderdataPython |
3268619 | <reponame>MrLingo/fast-food-chatbot
from __future__ import print_function
from flask import Flask, json, jsonify, render_template, request
from difflib import SequenceMatcher
app = Flask(__name__)
main_page_info = ["Viki", "Viki's response accuracy: ", "Total price:"]
@app.route('/')
def hello_world():
    """Render the chatbot landing page with its header labels."""
    page = render_template('main.html', pages=main_page_info)
    return page
def similar(a, b):
    """Return the difflib similarity ratio of *a* and *b* (0.0 .. 1.0)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# Running order total in USD. Module-level state: shared across every request
# and every client, and never reset -- see note in process_order().
total_price = 0
''' Associate each product with it's price (USD) '''
# Keys are matched as substrings of the chatbot's answer text in
# process_order(), so names must appear verbatim in the canned answers.
price_dict = {
    "Pepperoni": 1,
    "Margherita" : 1.5,
    "White Pizza": 1,
    "Caesar Salad Pizza": 2.5,
    "Venetian Rolled": 2,
    "Pizza Pockets": 1.3,
    "Calzone": 1.5,
    "Griddled California Pizzas": 2.4,
    "Bruscheta Pizzaiola": 2.1,
    "Breakfast Pizza": 1.5,
    "Gluten-free Mushroom": 1.6,
    "Wingless Buffalo Chicken": 2 ,
    "Big Mac": 2.5,
    "Quarter Pounder": 2,
    "Cheeseburger": 1.5,
    "Hamburger": 1.3,
    "Bacon Ranch Salad": 3,
    "Side Salad": 2.7,
    "Coca-Cola": 1,
    "Sprite": 1,
    "Fanta": 1,
    "Orange": 1.2,
    "Dasani water": 0.6
}
@app.route('/data')
def process_order():
    """Answer a chat message and keep a running order total.

    Reads ``user_input`` from the query string, picks the canned answer from
    static/database.json whose question is most similar to the input, adds
    the price of any product named in that answer to the running total, and
    returns ``[answer, accuracy_percent, total_price]`` as JSON.
    """
    # NOTE(review): total_price is module-global, so the running total is
    # shared by all clients/sessions and never resets -- confirm intended.
    global total_price
    user_input = request.args.get('user_input')
    temp_dict = {}
    with open("static/database.json") as file:
        data_dict = json.load(file)
    ''' Traversing data dict and filling a temporary one ( answer:ratio ). '''
    # keys of data_dict are the known questions; values are the canned answers
    for i in data_dict:
        ratio = similar(i, user_input.lower())
        temp_dict[data_dict[i]] = ratio
    ''' Choosing the one with the best ratio from the generated one. '''
    final_answer = max(temp_dict, key=temp_dict.get)
    accuracy = temp_dict[final_answer]
    ''' Calculating total price. '''
    # substring match: every product name contained in the answer adds its price
    for product in price_dict:
        if product in final_answer:
            total_price = total_price + price_dict[product]
    ''' Reformating the accuracy ( show in %) '''
    # NOTE(review): rounding to 1 decimal *before* scaling by 100 quantizes the
    # percentage to multiples of 10 (e.g. 0.87 -> 90.0) -- confirm intended.
    accuracy = round(accuracy, 1)
    accuracy = accuracy * 100
    response_list = [final_answer, accuracy, total_price]
    ''' Return Viki's response to the user! '''
    return jsonify(response_list)
| StarcoderdataPython |
331312 | import logging
from typing import Any, Optional
from hdfs.client import InsecureClient
from determined.common import util
from determined.tensorboard import base
class HDFSTensorboardManager(base.TensorboardManager):
    """
    Store tfevents files to HDFS.
    """

    @util.preserve_random_state
    def __init__(
        self,
        hdfs_url: str,
        hdfs_path: str,
        user: Optional[str] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        :param hdfs_url: base URL of the (unauthenticated) WebHDFS endpoint
        :param hdfs_path: root path on HDFS under which event files are stored
        :param user: optional HDFS user name to act as
        """
        super().__init__(*args, **kwargs)
        self.hdfs_url = hdfs_url
        self.hdfs_path = hdfs_path
        self.user = user
        self.client = InsecureClient(self.hdfs_url, root=self.hdfs_path, user=self.user)
        # ensure the sync directory exists before any upload happens
        self.client.makedirs(str(self.sync_path))

    @util.preserve_random_state
    def sync(self) -> None:
        """Upload every pending tfevents file into the HDFS sync directory."""
        for path in self.to_sync():
            file_name = str(self.sync_path.joinpath(path.name))
            logging.debug(f"Uploading {path} to {self.hdfs_path}")
            self.client.upload(file_name, str(path))

    def delete(self) -> None:
        """Recursively remove the sync directory from HDFS.

        Consistency fix: convert the path with ``str()`` -- every other call on
        ``self.client`` in this class passes string paths, not Path objects.
        """
        self.client.delete(str(self.sync_path), recursive=True)
| StarcoderdataPython |
11257696 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# TODO: resample 5-minute data into another time interval
| StarcoderdataPython |
1688626 | import numpy as np
class PointSourceParam(object):
    """
    Parameter handler for point source models: translates between the
    keyword-argument representation used by the models and the flat argument
    list consumed by samplers.
    """

    def __init__(self, model_list, kwargs_fixed, num_point_source_list=None, linear_solver=True,
                 fixed_magnification_list=None, kwargs_lower=None, kwargs_upper=None):
        """
        :param model_list: list of point source model names
        :param kwargs_fixed: list of keyword arguments with parameters to be held fixed
        :param num_point_source_list: list of number of point sources per point source model class
        :param linear_solver: bool, if True, does not return linear parameters for the sampler
            (will be solved linearly instead)
        :param fixed_magnification_list: list of booleans, if entry is True, keeps one overall scaling among the
            point sources in this class
        :param kwargs_lower: list of dicts of lower parameter bounds (auto-generated when None)
        :param kwargs_upper: list of dicts of upper parameter bounds (auto-generated when None)
        """
        self.model_list = model_list
        if num_point_source_list is None:
            num_point_source_list = [0] * len(model_list)
        self._num_point_sources_list = num_point_source_list
        self.kwargs_fixed = kwargs_fixed
        if linear_solver is True:
            # amplitudes are solved for linearly, so fix them for the sampler
            self.kwargs_fixed = self.add_fix_linear(kwargs_fixed)
        self._linear_solver = linear_solver
        # bug fix: a caller-supplied fixed_magnification_list used to be silently
        # dropped (the attribute was only assigned in the `is None` branch),
        # which made num_param_linear() raise AttributeError later.
        if fixed_magnification_list is None:
            fixed_magnification_list = [False] * len(model_list)
        self._fixed_magnification_list = fixed_magnification_list
        if kwargs_lower is None:
            kwargs_lower = self._default_limits(-100, 0)
        if kwargs_upper is None:
            kwargs_upper = self._default_limits(100, 100)
        self.lower_limit = kwargs_lower
        self.upper_limit = kwargs_upper

    def _default_limits(self, position_bound, amp_bound):
        """Build the per-model limit dicts (shared helper for lower/upper bounds).

        :param position_bound: bound applied to all position parameters
        :param amp_bound: bound applied to the point source amplitude
        :return: list of limit dicts, one per model
        :raises ValueError: if a model name is not recognized
        """
        limits = []
        for k, model in enumerate(self.model_list):
            num = self._num_point_sources_list[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                limits.append({'ra_image': [position_bound] * num, 'dec_image': [position_bound] * num,
                               'point_amp': [amp_bound] * num})
            elif model in ['SOURCE_POSITION']:
                limits.append({'ra_source': position_bound, 'dec_source': position_bound,
                               'point_amp': amp_bound})
            else:
                raise ValueError("%s not a valid point source model" % model)
        return limits

    def getParams(self, args, i):
        """Convert a flat argument list back into per-model keyword arguments.

        :param args: flat sequence of sampled parameter values
        :param i: index in *args* where this class' parameters start
        :return: (list of kwargs dicts, index just past the last consumed value)
        """
        kwargs_list = []
        for k, model in enumerate(self.model_list):
            kwargs = {}
            kwargs_fixed = self.kwargs_fixed[k]
            num = self._num_point_sources_list[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                for name in ['ra_image', 'dec_image', 'point_amp']:
                    if name not in kwargs_fixed:
                        kwargs[name] = np.array(args[i:i + num])
                        i += num
                    else:
                        kwargs[name] = kwargs_fixed[name]
            if model in ['SOURCE_POSITION']:
                for name in ['ra_source', 'dec_source', 'point_amp']:
                    if name not in kwargs_fixed:
                        kwargs[name] = args[i]
                        i += 1
                    else:
                        kwargs[name] = kwargs_fixed[name]
            kwargs_list.append(kwargs)
        return kwargs_list, i

    def setParams(self, kwargs_list):
        """Flatten per-model keyword arguments into the sampler argument list.

        Fixed parameters are skipped; the ordering mirrors getParams().

        :param kwargs_list: list of kwargs dicts, one per model
        :return: flat list of parameter values
        """
        args = []
        for k, model in enumerate(self.model_list):
            kwargs = kwargs_list[k]
            kwargs_fixed = self.kwargs_fixed[k]
            num = self._num_point_sources_list[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                for name in ['ra_image', 'dec_image', 'point_amp']:
                    if name not in kwargs_fixed:
                        args.extend(kwargs[name][0:num])
            if model in ['SOURCE_POSITION']:
                for name in ['ra_source', 'dec_source', 'point_amp']:
                    if name not in kwargs_fixed:
                        args.append(kwargs[name])
        return args

    def num_param(self):
        """Count the sampled (non-fixed) parameters.

        :return: (number of parameters, list with one name per parameter)
        """
        num = 0
        name_list = []  # renamed from `list` to avoid shadowing the builtin
        for k, model in enumerate(self.model_list):
            kwargs_fixed = self.kwargs_fixed[k]
            num_ps = self._num_point_sources_list[k]
            if model in ['LENSED_POSITION', 'UNLENSED']:
                for name in ['ra_image', 'dec_image', 'point_amp']:
                    if name not in kwargs_fixed:
                        num += num_ps
                        name_list.extend([name] * num_ps)
            if model in ['SOURCE_POSITION']:
                for name in ['ra_source', 'dec_source', 'point_amp']:
                    if name not in kwargs_fixed:
                        num += 1
                        name_list.append(name)
        return num, name_list

    def add_fix_linear(self, kwargs_fixed):
        """Fix the amplitude of every model (amplitudes are solved linearly).

        Note: mutates and returns the given list of dicts.
        """
        for k, model in enumerate(self.model_list):
            kwargs_fixed[k]['point_amp'] = 1
        return kwargs_fixed

    def num_param_linear(self):
        """
        :return: number of linear (amplitude) parameters
        """
        num = 0
        if self._linear_solver is True:
            for k, model in enumerate(self.model_list):
                if self._fixed_magnification_list[k] is True:
                    # one overall scaling shared by all point sources of the model
                    num += 1
                else:
                    num += self._num_point_sources_list[k]
        return num
| StarcoderdataPython |
234541 | #--------------------------
# lexer.py
#
# Verilog-AMS Lexical Analyzer
#
# Copyright (C) 2015, <NAME>
# License: Apache 2.0
#--------------------------
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import re
# make the package root importable when this file is run directly
# bug fix: `__file` was a NameError; the module path variable is `__file__`
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from pyvams.vamsparser import ply
from pyvams.vamsparser.ply.lex import *
class VerilogAMSLexer(object):
    """ Verilog-AMS Lexical Analyzer (PLY rule object). """

    def __init__(self, error_func):
        """
        :param error_func: callback(msg, line, column) invoked on lexing errors
        """
        self.filename = ''
        self.error_func = error_func
        self.directives = []  # (lineno, text) pairs of compiler directives seen
        self.default_nettype = 'wire'  # updated by a `default_nettype directive

    def build(self, **kwargs):
        """Create the underlying PLY lexer from this rule object."""
        self.lexer = ply.lex.lex(object=self, **kwargs)

    def input(self, data):
        self.lexer.input(data)

    def reset_lineno(self):
        self.lexer.lineno = 1

    def get_directives(self):
        return tuple(self.directives)

    def get_default_nettype(self):
        return self.default_nettype

    def token(self):
        return self.lexer.token()

    # Annex B - List of keywords - Table B.1--Reserved keywords
    # (bug fix: this line previously used a C-style '//' comment, a SyntaxError)
    keywords = (
        'ABOVE','ABS','ABSDELAY','ABSDELTA','ABSTOL','ACCESS','ACOS','ACOSH','AC_STIM','ALIASPARAM',
        'ALWAYS','ANALOG','ANALYSIS','AND','ASIN','ASINH','AUTOMATIC','BEGIN','BRANCH','BUF',
        'BUFIF0','BUFIF1','CASE','CASEX','CASEZ','CEIL','CELL','CMOS','CONFIG','CONNECT',
        'CONNECTMODULE','CONNECTRULES','CONTINUOUS','COS','COSH','CROSS','DDT','DDT_NATURE','DDX','DEASSIGN',
        'DEFAULT','DEFPARAM','DESIGN','DISABLE','DISCIPLINE','DISCRETE','DOMAIN','DRIVER_UPDATE','EDGE','ELSE',
        'END','ENDCASE','ENDCONFIG','ENDCONNECTRULES','ENDDISCIPLINE','ENDFUNCTION','ENDGENERATE','ENDMODULE','ENDNATURE','ENDPARAMSET',
        'ENDPRIMITIVE','ENDSPECIFY','ENDTABLE','ENDTASK','EVENT','EXCLUDE','EXP','FINAL_STEP','FLICKER_NOISE','FLOOR',
        'FLOW','FOR','FORCE','FOREVER','FORK','FROM','FUNCTION','GENERATE','GENVAR','GROUND',
        'HIGHZ0','HIGHZ1','HYPOT','IDT','IDTMOD','IDT_NATURE','IF','IFNONE','INCDIR','INCLUDE',
        'INF','INITIAL','INITIAL_STEP','INOUT','INPUT','INSTANCE','INTEGER','JOIN','LAPLACE_ND','LAPLACE_NP',
        'LAPLACE_ZD','LAPLACE_ZP','LARGE','LAST_CROSSING','LIBLIST','LIBRARY','LIMEXP','LN','LOCALPARAM','LOG',
        'MACROMODULE','MAX','MEDIUM','MERGED','MIN','MODULE','NAND','NATURE','NEGEDGE','NET_RESOLUTION',
        'NMOS','NOISE_TABLE','NOISE_TABLE_LOG','NOR','NOSHOWCANCELLED','NOT','NOTIF0','NOTIF1','OR','OUTPUT',
        'PARAMETER','PARAMSET','PMOS','POSEDGE','POTENTIAL','POW','PRIMITIVE','PULL0','PULL1','PULLDOWN',
        'PULLUP','PULSESTYLE_ONEVENT','PULSESTYLE_ONDETECT','RCMOS','REAL','REALTIME','REG','RELEASE','REPEAT','RESOLVETO',
        'RNMOS','RPMOS','RTRAN','RTRANIF0','RTRANIF1','SCALARED','SIN','SINH','SHOWCANCELLED','SIGNED',
        'SLEW','SMALL','SPECIFY','SPECPARAM','SPLIT','SQRT','STRING','STRONG0','STRONG1','SUPPLY0',
        'SUPPLY1','TABLE','TAN','TANH','TASK','TIME','TIMER','TRAN','TRANIF0','TRANIF1',
        'TRANSITION','TRI','TRI0','TRI1','TRIAND','TRIOR','TRIREG','UNITS','UNSIGNED','USE',
        'UWIRE','VECTORED','WAIT','WAND','WEAK0','WEAK1','WHILE','WHITE_NOISE','WIRE','WOR',
        'WREAL','XNOR','XOR','ZI_ND','ZI_NP','ZI_ZD','ZI_ZP',
    )

    # map lowercase source spelling -> token name for the keyword lookup in t_ID
    reserved = {}
    for keyword in keywords:
        reserved[keyword.lower()] = keyword

    operators = (
        'PLUS','MINUS','POWER','TIMES','DIVIDE','MOD',
        'SYM_NOT','SYM_OR','SYM_NOR','SYM_AND','SYM_NAND','SYM_XOR','SYM_XNOR',
        'LOR','LAND','LNOT',
        'LSHIFTA','RSHIFTA','LSHIFT','RSHIFT',
        'LT','GT','LE','GE','EQ','NE','EQL','NEL',
        'COND',
        'EQUALS',
    )

    tokens = keywords + operators + (
        'ID',
        'AT','COMMA','COLON','SEMICOLON','DOT',
        'PLUSCOLON','MINUSCOLON',
        'FLOATNUMBER','STRING_LITERAL',
        'INTNUMBER_DEC','SIGNED_INTNUMBER_DEC',
        'INTNUMBER_HEX','SIGNED_INTNUMBER_HEX',
        'INTNUMBER_OCT','SIGNED_INTNUMBER_OCT',
        'INTNUMBER_BIN','SIGNED_INTNUMBER_BIN',
        'LPAREN','RPAREN','LBRACKET','RBRACKET','LBRACE','RBRACE',
        'DELAY','DOLLAR',
    )

    skipped = (
        'COMMENTOUT','LINECOMMENT','DIRECTIVE',
    )

    # Ignore
    t_ignore = ' \t'

    # Directive
    directive = r"""\`.*?\n"""

    @TOKEN(directive)
    def t_DIRECTIVE(self,t):
        self.directives.append( (self.lexer.lineno,t.value) )
        t.lexer.lineno += t.value.count("\n")
        # remember the most recent `default_nettype setting
        # (raw string: `\s` in a plain string is an invalid escape sequence)
        m = re.match(r"^`default_nettype\s+(.+)\n", t.value)
        if m: self.default_nettype = m.group(1)
        pass

    # Comment
    linecomment = r"""//.*?\n"""
    commentout = r"""/\*(.|\n)*?\*/"""

    @TOKEN(linecomment)
    def t_LINECOMMENT(self,t):
        t.lexer.lineno += t.value.count("\n")
        pass

    @TOKEN(commentout)
    def t_COMMENTOUT(self,t):
        t.lexer.lineno += t.value.count("\n")
        pass

    # Operator
    t_LOR = r'\|\|'
    t_LAND = r'\&\&'

    t_SYM_NOR = r'~\|'
    t_SYM_NAND = r'~\&'
    t_SYM_XNOR = r'~\^'
    t_SYM_OR = r'\|'
    t_SYM_AND = r'\&'
    t_SYM_XOR = r'\^'
    t_SYM_NOT = r'~'
    t_LNOT = r'!'

    t_LSHIFTA = r'<<<'
    t_RSHIFTA = r'>>>'
    t_LSHIFT = r'<<'
    t_RSHIFT = r'>>'

    t_EQL = r'==='
    t_NEL = r'!=='
    t_EQ = r'=='
    t_NE = r'!='
    t_LE = r'<='
    t_GE = r'>='
    t_LT = r'<'
    t_GT = r'>'

    t_POWER = r'\*\*'
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_MOD = r'%'

    # bug fix: this rule was named t_CONT, but the token list declares 'COND'
    # (PLY rejects rules whose token name is not in `tokens`)
    t_COND = r'\?'

    t_EQUALS = r'='

    t_PLUSCOLON = r'\+:'
    t_MINUSCOLON = r'-:'

    t_AT = r'@'
    t_COMMA = r','
    t_SEMICOLON = r';'
    t_COLON = r':'
    t_DOT = r'\.'

    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_LBRACE = r'\{'
    t_RBRACE = r'\}'

    t_DELAY = r'\#'
    t_DOLLAR = r'\$'

    bin_number = '[0-9]*\'[bB][0-1xXzZ?][0-1xXzZ?_]*'
    # bug fix: first char class was [0-1xZzZ?] -- missing the uppercase X digit
    signed_bin_number = '[0-9]*\'[sS][bB][0-1xXzZ?][0-1xXzZ?_]*'

    octal_number = '[0-9]*\'[oO][0-7xXzZ?][0-7xXzZ?_]*'
    signed_octal_number = '[0-9]*\'[sS][oO][0-7xXzZ?][0-7xXzZ?_]*'

    hex_number = '[0-9]*\'[hH][0-9a-fA-FxXzZ?][0-9a-fA-FxXzZ?_]*'
    signed_hex_number = '[0-9]*\'[sS][hH][0-9a-fA-FxXzZ?][0-9a-fA-FxXzZ?_]*'

    decimal_number = '([0-9]*\'[dD][0-9xXzZ?][0-9xXzZ?_]*)|([0-9][0-9_]*)'
    signed_decimal_number = '[0-9]*\'[sS][dD][0-9xXzZ?][0-9xXzZ?_]*'

    exponent_part = r"""([eE][-+]?[0-9]+)"""
    fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
    float_number = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+')))'

    simple_escape = r"""([a-zA-Z\\?'"])"""
    octal_escape = r"""([0-7]{1,3})"""
    hex_escape = r"""(x[0-9a-fA-F]+)"""
    escape_sequence = r"""(\\("""+simple_escape+'|'+octal_escape+'|'+hex_escape+'))'
    string_char = r"""([^"\\\n]|"""+escape_sequence+')'
    string_literal = '"'+string_char+'*"'

    identifier = r"""(([a-zA-Z_])([a-zA-Z_0-9$])*)|((\\\S)(\S)*)"""

    @TOKEN(string_literal)
    def t_STRING_LITERAL(self, t):
        return t

    @TOKEN(float_number)
    def t_FLOATNUMBER(self, t):
        return t

    @TOKEN(signed_bin_number)
    def t_SIGNED_INTNUMBER_BIN(self, t):
        return t

    @TOKEN(bin_number)
    def t_INTNUMBER_BIN(self, t):
        return t

    @TOKEN(signed_octal_number)
    def t_SIGNED_INTNUMBER_OCT(self, t):
        return t

    @TOKEN(octal_number)
    def t_INTNUMBER_OCT(self, t):
        return t

    @TOKEN(signed_hex_number)
    def t_SIGNED_INTNUMBER_HEX(self, t):
        return t

    @TOKEN(hex_number)
    def t_INTNUMBER_HEX(self, t):
        return t

    @TOKEN(signed_decimal_number)
    def t_SIGNED_INTNUMBER_DEC(self, t):
        return t

    @TOKEN(decimal_number)
    def t_INTNUMBER_DEC(self, t):
        return t

    @TOKEN(identifier)
    def t_ID(self, t):
        # identifiers that spell a reserved word become keyword tokens
        t.type = self.reserved.get(t.value, 'ID')
        return t

    def t_NEWLINE(self, t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")
        pass

    def t_error(self, t):
        msg = 'Illegal character %s' % repr(t.value[0])
        self._error(msg, t)

    def _error(self, msg, token):
        location = self._make_tok_location(token)
        self.error_func(msg, location[0], location[1])
        self.lexer.skip(1)

    def _find_tok_column(self, token):
        """Return the 1-based column of *token* on its line."""
        i = token.lexpos
        while i > 0:
            if self.lexer.lexdata[i] == '\n': break
            i -= 1
        return (token.lexpos - i) + 1

    def _make_tok_location(self, token):
        return (token.lineno, self._find_tok_column(token))
#-------------------------------------------------------------------------------
def dump_tokens(text):
    """Tokenize *text* and return one 'value type lineno filename lexpos' line per token."""
    def my_error_func(msg, a, b):
        # bug fix: the sys module has no write(); report on stderr before exiting
        sys.stderr.write(msg + "\n")
        sys.exit()
    lexer = VerilogAMSLexer(error_func=my_error_func)
    lexer.build()
    lexer.input(text)
    ret = []
    # Tokenize until the lexer is exhausted
    while True:
        tok = lexer.token()
        if not tok: break  # No more input
        ret.append("%s %s %d %s %d\n" %
                   (tok.value, tok.type, tok.lineno, lexer.filename, tok.lexpos))
    return ''.join(ret)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    # CLI entry point: preprocess the given files and dump the token stream.
    import pyvams.utils.version
    from pyvams.vparser.preprocessor import preprocess
    # NOTE(review): this import uses 'pyvams.vparser' while the rest of the file
    # imports from 'pyvams.vamsparser' -- confirm which package name is correct.
    from optparse import OptionParser
    INFO = "Verilog Preprocessor"
    VERSION = pyvams.utils.version.VERSION
    USAGE = "Usage: python preprocessor.py file ..."
    def showVersion():
        # print tool info plus usage, then stop
        print(INFO)
        print(VERSION)
        print(USAGE)
        sys.exit()
    optparser = OptionParser()
    optparser.add_option("-v","--version",action="store_true",dest="showversion",
                         default=False,help="Show the version")
    optparser.add_option("-I","--include",dest="include",action="append",
                         default=[],help="Include path")
    optparser.add_option("-D",dest="define",action="append",
                         default=[],help="Macro Definition")
    (options, args) = optparser.parse_args()
    filelist = args
    if options.showversion:
        showVersion()
    # every positional argument must name an existing file
    for f in filelist:
        if not os.path.exists(f): raise IOError("file not found: " + f)
    # with no files to process, fall back to showing version/usage
    if len(filelist) == 0:
        showVersion()
    text = preprocess(filelist,
                      preprocess_include=options.include,
                      preprocess_define=options.define)
    dump = dump_tokens(text)
    print(dump)
| StarcoderdataPython |
11228610 | from datetime import datetime, timedelta
import sys
from flight_tables.flight_tables import FlightTables
if __name__=="__main__":
    """
    Save Arrivals and Departures to CSV
    Inputs (Optional):
        date_str (str): Date the data is for. Defaults to yesterday. Should be in the past & still available in Heathrow Airport's API.
    Output:
        Saves Arrivals and Departures CSV table to working directory.
    """
    # Determine the Date
    arg_count = len(sys.argv)
    if arg_count == 1:
        # Default Date to Yesterday
        date = datetime.today().date()-timedelta(days=1)
        date_str = date.isoformat()
    elif arg_count == 2:
        date_str = sys.argv[1]
        try:
            date_format = "%Y-%m-%d"
            date = datetime.strptime(date_str, date_format)
        except ValueError:
            # bug fix: narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit; only strptime's parse
            # failure should map to this usage error
            sys.exit(f"\nINPUT ERROR: Wrong input format. \n\tExpected 'YYYY-MM-DD' formatted string \n\tReceived: {sys.argv[1]}\n")
    else:
        sys.exit(f"\nINPUT ERROR: Too many input arguments.\n")
    # Save Arrivals CSV
    FlightTables.arrivals_csv(date_str)
    # Save Departures csv
    FlightTables.departures_csv(date_str)
| StarcoderdataPython |
5015164 | <reponame>yufernando/strspy
#!/usr/bin/env python3
# This file is part of STRspy project.
# MIT License
# Copyright (c) 2021 unique379r
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# author: <NAME> <bioinrupesh2009 DOT au AT gmail DOT com>
import sys
import os
import argparse
import time
import glob
import os.path
# tool version string, reported by the version subcommand
VERSION="0.1"
def in_progress(args):
    """Placeholder handler for subcommands that are not implemented yet."""
    message = 'working on it...'
    print(message)
def version(args):
    """Print the tool name together with its version string."""
    banner = "StutterSpy v%s" % VERSION
    print(banner)
## one-line program description shown in the argparse help output
USAGE = """\
StutterSpy v%s - A simple script to find stutters, non-stutters and True alleles in a given samples.
""" % VERSION
## ====================================================functions====================================================
## Arguments check/file validation
def check_dir(indir):
    """Abort the program with an error message unless *indir* is an existing directory."""
    if os.path.isdir(indir):
        return
    sys.exit("ERROR: Provided input is not a directory !!")
def check_file(infile):
    """Abort the program with an error message unless *infile* exists on disk."""
    if os.path.exists(infile):
        return
    sys.exit("ERROR: Provided input is not a file !!")
def check_multiplefiles(dirfiles):
    """Abort the program unless the glob pattern *dirfiles* matches at least one path."""
    matches = glob.glob(dirfiles)
    if not matches:
        sys.exit("ERROR: Input dir exists but files do not exist, it must be bam or fastq files.")
def num(s):
    """Parse *s* as an int when possible, otherwise as a float."""
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
def run(args):
    """Classify count-file entries for one locus into true alleles, stutters
    and non-stutters.

    Reads the motif file and count file named on the command line and writes
    <locus>_<sample>_<depth>_TrueSTRs.out, *_Stutters_undup.out and
    *_NonStutters.out into the current working directory.

    NOTE(review): assumes both inputs are tab-separated, that the repeat count
    is the last underscore-separated field of the first column, and that
    per-motif counts follow a ']' in columns 2 and 3 -- confirm against the
    upstream file format.
    """
    ##check file type
    motif_file = args.motif_file
    count_file = args.count_file
    locusname = args.locusname
    samplename = args.samplename
    depthreplicate = args.depthreplicate
    check_file(motif_file)
    check_file(count_file)
    print("True Allele Motif file:\t", motif_file)
    print("Count file :\t", count_file)
    print("Locus :\t", locusname)
    print("Sample Name :\t", samplename)
    print("Depth & replicates :\t", depthreplicate)
    ##running program
    # collect every motif row belonging to the requested locus
    f1 = open(motif_file, "r")
    Lines1 = f1.readlines()
    NC = []
    for line in Lines1:
        line = line.strip('\n')
        col_list = line.split("\t")
        if col_list[0] == locusname :
            NC.append(col_list)
    f1.close()
    print("\n")
    fo0 = open(locusname + "_" + samplename + "_" + depthreplicate + "_TrueSTRs.out", "w")
    fo1 = open(locusname + "_" + samplename + "_" + depthreplicate + "_Stutters.out", "w")
    print("Stutters Output:")
    print("\n")
    for line in NC:
        # print("\n")
        # print("\n")
        print("--------------")
        print(line)
        print("--------------")
        # x/y/z: total repeat count and the two per-motif counts of the true allele
        x = -1
        y = -1
        z = -1
        x = num(line[-1])
        #print(x)
        yy = line[1].split("]")
        if len(yy) == 2:
            y = num(yy[1])
            #print(y)
        zz = line[2].split("]")
        if len(zz) == 2:
            z = num(zz[1])
            #print(z)
        # NOTE(review): the count file is re-read once per motif row; consider
        # caching if inputs grow large.
        f2 = open(count_file, "r")
        Lines2 = f2.readlines()
        for line in Lines2:
            line = line.strip('\n')
            col_list = line.split("\t")
            first_col = col_list[0].split("_")
            # x1/y1/z1: the same three counts parsed from the observed read
            x1 = -1
            y1 = -1
            z1 = -1
            #print (first_col[-1])
            x1 = num(first_col[-1])
            #print(x)
            yy1 = first_col[1].split("]")
            if len(yy1) == 2:
                y1 = num(yy1[1])
                #print(y)
            zz1 = first_col[2].split("]")
            if len(zz1) == 2:
                z1 = num(zz1[1])
                #print(z)
            if x1 == x:
                ## Truth sets: exact repeat-count match with the true allele
                truth_filt = line
                #print(truth_filt)
                fo0.write(line)
                fo0.write("\n")
            elif (x1 == x+1 or x1 == x-1) and (y1 == y or y1 == y+1 or y1 == y-1) and (z1 == z or z1 == z-1 or z1 == z+1):
                ## stutters: total count off by one and per-motif counts within +/-1
                stutters_sets = line
                #print(stutters_sets)
                print(line)
                fo1.write(line)
                fo1.write("\n")
            # else:
                #print('Printing nothing for non-stutters')
                ## non-stutters but have duplicates
                # non_Stutters_sets = line
                # fo2.write(line)
                # fo2.write("\n")
        f2.close()
    fo0.close()
    fo1.close()
    #fo2.close()
    ##remove duplicates from stutters list
    stutters = str(locusname + "_" + samplename + "_" + depthreplicate + "_Stutters.out")
    stutters_undup = str(locusname + "_" + samplename + "_" + depthreplicate + "_Stutters_undup.out")
    lines_seen = set() # holds lines already seen
    outfile = open(stutters_undup, "w")
    infile = open(stutters, "r")
    for line in infile:
        if line not in lines_seen:
            outfile.write(line)
            lines_seen.add(line)
    outfile.close()
    infile.close()
    ##rename files
    os.remove(stutters)
    ##cat *_Stutters.out *predictedTruth.out > temp2select
    filt= str(locusname + "_" + samplename + "_" + depthreplicate + "_TrueSTRs.out")
    stutters_undup = str(locusname + "_" + samplename + "_" + depthreplicate + "_Stutters_undup.out")
    filenames = [filt, stutters_undup]
    # concatenate truth + deduplicated stutters into a temporary filter file
    with open('filtered_Stutters.txt', 'w') as outfile:
        for names in filenames:
            with open(names) as infile:
                for line in infile:
                    outfile.write(line)
    outfile.close()
    ## make non-stutters file
    # non-stutters = every count-file line not present in the filter file
    doc = open('filtered_Stutters.txt', 'r')
    doc1 = open(count_file, 'r')
    f1 = [x for x in doc.readlines()]
    f2 = [x for x in doc1.readlines()]
    dif = [line for line in f2 if line not in f1] # lines present only in f2
    doc.close()
    doc1.close()
    non_Stutters_out = str(locusname + "_" + samplename + "_" + depthreplicate + "_NonStutters.out")
    with open(non_Stutters_out, 'w') as file_out:
        for match in dif:
            file_out.write(match)
    file_out.close()
    ## now remove the *_Stutters.out *predictedTruth.ou i.e. filtered_Stutters.txt
    os.remove('filtered_Stutters.txt')
def main():
    """
    Parse the command line and dispatch to run().
    """
    parser = argparse.ArgumentParser(prog="StutterSpy", description=USAGE,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-i1", "--input1", default=None, metavar="INPUT_motif_file",
                        dest="motif_file", type=str, help="Truth allele Motif", required=True)
    parser.add_argument("-i2", "--input2", default=None, metavar="INPUT_count_file",
                        dest="count_file", type=str, help="Freq counts file", required=True)
    parser.add_argument("-t", "--locus_name", default=None, metavar="LOCUS_NAME", type=str,
                        dest="locusname", help="locus name", required=True)
    # bug fix: the help text previously said "dir of all str bed", which
    # described a different option; -s is the sample name
    parser.add_argument("-s", "--samplename", default=None, metavar="samplename", type=str,
                        dest="samplename", help="sample name", required=True)
    parser.add_argument("-d", "--depthreplicate", default=None, metavar="DEPTH", type=str,
                        dest="depthreplicate",
                        help="depth such as 30X or 15X and replicates type_number", required=True)
    parser.set_defaults(func=run)
    args = parser.parse_args()
    args.func(args)
if __name__=="__main__":
    # Bracket the run with a banner and time it using CPU process time.
    banner = "===================================="
    print(banner)
    t_begin = time.process_time()
    main()
    print(banner + "\n")
    t_end = time.process_time()
    print("The Program run time is : %.03f seconds" % (t_end - t_begin))
| StarcoderdataPython |
3242972 | <filename>lib/model/utils/factory_utils.py
from importlib import import_module
def get_class_from_package(package_full_path, class_rel_path, abstract_class):
    """Dynamically load a class from a package and validate its base class.

    Parameters
    ----------
    package_full_path : str
        Dotted path of the package to search (e.g. ``'torch.optim'``).
    class_rel_path : str
        ``'<module>.<ClassName>'`` path relative to the package.
    abstract_class : type
        Base class the resolved class must derive from.

    Returns
    -------
    type
        The resolved class.

    Raises
    ------
    ImportError
        If the module or class cannot be found, or the class is not a
        subclass of ``abstract_class``.
    """
    module_name, class_name = class_rel_path.rsplit('.', 1)
    try:
        class_module = import_module(package_full_path + '.' + module_name)
        returned_class = getattr(class_module, class_name)
    except (AttributeError, ModuleNotFoundError) as err:
        # Chain the original failure so the root cause stays visible.
        raise ImportError('{} is not part of the package!'.format(class_name)) from err
    # Validate outside the try block so this ImportError can never be
    # confused with an import failure by the handler above.
    if not issubclass(returned_class, abstract_class):
        raise ImportError(
            "{} is not a subclass of the given abstract class.".format(returned_class))
    return returned_class
def get_optimizer_class(optimizer_name):
    """Resolve a torch optimizer class by name (e.g. 'Adam').

    The module path is derived from the lowercased class name, i.e.
    'Adam' is looked up as torch.optim.adam.Adam and validated against
    torch.optim.Optimizer.
    """
    from torch.optim import Optimizer
    rel_path = optimizer_name.lower() + '.' + optimizer_name
    return get_class_from_package('torch.optim', rel_path, Optimizer)
| StarcoderdataPython |
6414222 | #!/usr/bin/env python
# coding: utf-8
# Exploratory analysis of house prices: scatter plot + correlation of
# Price against Age, LotSize and LivingArea.
# (Exported from a Jupyter notebook; bare expressions only render there.)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

data = pd.read_csv('C:/Users/admin/Desktop/houseprices-1.csv')
data

price = data.Price
age = data.Age
size = data.LotSize
area = data.LivingArea

# One scatter plot and one correlation matrix per predictor,
# in the same order as the original notebook cells.
for predictor in (age, size, area):
    plt.scatter(price, predictor)
    plt.show()
    np.corrcoef(price, predictor)
| StarcoderdataPython |
104976 | from domain.entities.value_objects.cashback import Cashback
| StarcoderdataPython |
1800470 | '''
A SPACESHIP ORBITAL SIMULATION - WITH REAL PHYSICS
CHALLENGES (ordered by difficulty)
Question: If ISS was orbiting around Mars, would it stay in orbit?
Mars Gravity : -3.71 m/s^2
Mars Mass : 6.39 * 1e23 kg
Mars Radius : 3389 km
Question: If a spaceship orbits at an altitude of 500km around Mars,
what velocity does it need to stay in a mostly CIRCULAR orbit?
Question: How much does Phobos, the innermost, largest moon of Mars,
mess with the orbit of a spaceship in orbit at 1000km?
Phobos Distance from Center of Mars: ~ 9300 km
Phobos Radius : 11 km
Phobos Mass : 1.0659 * 1e16 kg
Question: How much change in velocity (delta-v) does that same spaceship
need for its orbit to actually "intersect" the planet (barely)
In other words, what is the difference in orbital velocity and
maximum velocity required to impact the surface tangentially?
'''
import matplotlib.pyplot as plt
from time import time
import numpy as np
# First, enable interactive plotting
plt.ion() # this fancy function makes a matplotlib window ALIIVEEEEE!
# We want to draw the planet.... So let's do it in POLAR COORDINATES!
# First we need some angles...
# This gets 100 evenly spaced angles between 0 and 2pi
circle_angles = np.linspace(0, (2*np.pi), 100)
# ----------------------------- Getting planet set up
R0 = 6371 * 1e3 # km * 1e3 => meters, radius of the planet
planet_r = np.linspace(R0, R0, 100) # get 100 evenly spaced R0s
planet_X = planet_r*np.cos(circle_angles) # X = take the cos(all_angles) * R0
planet_Y = planet_r*np.sin(circle_angles) # Y = take the cos(all_angles) * R0
planet = plt.plot(planet_X,planet_Y)[0] # make a plot with x and y
# ----------------------------- Getting spaceship all set up
''' NOTE: numpy.arrays are used throughout. why?
If you try [1,0,0] * 3 with Python lists, you'll get an error.
If you try [1,0,0] * 3 with Numpy arrays, you'll get [3,0,0]... Useful!
'''
alt_initial = 408_773 # ISS altitude, meters
# let's start y = initial altitude + radius of planet
pos_initial = np.array([
0.,
alt_initial + R0
])
vel_initial = np.array([
7666.736,
0.
]) # ISS horizontal velocity, meters per second
trail_points = 500 # how many points should the trail keep?
spaceship_trail = { 'x': [pos_initial[0]], 'y': [pos_initial[1]] }
spaceship = plt.plot(*pos_initial)[0] # give plot the position intially
# ----------------------------- Getting physics set up
def gravity(pos):
    """Return the gravitational acceleration vector (m/s^2) at position pos.

    pos is a 2-element numpy array giving the position (m) relative to the
    planet's center.  The returned vector points back toward the center
    with magnitude G*M/r**2.
    """
    G = 6.674 * 1e-11 # universal gravitational constant
    M = 5.972 * 1e24  # mass of planet, kg
    # Distance from the planet's center; computed once instead of twice
    # via the hand-rolled sqrt(x**2 + y**2) of the original.
    r = np.linalg.norm(pos)
    # Unit vector toward the center (-pos/r) scaled by Newtonian gravity.
    return (-pos / r) * (G * M / r**2)
pos = pos_initial # NOTE(review): aliases pos_initial; += below mutates it in place
vel = vel_initial # NOTE(review): same aliasing as pos_initial
acc = gravity(pos)
dt = 10 # integration timestep, seconds
while True:
    # Semi-implicit Euler: update velocity from the current acceleration,
    # then position from the *new* velocity.
    acc = gravity(pos)
    vel += (acc) * dt
    pos += (vel) * dt
    spaceship_trail['x'].append(pos[0])
    spaceship_trail['y'].append(pos[1])
    spaceship.set_xdata(spaceship_trail['x']) # redraw the whole saved trail
    spaceship.set_ydata(spaceship_trail['y'])
    print('Trail N : ', len(spaceship_trail['x']), end =' | ')
    # NOTE(review): this prints the distance from the planet's center,
    # not the height above the surface (that would be norm(pos) - R0).
    print('Altitude: ', round(np.linalg.norm(pos),2), end =' | ')
    print('Velocity: ', round(np.linalg.norm(vel),2), end =' | ')
    print('Gravity : ', round(np.linalg.norm(acc),2))
    # Cap the trail length so the plot does not slow down over time.
    if len(spaceship_trail['x']) > trail_points:
        spaceship_trail['x'].pop(0)
        spaceship_trail['y'].pop(0)
    plt.pause(.01)
| StarcoderdataPython |
8196733 | from tkinter import *
import random, dropletClass, personClass, buttonFunctions
def onClosing():
    """Tear down the Tk root window when the user closes the app."""
    root.destroy()
def mainLoop(stats, socdis, peopleArray, vents, ac, germs, rein):
    """Advance the epidemic simulation by one tick and reschedule itself.

    stats is a mutable list [deaths, recovered, infected, total people];
    socdis/vents/ac/rein are tkinter IntVars backing the UI checkboxes;
    peopleArray and germs are the live lists of person/droplet objects.
    Relies on module-level globals: canvas and the four status labels.
    """
    ##########SETTINGS##########
    deathChance = 10000 #chance of death, default 1/10000, number is denominator
    bigDropChance = 8 #chance of a big droplet infecting someone with contact, default 1/8, number is denominator
    mediumDropChance = 16 #chance of a medium droplet infecting someone with contact, default 1/16, number is denominator
    smallDropChance = 32 #chance of a small droplet infecting someone with contact, default 1/32, number is denominator
    bigDistance = 30 #maximum distance big droplet travels, default 30
    mediumDistance = 45 #maximum distance medium droplet travels, default 45
    smallDistance = 60 #maximum distance small droplet travels, default 60
    bigTimer = 40 #maximum time big droplet will 'hang', minimum time is max/2, default 40
    mediumTimer = 200 #maximum time medium droplet will 'hang', minimum time is max/2, default 200
    smallTimer = 500 #maximum time small droplet will 'hang', minimum time is max/2, default 500
    bigNumber = 5 #maximum amount of big drops expelled when sneeze happens, minimum is 0, default 5
    mediumNumber = 10 #maximum amount of medium drops expelled when sneeze happens, minimum is 0, default 10
    smallNumber = 20 #maximum amount of small drops expelled when sneeze happens, minimum is 0, default 20
    recoveryLength = 2000 #amount of time it takes a dot to recover, default 2000
    immunityLength = 1000 #amount of time someone stays immune if fading immunity setting is checked, default 1000
    sneezeTimer = 200 #maximum amount of time between sneezes, minimum is max/2, default 200
    sideSneeze = True #True if the sneeze expells to the side of a person as if head was turned, default True
    ############################
    # Re-register this function with tkinter so it runs again in 10 ms.
    canvas.after(10, mainLoop, stats, socdis, peopleArray, vents, ac, germs, rein)
    deathLabel.config(text="Deaths: "+str(stats[0]))
    infectedLabel.config(text="Infected: "+str(stats[2]))
    recoveredLabel.config(text="Recovered: "+str(stats[1]))
    totalLabel.config(text="Total People: " + str(stats[3]))
    # Advance every droplet (possibly pushed by airflow) and age it out.
    for g in germs:
        g.move(vents, ac, germs)
        g.tickTimer(germs)
    for p in peopleArray:
        p.tickDirectionTimer()
        p.tickCuredTimer(stats, recoveryLength, rein)
        p.tickInfectedTimer(sneezeTimer, germs, bigDistance, mediumDistance, smallDistance, sideSneeze, bigNumber, mediumNumber, smallNumber, bigTimer, mediumTimer, smallTimer)
        p.tickImmunityTimer(immunityLength)
        p.move(socdis, peopleArray)
        # Each infected person has a 1/deathChance chance of dying per tick.
        death = random.randint(0,deathChance-1)
        if death == 0 and p.infected:
            stats[2] -= 1
            # NOTE(review): kill() appears to mutate peopleArray while this
            # loop iterates over it -- confirm against personClass.
            p.kill(peopleArray)
            stats[0] += 1
        # Droplet-person contact test: a 6x6 box centered on the person.
        for g in germs:
            if p.x-3 < g.x and p.x+3 > g.x and p.y-3 < g.y and p.y+3 > g.y:
                # Infection odds depend on droplet size (big / 1=medium / 2=small).
                chance = random.randint(0,bigDropChance - 1)
                if g.size == 1:
                    chance = random.randint(0,mediumDropChance - 1)
                if g.size == 2:
                    chance = random.randint(0, smallDropChance - 1)
                if chance == 0 and not p.cured and not p.infected:
                    p.startInfection()
                    stats[2] += 1
                # A contacted droplet is absorbed regardless of infection outcome.
                g.absorbed(germs)
if __name__ == "__main__":
    # Shared mutable state handed into every callback.
    peopleArray = []
    germs = []
    stats = [0,0,0,0] #deaths, recovered, infected, total people
    root = Tk()
    canvas = Canvas(root, width=500, height=500, bg="white")
    canvas.grid(row=0, column=0, columnspan=4)
    # Row 1: buttons that add healthy people.
    Label(text="Add People:").grid(row=1, column=0)
    buttonAdd = Button(root, text="Add Person", command = lambda arg1=stats, arg2=peopleArray, arg3=canvas: buttonFunctions.addPerson(arg1, arg2, arg3))
    buttonAdd.grid(row=1, column=1)
    buttonTenAdd = Button(root, text="Add 10 People", command = lambda arg1=stats, arg2=peopleArray, arg3=canvas: buttonFunctions.addTenPeople(arg1, arg2, arg3))
    buttonTenAdd.grid(row=1, column=2)
    buttonTenAdd = Button(root, text="Add 50 People", command = lambda arg1=stats, arg2=peopleArray, arg3=canvas: buttonFunctions.addFiftyPeople(arg1, arg2, arg3))
    buttonTenAdd.grid(row=1, column=3)
    # Row 2: buttons that add already-infected people.  NOTE(review): the
    # addI* helpers take (stats, canvas, peopleArray) -- a different order
    # than the add* helpers above; confirm against buttonFunctions.
    Label(text="Add Infected:").grid(row=2, column=0)
    buttonAddI = Button(root, text="Add Infected Person", command = lambda arg1=stats, arg2=canvas, arg3=peopleArray: buttonFunctions.addIPerson(arg1, arg2, arg3))
    buttonAddI.grid(row=2, column=1)
    buttonAddI = Button(root, text="Add 10 Infected People", command = lambda arg1=stats, arg2=canvas, arg3=peopleArray: buttonFunctions.addTenIPeople(arg1, arg2, arg3))
    buttonAddI.grid(row=2, column=2)
    buttonAddI = Button(root, text="Add 50 Infected People", command = lambda arg1=stats, arg2=canvas, arg3=peopleArray: buttonFunctions.addFiftyIPeople(arg1, arg2, arg3))
    buttonAddI.grid(row=2, column=3)
    # Row 3: simulation option checkboxes (read each tick by mainLoop).
    vents = IntVar()
    ventCheck = Checkbutton(root, text="Airflow on", variable=vents, command= lambda arg1=vents, arg2=canvas: buttonFunctions.drawVents(arg1, arg2))
    ventCheck.grid(row=3, column=0)
    ac = IntVar()
    acCheck = Checkbutton(root, text="No filter in airflow", variable=ac)
    acCheck.grid(row=3, column=1)
    socdis = IntVar()
    socdisCheck = Checkbutton(root, text="Social Distancing", variable=socdis)
    socdisCheck.grid(row=3, column=2)
    rein = IntVar()
    reinCheck = Checkbutton(root, text="Fading Immunity", variable = rein)
    reinCheck.grid(row=3, column=3)
    # Row 4: live statistics labels, refreshed every tick by mainLoop().
    totalLabel = Label(root, text="Total People: "+str(stats[3]))
    totalLabel.grid(row=4, column=0)
    deathLabel = Label(root, text="Deaths: "+str(stats[0]))
    deathLabel.grid(row=4, column=1)
    infectedLabel = Label(root, text="Infected: "+str(stats[2]))
    infectedLabel.grid(row=4, column=2)
    recoveredLabel = Label(root, text="Recovered: "+str(stats[1]))
    recoveredLabel.grid(row=4, column=3)
    # Kick off the simulation loop, then hand control to tkinter.
    mainLoop(stats, socdis, peopleArray, vents, ac, germs, rein)
    root.protocol("WM_DELETE_WINDOW", onClosing)
    root.mainloop()
5172343 | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: <NAME>
# @Date: 2018-07-17 23:33:27
# @Last modified by: <NAME>
# @Last Modified time: 2018-07-18 23:09:17
from __future__ import print_function, division, absolute_import
from marvin.utils.datamodel.vacs.base import VACDataModelList
from marvin.utils.datamodel.vacs.releases import vacdms
# Module-level singleton: one VACDataModelList spanning all release datamodels.
datamodel = VACDataModelList(vacdms)
| StarcoderdataPython |
5151390 | # -*- python -*-
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gdb_test
class KillTest(gdb_test.GdbTest):
    """Exercise gdb's 'kill' command against a debugged NaCl program."""
    def test_kill(self):
        # Test that you can stop on a breakpoint, then kill the program being
        # debugged.
        self.gdb.Command('break test_kill')
        self.gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
        self.gdb.Kill()
    def tearDown(self):
        # Test program should end first with the kill return code.
        # Intentionally bypass superclass's tearDown as it assumes gdb exits first.
        self.AssertSelLdrExits()
        self.gdb.Quit()
        self.gdb.Wait()
if __name__ == '__main__':
    # Delegate to the shared gdb test harness entry point.
    gdb_test.Main()
| StarcoderdataPython |
210883 | from __future__ import absolute_import, division, print_function
import sys,os
import iotbx.phil
from scitbx.array_family import flex
'''
Utilities to generate models and maps from standard or user-selected PDB files
Use map_model_manager to access these utilities
'''
def get_map_manager(map_data,wrapping,
    unit_cell_dimensions=None,crystal_symmetry=None,):
  '''
    Build a minimal map_manager in space group P1 around map_data.

    Either crystal_symmetry or unit_cell_dimensions must be supplied; when
    only dimensions are given, a P1 cell with 90,90,90 angles is assumed.
    The map origin is shifted to (0,0,0).
  '''
  assert unit_cell_dimensions or crystal_symmetry
  assert isinstance(wrapping, bool)
  from iotbx.map_manager import map_manager
  shifted_map = map_data.shift_origin()
  if not crystal_symmetry:
    from cctbx import crystal
    a, b, c = unit_cell_dimensions[:3]
    crystal_symmetry = crystal.symmetry((a, b, c, 90, 90, 90), 1)
  return map_manager(
    map_data = shifted_map,
    unit_cell_grid = shifted_map.all(),
    unit_cell_crystal_symmetry = crystal_symmetry,
    origin_shift_grid_units = (0, 0, 0),
    wrapping = wrapping)
def read_map_and_model(file_name_1,file_name_2, regression_directory = None,
   prefix = None):
  '''
    Identify which of the two files is the map and which is the model,
    read both in, and return a map_model_manager holding them.

    Parameters
    ----------
    file_name_1, file_name_2 : str
      One map file (.ccp4/.mrc/.map) and one model file (.pdb/.cif),
      in either order.
    regression_directory : str, optional
      Sub-directory of $PHENIX/modules/phenix_regression to look in
      (used only when prefix is not supplied).
    prefix : str, optional
      Directory prepended to both file names.

    Raises
    ------
    Sorry
      If the map and model cannot be identified from the extensions.
  '''
  if regression_directory and not prefix:
    import libtbx.load_env
    prefix = libtbx.env.under_dist(
      module_name="phenix_regression",
      path=regression_directory,
      test=os.path.isdir)
  if prefix:
    file_name_1 = os.path.join(prefix,file_name_1)
    file_name_2 = os.path.join(prefix,file_name_2)
  map_file_name = None
  model_file_name = None
  for f in [file_name_1,file_name_2]:
    # endswith accepts a tuple, so one call covers all extensions
    if f.endswith(('.ccp4','.mrc','.map')):
      map_file_name = f
    if f.endswith(('.pdb','.cif')):
      model_file_name = f
  if not map_file_name or not model_file_name:
    # Bug fix: Sorry was never imported at module level, so this raise
    # previously failed with a NameError instead of a useful message.
    from libtbx.utils import Sorry
    raise Sorry("Unable to guess map and model from %s and %s" %(
      file_name_1, file_name_2))
  from iotbx.data_manager import DataManager
  from iotbx.map_model_manager import map_model_manager
  dm = DataManager()
  dm.process_real_map_file(map_file_name)
  mm = dm.get_real_map(map_file_name)
  dm.process_model_file(model_file_name)
  model = dm.get_model(model_file_name)
  mam = map_model_manager(model=model,map_manager=mm)
  return mam
def generate_model(
      file_name=None,
      n_residues=None,
      start_res=None,
      b_iso=30,
      box_cushion=5,
      space_group_number=1,
      output_model_file_name=None,
      shake=None,
      random_seed=None,
      log=sys.stdout):
  '''
    generate_model: Simple utility for generating a model for testing purposes.

    This function typically accessed and tested through map_model_manager

    Summary
    -------
    Generate a model from a user-specified file or from some examples available
    in the cctbx.  Cut out specified number of residues, shift to place on
    positive side of origin, optionally set b values to b_iso,
    place in box with buffering of box_cushion on all
    edges, optionally randomly shift (shake) atomic positions by rms of shake A,
    and write out to output_model_file_name and return model object.

    Parameters:
      file_name (string, None):  File containing model (PDB, CIF format)
      n_residues (int, 10):      Number of residues to include
      start_res (int, None):     Starting residue number
      b_iso (float, 30):         B-value (ADP) to use for all atoms
      box_cushion (float, 5):    Buffer (A) around model
      space_group_number (int, 1):  Space group to use
      output_model_file_name (string, None):  File for output model
      shake (float, None):       RMS variation to add (A) in shake
      random_seed (int, None):   Random seed for shake

    Returns:
      model.manager object (model) in a box defined by a crystal_symmetry object
  '''
  # Normalize/validate the parameters (they may arrive as strings)
  space_group_number=int(space_group_number)
  if n_residues is not None:
    n_residues=int(n_residues)
  box_cushion=float(box_cushion)
  if start_res:
    start_res=int(start_res)
  if shake:
    shake=float(shake)
  if random_seed:
    random_seed=int(random_seed)
    import random
    random.seed(random_seed)
    # Re-derive a seed so python and flex random streams are decoupled
    random_seed=random.randint(1,714717)
    flex.set_random_seed(random_seed)
  # Choose file with coordinates: pick the smallest bundled example that
  # still contains n_residues residues
  if not file_name:
    if not n_residues:
      n_residues = 10 # default
    import libtbx.load_env
    iotbx_regression = os.path.join(libtbx.env.find_in_repositories("iotbx"),
      'regression')
    if n_residues < 25:
      file_name=os.path.join(iotbx_regression,'secondary_structure',
        '5a63_chainBp.pdb') # starts at 219
      if not start_res: start_res=219
    elif n_residues < 167:
      file_name=os.path.join(iotbx_regression,'secondary_structure',
        '3jd6_noh.pdb') # starts at 58
      if not start_res:start_res=58
    else:
      file_name=os.path.join(iotbx_regression,'secondary_structure',
        '4a7h_chainC.pdb') # starts at 9
      if not start_res:start_res=9
  else: # have file_name
    if start_res is None:
      start_res=1
    if not n_residues:
      n_residues = 100000 # a big number
  # Read in coordinates and cut out the part of the model we want
  from iotbx.data_manager import DataManager
  dm = DataManager(['model'])
  dm.process_model_file(file_name)
  model = dm.get_model(file_name)
  # Models without symmetry information need a box before selection works
  if not model.crystal_symmetry() or not model.crystal_symmetry().unit_cell():
    from cctbx.maptbx.box import shift_and_box_model
    model = shift_and_box_model(model = model,
      box_cushion = box_cushion)
  selection=model.selection('resseq %s:%s' %(start_res,start_res+n_residues-1))
  model=model.select(selection)
  # Shift the model and return it with new crystal_symmetry
  from cctbx.maptbx.box import shift_and_box_model
  model = shift_and_box_model(model = model,
    box_cushion = box_cushion)
  if b_iso is not None:
    b_values=flex.double(model.get_sites_cart().size(), b_iso)
    ph = model.get_hierarchy()
    ph.atoms().set_b(b_values)
  # Optionally shake model
  if shake:
    model=shake_model(model,shake=shake)
  if output_model_file_name:
    # Use a context manager so the file is closed even if writing fails
    # (original used bare open()/close())
    with open(output_model_file_name,'w') as f:
      print ("%s" %(model.model_as_pdb()),file=f)
    print ("Writing model with %s residues and b_iso=%s from %s to %s" %(
      n_residues,b_iso,file_name,output_model_file_name),file=log)
  else:
    print ("Generated model with %s residues and b_iso=%s from %s " %(
      n_residues,b_iso,file_name),file=log)
  return model
def shake_model(model,shake=None,log=sys.stdout):
  '''Return model with coordinates randomized by an rms of "shake" (A),
     using mmtbx.pdbtools' modify machinery.'''
  from mmtbx.pdbtools import master_params_str, modify
  # Build default pdbtools parameters, then request only the coordinate shake
  pdbtools_params = iotbx.phil.parse(master_params_str).extract()
  pdbtools_params.modify.sites[0].shake = shake
  shaken = modify(
    model = model,
    params = pdbtools_params.modify,
    log = log)
  return shaken.get_results().model
def generate_map_coefficients(
      model=None, # Required model
      output_map_coeffs_file_name=None,
      d_min=3,
      k_sol = None,
      b_sol = None,
      scattering_table='electron',
      f_obs_array = None,
      log=sys.stdout):
  '''
    Convenience function to create map coefficients from a model.

    This function is typically accessed and tested through map_model_manager.

    Summary: supply a model.manager object, a high_resolution limit d_min,
    an optional scattering_table ("electron" for cryo-EM, "n_gaussian" for
    x-ray) and an optional output_map_coeffs_file_name.  Writes map
    coefficients (if a file name is given) and returns the miller_array
    object containing them.

    Parameters:
    -----------
    model (model.manager object, None):  model to use; must carry
       crystal_symmetry
    output_map_coeffs_file_name (string, None): output MTZ file name
    d_min (float, 3):  high-resolution limit for map coeffs (A)
    k_sol, b_sol (float, None):  bulk-solvent parameters; applied only
       when both are supplied
    scattering_table (choice, 'electron'): choice of scattering table.
       All choices: wk1995 it1992 n_gaussian neutron electron
    f_obs_array (miller array, None):  array used to match indices of fcalc;
       when given, its d_min (minus a small offset) overrides d_min and its
       symmetry must match the model's
  '''
  assert model is not None
  # Build fmodel defaults, then override with our arguments
  from mmtbx.utils import fmodel_from_xray_structure
  import iotbx.phil
  from mmtbx.command_line.fmodel import master_phil
  fmodel_params=master_phil.extract()
  if f_obs_array:
    assert f_obs_array.crystal_symmetry().is_similar_symmetry(
      model.crystal_symmetry())
  assert model.crystal_symmetry() is not None # Need crystal_symmetry in model
  xrs=model.get_xray_structure()
  fmodel_params.high_resolution=float(d_min)
  fmodel_params.scattering_table=scattering_table
  if k_sol is not None and b_sol is not None:
    fmodel_params.fmodel.k_sol = k_sol
    fmodel_params.fmodel.b_sol = b_sol
  if f_obs_array:
    # Slightly finer cutoff so every index of f_obs_array is generated
    fmodel_params.high_resolution=f_obs_array.d_min()-0.0001 # different cut
  fmodel=fmodel_from_xray_structure(
    xray_structure = xrs,
    params = fmodel_params,
    f_obs = f_obs_array,
    out = log)
  f_model=fmodel.f_model
  if output_map_coeffs_file_name:
    f_model.as_mtz_dataset(column_root_label='FWT').mtz_object().write(
      file_name=output_map_coeffs_file_name)
    print("Writing map coefficients to resolution of %s A to %s" %(
      d_min,output_map_coeffs_file_name),file=log)
  else:
    print("Generated map coefficients to resolution of %s A " %(
      d_min),file=log)
  return f_model
def generate_map(
      output_map_file_name=None,
      map_coeffs=None, # Required
      d_min=3,
      map_manager = None, # source of info, not map
      gridding=None,
      wrapping=False,
      origin_shift_grid_units=None,
      low_resolution_fourier_noise_fraction=0,
      high_resolution_fourier_noise_fraction=0,
      low_resolution_real_space_noise_fraction=0,
      high_resolution_real_space_noise_fraction=0,
      low_resolution_noise_cutoff=None,
      random_seed = None,
      log=sys.stdout):
  '''
    Generate map from map_coefficients and add noise in Fourier or real space

    This function typically accessed and tested through map_model_manager

    Summary:
    --------
    Calculate a map and optionally add noise to it.  Supply map
    coefficients (miller_array object) and types of noise to add,
    along with optional gridding (nx,ny,nz), and origin_shift_grid_units.

    Unique aspect of this noise generation is that it can be specified
    whether the noise is local in real space (every point in a map
    gets a random value before Fourier filtering), or local in Fourier
    space (every Fourier coefficient gets a complex random offset).
    Also the relative contribution of each type of noise vs resolution
    can be controlled.

    Parameters:
    -----------
    output_map_file_name (string, None):  Output map file (MRC/CCP4 format)
    map_coeffs (miller.array object, None): map coefficients
    d_min (float, 3):  high_resolution limit (A)
    map_manager (map_manager, None):  source of gridding/origin/wrapping
       (overrides the gridding/wrapping/origin arguments)
    gridding (tuple (nx,ny,nz) or str, None):  gridding of map (optional)
    origin_shift_grid_units (tuple (ix,iy,iz), None): move location of
       origin of resulting map to (ix,iy,iz) before writing out
    low_resolution_fourier_noise_fraction (float, 0): low-res Fourier noise
    high_resolution_fourier_noise_fraction (float, 0): high-res Fourier noise
    low_resolution_real_space_noise_fraction (float, 0): low-res
       real-space noise
    high_resolution_real_space_noise_fraction (float, 0): high-res
       real-space noise
    low_resolution_noise_cutoff (float, None):  low resolution where noise
       starts to be added
    random_seed (int, None):  seed for the noise generators
  '''
  if random_seed:
    random_seed=int(random_seed)
    import random
    random.seed(random_seed)
    # Re-derive a seed so python and flex random streams are decoupled
    random_seed=random.randint(1,714717)
    flex.set_random_seed(random_seed)
  if map_manager:
    # Take geometry from the supplied map_manager
    origin_shift_grid_units=map_manager.origin_shift_grid_units
    gridding=map_manager.map_data().all()
    wrapping=map_manager.wrapping()
  if gridding:
    if type(gridding) in [type((1,2,3)), type([1,2,3])] and type(gridding[0])==type(1):
      pass # already a tuple/list of int; nothing to do
    else:
      # Parse any other representation, e.g. "(10, 20, 30)" or "10 20 30".
      # Bug fix: the original assigned gridding=[] *before* reading
      # str(gridding), so the parse always saw "[]" and produced an
      # empty gridding.  Capture the text first.
      gridding_text = str(gridding)
      gridding=[]
      for x in gridding_text.replace("(","").replace(")","").replace("[","").replace("]","").replace(",","").split():
        gridding.append(int(x))
  low_resolution_fourier_noise_fraction=float(
    low_resolution_fourier_noise_fraction)
  high_resolution_fourier_noise_fraction=float(
    high_resolution_fourier_noise_fraction)
  low_resolution_real_space_noise_fraction=float(
    low_resolution_real_space_noise_fraction)
  high_resolution_real_space_noise_fraction=float(
    high_resolution_real_space_noise_fraction)
  if low_resolution_noise_cutoff:
    low_resolution_noise_cutoff=float(low_resolution_noise_cutoff)
  if d_min:
    d_min=float(d_min)
    map_coeffs=map_coeffs.resolution_filter(d_min=d_min)
  # Calculate a map from Fourier coefficients:
  from cctbx.maptbx.segment_and_split_map import get_map_from_map_coeffs
  map_data=get_map_from_map_coeffs(
    map_coeffs=map_coeffs,
    crystal_symmetry=map_coeffs.crystal_symmetry(),
    n_real=gridding,
    apply_sigma_scaling=False)
  # Optionally add noise to this map as an additive noise map.
  # Noise can be added in Fourier space (leads to correlated errors
  # in real space) or in real space (leads to correlated errors
  # in Fourier space).  Noise is Fourier-weighted as function of
  # resolution: the *_fraction arguments give the rms of the noise
  # relative to the rms of the map at the low- and high-resolution
  # limits, interpolated linearly in 1/d between; noise starts at
  # low_resolution_noise_cutoff.
  if (low_resolution_fourier_noise_fraction or
      high_resolution_fourier_noise_fraction):
    fourier_noise_map=get_fourier_noise_map(n_real=map_data.all(),
      map_coeffs=map_coeffs,
      low_resolution_fourier_noise_fraction=
         low_resolution_fourier_noise_fraction,
      high_resolution_fourier_noise_fraction=
         high_resolution_fourier_noise_fraction,
      d_min=d_min,
      low_resolution_noise_cutoff=low_resolution_noise_cutoff,
      log=log)
  else:
    fourier_noise_map=None
  if (low_resolution_real_space_noise_fraction or
      high_resolution_real_space_noise_fraction):
    real_space_noise_map=get_real_space_noise_map(map_data=map_data,
      map_coeffs=map_coeffs,
      low_resolution_real_space_noise_fraction=
         low_resolution_real_space_noise_fraction,
      high_resolution_real_space_noise_fraction=
         high_resolution_real_space_noise_fraction,
      d_min=d_min,
      low_resolution_noise_cutoff=low_resolution_noise_cutoff,
      log=log)
  else:
    real_space_noise_map=None
  if fourier_noise_map:
    map_data+=fourier_noise_map
  if real_space_noise_map:
    map_data+=real_space_noise_map
  if map_manager:
    mm=map_manager.customized_copy(map_data=map_data)
  else:
    # Create a map_manager object directly (unusual use of map_manager)
    from iotbx.map_manager import map_manager
    mm=map_manager(map_data=map_data,
      unit_cell_grid=map_data.all(),
      unit_cell_crystal_symmetry=map_coeffs.crystal_symmetry(),
      origin_shift_grid_units=origin_shift_grid_units,
      wrapping=wrapping)
  if output_map_file_name:
    mm.write_map(output_map_file_name)
  else:
    print("Generated map with origin at %s and size of %s" %(
      mm.map_data().origin(),mm.map_data().all()),file=log)
  return mm
def squares_of_complex(m1):
  '''Return |m1|^2 elementwise: re^2 + im^2 of a complex flex array.'''
  real_part, imag_part = m1.parts()
  return flex.pow2(real_part) + flex.pow2(imag_part)
def norm(m1):
  '''Return the rms magnitude of the complex array m1 (sqrt of mean |m1|^2).'''
  return squares_of_complex(m1).min_max_mean().mean ** 0.5
def map_coeffs_as_fp_phi(map_coeffs):
  '''Split complex map coefficients into (amplitudes, phases in degrees).'''
  phi = map_coeffs.phases(deg=True)
  fp = map_coeffs.amplitudes()
  fp.set_observation_type_xray_amplitude()
  assert fp.is_real_array()
  return fp, phi
def fp_phi_as_map_coeffs(fp,phi):
  '''Recombine amplitudes fp with phases phi (degrees) into complex map coefficients.'''
  return fp.phase_transfer(phase_source=phi,deg=True)
def get_real_space_noise_map(map_data=None,
      map_coeffs=None,
      low_resolution_real_space_noise_fraction=None,
      high_resolution_real_space_noise_fraction=None,
      d_min=None,
      low_resolution_noise_cutoff=None,
      log=sys.stdout):
  '''
    Generate a noise map whose errors are local in real space.
    NOTE: only applies for space group P1.

    Random values are placed on every grid point of a map matching
    map_data, back-transformed to Fourier coefficients on map_coeffs'
    indices, and then scaled in resolution bins (by scale_map_coeffs) to
    yield low_resolution_real_space_noise_fraction of the map rms at
    low_resolution_noise_cutoff and
    high_resolution_real_space_noise_fraction at the high-resolution
    limit, linear in 1/d between.

    Returns the scaled noise map coefficients.
  '''
  assert map_coeffs.crystal_symmetry().space_group_number() in [
     0,1]
  print ("\nGenerating random map in real space, then Fourier filtering",
    file=log)
  # Fill a map-shaped array with random values.
  # NOTE(review): the original comment claimed "mean zero and rms 1", but
  # flex.random_double appears to be a uniform [0,1) generator -- confirm.
  random_values=flex.random_double(map_data.size())
  acc=map_data.accessor()
  random_values.reshape(acc) # give the flat random array the map's gridding
  # Back-transform the random map into Fourier coefficients on the same
  # indices as map_coeffs
  from cctbx import miller
  randomized_map_coeffs=map_coeffs.structure_factors_from_map(random_values)
  return scale_map_coeffs(
    n_real=map_data.all(),
    randomized_map_coeffs=randomized_map_coeffs,
    map_coeffs=map_coeffs,
    high_resolution_noise_fraction=high_resolution_real_space_noise_fraction,
    low_resolution_noise_fraction=low_resolution_real_space_noise_fraction,
    random_selection_within_bins=False,
    low_resolution_noise_cutoff=low_resolution_noise_cutoff,
    log=log)
def get_fourier_noise_map(n_real=None,
      map_coeffs=None,
      low_resolution_fourier_noise_fraction=None,
      high_resolution_fourier_noise_fraction=None,
      d_min=None,
      low_resolution_noise_cutoff=None,
      log=sys.stdout):
  '''
    Generate a noise map whose errors are local in Fourier space.
    NOTE: only applies for space group P1.

    Random Fourier coefficients are produced (via shuffling within
    resolution bins inside scale_map_coeffs) with rms values scaled to
    yield low_resolution_fourier_noise_fraction of the map rms at
    low_resolution_noise_cutoff and
    high_resolution_fourier_noise_fraction at the high-resolution limit,
    linear in 1/d between.

    n_real is the gridding of the target map (usually map_data.all());
    map_coeffs supplies the indices and the rms-vs-resolution profile.
  '''
  assert map_coeffs.crystal_symmetry().space_group_number() in [
     0,1]
  return scale_map_coeffs(
    n_real=n_real,
    map_coeffs=map_coeffs,
    high_resolution_noise_fraction=high_resolution_fourier_noise_fraction,
    low_resolution_noise_fraction=low_resolution_fourier_noise_fraction,
    random_selection_within_bins=True,
    low_resolution_noise_cutoff=low_resolution_noise_cutoff,
    log=log)
def scale_map_coeffs(
     n_real=None,
     randomized_map_coeffs=None,
     map_coeffs=None,
     high_resolution_noise_fraction=None,
     low_resolution_noise_fraction=None,
     random_selection_within_bins=False,
     low_resolution_noise_cutoff=None,
     log=sys.stdout):
  '''
  Scale map coefficients to a target RMS amplitude vs resolution,
  optionally randomizing them, and return the corresponding real-space map.

  Works bin-by-bin: in each resolution bin the scale factor is chosen to
  yield high_resolution_noise_fraction at the high-resolution limit and
  low_resolution_noise_fraction at low_resolution_noise_cutoff (or d_max),
  varying linearly in 1/resolution between the two.

  If random_selection_within_bins is True, amplitudes and phases are
  shuffled within each bin; otherwise the caller-supplied
  randomized_map_coeffs are scaled to match the input RMS per bin.
  '''
  # Exactly one noise source must be available: internal shuffling or
  # externally randomized coefficients.
  assert random_selection_within_bins or randomized_map_coeffs
  if not hasattr(map_coeffs,'binner') or not map_coeffs.binner():
    map_coeffs.setup_binner(auto_binning=True)
  d_max,d_min=map_coeffs.d_max_min()
  # d_max < 0 means "unbounded"; use a huge placeholder so 1/d_max -> 0.
  if d_max < 0: d_max = 1.e+10
  if random_selection_within_bins:
    # Start from all-zero coefficients; bins are filled in below.
    new_map_coeffs=map_coeffs.customized_copy(
      data=flex.complex_double(map_coeffs.size(),(0+0.j)))
    print ("\nGenerating map randomized in Fourier space",file=log)
  else:
    new_map_coeffs=randomized_map_coeffs
  print ("Relative error added at high-resolution: %.3f" %(
    high_resolution_noise_fraction),file=log)
  print ("Relative error added at low-resolution: %.3f" %(
    low_resolution_noise_fraction),file=log)
  print("Resolution Noise ratio RMS original RMS error ",file=log)
  for i_bin in map_coeffs.binner().range_used():
    sel=map_coeffs.binner().selection(i_bin)
    dd=map_coeffs.d_spacings().data().select(sel)
    local_d_mean = dd.min_max_mean().mean
    # Work in s = 1/d so the noise ratio can be interpolated linearly.
    local_s_mean=1/local_d_mean
    s_max=1/max(1.e-10,d_min)
    if low_resolution_noise_cutoff:
      s_min=1/max(1.e-10,min(d_max,low_resolution_noise_cutoff))
    else:
      s_min=1/max(1.e-10,d_max)
    # Fractional position of this bin between the low- and high-resolution
    # anchor points, clamped to [0, 1].
    fraction_high= max(0.,min(1.,
      (local_s_mean-s_min)/max(1.e-10,s_max-s_min)))
    noise_ratio=low_resolution_noise_fraction+\
      fraction_high * (
       high_resolution_noise_fraction-
        low_resolution_noise_fraction)
    mc=map_coeffs.select(sel)
    rms_original=norm(mc.data())
    if random_selection_within_bins: # randomize, normalize, scale
      # NOTE(review): amplitudes and phases are permuted independently of
      # each other within the bin, and amplitudes are only scaled by
      # noise_ratio (no explicit RMS normalization despite the comment)
      # -- confirm this is the intended behavior.
      fp,phi=map_coeffs_as_fp_phi(mc)
      sel_fp=flex.random_permutation(fp.size())
      new_fp=fp.select(sel_fp)
      data=new_fp.data()
      data*=noise_ratio
      sel_phi=flex.random_permutation(phi.size())
      new_phi=phi.select(sel_phi)
      new_mc=fp_phi_as_map_coeffs(new_fp,new_phi)
    else: # just normalize and scale
      # Rescale the externally randomized coefficients so their RMS in
      # this bin matches the original, then apply the noise ratio.
      randomized_mc=randomized_map_coeffs.select(sel)
      rms_new=norm(randomized_mc.data())
      scale=rms_original/max(1.e-10,rms_new)
      new_fp,new_phi=map_coeffs_as_fp_phi(randomized_mc)
      data=new_fp.data()
      data*=noise_ratio*scale
      new_mc=fp_phi_as_map_coeffs(new_fp,new_phi)
    rms_new=norm(new_mc.data())
    new_map_coeffs.data().set_selected(sel,new_mc.data())
    print (" %.3f %.3f %.3f %.3f " %(
      local_d_mean, noise_ratio,rms_original,rms_new),file=log)
  # Convert to map
  from cctbx import maptbx
  return maptbx.map_coefficients_to_map(
    map_coeffs = new_map_coeffs,
    crystal_symmetry = new_map_coeffs.crystal_symmetry(),
    n_real = n_real)
def get_map_from_map_coeffs(map_coeffs = None, crystal_symmetry = None,
     n_real = None, apply_sigma_scaling = True):
  '''
  FFT map coefficients into a real-space map.

  If crystal_symmetry is not supplied, or disagrees with the symmetry of
  map_coeffs, the symmetry of map_coeffs is used instead (the mismatch
  case is only permitted when the map coefficients are in P1).  When
  n_real is given, the map is computed on exactly that grid; otherwise
  the gridding is chosen automatically.  The result is sigma-scaled by
  default, volume-scaled otherwise.
  '''
  from cctbx import maptbx
  from cctbx.maptbx import crystal_gridding
  if not crystal_symmetry:
    crystal_symmetry = map_coeffs.crystal_symmetry()
  symmetry_mismatch = (map_coeffs.crystal_symmetry().space_group_info() !=
     crystal_symmetry.space_group_info())
  if symmetry_mismatch:
    # Overriding the requested symmetry is only safe in P1.
    sg_text = str(map_coeffs.crystal_symmetry().space_group_info())
    assert sg_text.replace(" ", "").lower() == 'p1'
    # use map_coeffs.crystal_symmetry
    crystal_symmetry = map_coeffs.crystal_symmetry()
  gridding = None
  if n_real:
    gridding = crystal_gridding(
      unit_cell = crystal_symmetry.unit_cell(),
      space_group_info = crystal_symmetry.space_group_info(),
      pre_determined_n_real = n_real)
  fft_map = map_coeffs.fft_map( resolution_factor = 0.25,
     crystal_gridding = gridding,
     symmetry_flags = maptbx.use_space_group_symmetry)
  if apply_sigma_scaling:
    fft_map.apply_sigma_scaling()
  else:
    fft_map.apply_volume_scaling()
  return fft_map.real_map_unpadded()
| StarcoderdataPython |
3411599 | <filename>schema_validation/schemas/tests/test_schemas.py
import os
import json
import pytest
from ... import JSONSchema
from .. import iter_schemas_with_names, load_schema
# Expected object counts per bundled schema, used by test_full_schemas:
# number of (sub)schemas registered while parsing the full schema...
num_schemas = {'jsonschema-draft04.json': 29,
               'vega-v3.0.7.json': 631,
               'vega-lite-v1.2.json': 309,
               'vega-lite-v2.0.json': 645}
# ...and the number of named entries under the schema's "definitions".
num_definitions = {'jsonschema-draft04.json': 6,
                   'vega-v3.0.7.json': 106,
                   'vega-lite-v1.2.json': 54,
                   'vega-lite-v2.0.json': 150}
@pytest.mark.filterwarnings('ignore:Unused')
@pytest.mark.parametrize('name,schema', iter_schemas_with_names())
def test_full_schemas(name, schema):
    """Parsing a full schema registers the expected number of subschemas
    and named definitions (see the count tables above)."""
    parsed = JSONSchema(schema)
    expected_registry_size = num_schemas.get(name, None)
    expected_definition_count = num_definitions.get(name, None)
    assert len(parsed._registry) == expected_registry_size
    assert len(parsed._definitions) == expected_definition_count
@pytest.mark.parametrize('name,schema', iter_schemas_with_names())
def test_metaschema(name, schema):
    """Every bundled schema must itself validate against JSON Schema draft-04."""
    draft04 = JSONSchema(load_schema('jsonschema-draft04.json'))
    draft04.validate(schema)
@pytest.mark.filterwarnings('ignore:Unused')
def test_schema_validation():
    """Validate two example Vega-Lite specs against the bundled v2.0 schema."""
    schema = JSONSchema(load_schema('vega-lite-v2.0.json'))
    # A minimal bar chart with inline data values.
    vega_lite_bar = {
        "$schema": "https://vega.github.io/schema/vega-lite/v2.json",
        "description": "A simple bar chart with embedded data.",
        "data": {
            "values": [
                {"a": "A","b": 28}, {"a": "B","b": 55}, {"a": "C","b": 43},
                {"a": "D","b": 91}, {"a": "E","b": 81}, {"a": "F","b": 53},
                {"a": "G","b": 19}, {"a": "H","b": 87}, {"a": "I","b": 52}
            ]
        },
        "mark": "bar",
        "encoding": {
            "x": {"field": "a", "type": "ordinal"},
            "y": {"field": "b", "type": "quantitative"}
        }
    }
    schema.validate(vega_lite_bar)
    # A punchcard chart driven by an external CSV, exercising timeUnit
    # and aggregate encodings.
    vega_lite_github_punchcard = {
        "$schema": "https://vega.github.io/schema/vega-lite/v2.json",
        "data": { "url": "data/github.csv"},
        "mark": "circle",
        "encoding": {
            "y": {
                "field": "time",
                "type": "ordinal",
                "timeUnit": "day"
            },
            "x": {
                "field": "time",
                "type": "ordinal",
                "timeUnit": "hours"
            },
            "size": {
                "field": "count",
                "type": "quantitative",
                "aggregate": "sum"
            }
        }
    }
    schema.validate(vega_lite_github_punchcard)
| StarcoderdataPython |
11260028 | import inspect
import pprint
from numbers import Number
from typing import Any, Dict, Sequence
import torch
from meddlr.transforms.mixins import DeviceMixin, TransformCacheMixin
from meddlr.transforms.param_kind import ParamKind
from meddlr.transforms.tf_scheduler import SchedulableMixin
__all__ = ["TransformGen"]
class TransformGen(DeviceMixin, SchedulableMixin, TransformCacheMixin):
    """
    TransformGen takes an array of type float as input.

    It creates a :class:`Transform` based on the given image, sometimes with
    randomness. The transform can then be used to transform images
    or other data (boxes, points, annotations, etc.) associated with it.

    The assumption made in this class is that the image itself is sufficient to
    instantiate a transform. When this assumption is not true, you need to
    create the transforms by your own.

    A list of `TransformGen` can be applied with :func:`apply_transform_gens`.
    """

    def __init__(
        self, params: Dict[str, Any] = None, p: float = 0.0, param_kinds: Dict[str, Any] = None
    ) -> None:
        """
        Args:
            params: Initial generator parameters (name -> value).
            p: Probability of applying the generated transform.
            param_kinds: Mapping from parameter name to :class:`ParamKind`.
        """
        # Imported locally to avoid a circular import with tf_scheduler.
        from meddlr.transforms.tf_scheduler import TFScheduler

        self._params = {}
        if params is None:
            params = {}
        if param_kinds is None:
            param_kinds = {}
        params.update({"p": p})
        self._set_attributes(params)
        self._param_kinds = param_kinds
        self._schedulers: Sequence[TFScheduler] = []
        self._generator = None
        self._device = "cpu"

    def _set_attributes(self, params=None, **kwargs):
        """Merge ``params`` and ``kwargs`` into ``self._params``.

        Keys named 'self' or starting with '_' are skipped so subclasses
        can pass ``locals()`` from their constructors directly.
        """
        if params is None:
            params = {}
        params.update(kwargs)
        if params:
            self._params.update(
                {k: v for k, v in params.items() if k != "self" and not k.startswith("_")}
            )

    def get_transform(self, input):
        """Create a :class:`Transform` for ``input``. Subclasses must override."""
        raise NotImplementedError

    def reset(self):
        """Drop any cached transform so the next request regenerates it."""
        self._reset_transform()

    def __getattr__(self, name):
        # Unknown attributes resolve against the parameter dict. Read
        # _params via __dict__ so that attribute access before __init__ has
        # run (e.g. during copy/unpickling) raises AttributeError instead of
        # recursing into __getattr__ forever.
        params = self.__dict__.get("_params", {})
        if name in params:
            return params[name]
        raise AttributeError(f"Attribute '{name}' does not exist in class {type(self)}")

    def _get_param_values(self, use_schedulers=False):
        """Return current parameters, optionally overlaid with values
        supplied by the attached schedulers."""
        if not use_schedulers:
            return self._params
        params = self._params.copy()
        for s in self._schedulers:
            params.update(s.get_params())
        return params

    def _rand(self) -> float:
        """Uniform sample between [0, 1) using ``self._generator``.

        Returns:
            float: The sample between [0, 1).
        """
        return torch.rand(1, generator=self._generator).cpu().item()

    def _rand_choice(self, n=None, probs: torch.Tensor = None) -> int:
        """Chooses random integer between [0, n-1].

        Args:
            n (int): Number of choices. This is required if ``probs``
                is not specified.
            probs (torch.Tensor): The probability tensor.

        Returns:
            int: The index of the selected choice.
        """
        device = "cpu" if self._generator is None else self._generator.device
        if probs is None:
            probs = torch.ones(n, device=device) / n
        return torch.multinomial(probs.to(device), 1, generator=self._generator).cpu().item()

    def _rand_range(self, low, high, size: int = None):
        """Uniform random number(s) in [low, high).

        Args:
            low (number-like): The lower bound.
            high (number-like): The upper bound.
            size (int): Number of samples to draw in the range.

        Returns:
            A float when ``size`` is 1 (or None); a CPU tensor of shape
            (size,) otherwise.
        """
        if size is None:
            size = 1
        if low > high:
            high, low = low, high
        if high - low == 0:
            # Degenerate range: keep the historical scalar return value.
            return low
        vals = low + (high - low) * torch.rand(size, generator=self._generator)
        # BUGFIX: Tensor.item() is only valid for single-element tensors, so
        # any size > 1 previously raised a RuntimeError here. Return the
        # sampled tensor for multi-sample draws instead.
        return vals.cpu().item() if size == 1 else vals.cpu()

    def _format_param(self, val, kind: ParamKind, ndim=None):
        """Normalize a user-supplied parameter into per-dimension
        (low, high) ranges for MULTI_ARG parameters; other kinds pass
        through unchanged."""
        if kind == ParamKind.MULTI_ARG:
            if isinstance(val, Number):
                # A scalar v becomes a symmetric range (-v, v) per dimension.
                return ((-val, val),) * ndim
            elif isinstance(val, (list, tuple)):
                out = []
                if len(val) == 1:
                    # Broadcast a single entry across all dimensions.
                    val = val * ndim
                for v in val:
                    if isinstance(v, (list, tuple)):
                        out.append(v)
                    elif isinstance(v, Number):
                        out.append((-v, v))
                    else:
                        raise ValueError(f"Type {type(val)} not supported - val={val}")
                return type(val)(out)
        return val

    def seed(self, value: int):
        """Seed the internal RNG on the generator's device; returns self."""
        self._generator = torch.Generator(device=self._device).manual_seed(value)
        return self

    def __repr__(self):
        """
        Produce something like:
        "MyTransformGen(field1={self.field1}, field2={self.field2})"
        """
        try:
            sig = inspect.signature(self.__init__)
            classname = type(self).__name__
            argstr = []
            for name, param in sig.parameters.items():
                assert (
                    param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
                ), "The default __repr__ doesn't support *args or **kwargs"
                assert hasattr(self, name), (
                    "Attribute {} not found! "
                    "Default __repr__ only works if attributes match "
                    "the constructor.".format(name)
                )
                attr = getattr(self, name)
                default = param.default
                if default is attr:
                    # Omit arguments still at their default values.
                    continue
                argstr.append("{}={}".format(name, pprint.pformat(attr)))
            return "{}({})".format(classname, ", ".join(argstr))
        except AssertionError:
            return super().__repr__()

    def __str__(self) -> str:
        return self.__repr__()
| StarcoderdataPython |
3214585 | import os
from collections import OrderedDict, deque
from datetime import datetime, date
from enum import Enum, IntEnum
from scripts import SmartJson
class Test:
    """Sample object exercising the value types SmartJson can serialize."""
    def __init__(self):
        self.name = "test"
        self.date = datetime.now()
        self.list = ["is list item", 1, datetime.now()]
        self.complex = complex(2, -3)
        # BUGFIX: this attribute used to be assigned twice (first "Me" here,
        # then overwritten at the very end of __init__). The dead first
        # assignment has been folded into a single assignment of the final
        # value, keeping the attribute's position in __dict__ unchanged.
        self.bytes = "pip install smartjson".encode("utf-8")
        self.dict = {'url': "https://pypi.org/project/smartjson/", 'version': "2.0.0", 'author': "K.J.O",
                     'date': date(2019, 10, 1)}
        self.bool = True
        self.float = 9500.50
        self.int = 12
        self.path = os.getcwd()
class MyObject:
    """Nested sample object: wraps a Test instance plus container types
    (OrderedDict, deque, list) for the serialization demo."""
    def __init__(self):
        self.object = Test()
        self.date = datetime.now()
        self.id = 1
        self.lastId = None
        self.set = ["1", 12, datetime.now()]
        self.list = [datetime.now(), 1]
        inner_b = OrderedDict([("b", 2), ("a", datetime.now())])
        inner_a = OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])
        self.ordereddict = OrderedDict([("b", inner_b), ("a", inner_a)])
        self.deque = deque([deque([1, 2]), deque([3, 4])])
        # self.data = data
# Sample payload mixing every kind of value SmartJson claims to support:
# primitives, bytes, date/datetime, nested objects, OrderedDict, deque
# and complex numbers.
data = {
    "int": 1,
    "str": "SmartJson",
    "bytes": "pip install smartjson".encode("utf-8"),
    "date": date(2010, 1, 1),
    "datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
    "pull": Test(),
    "set": (["1", 12, datetime.now()]),
    "list": [datetime.now(), Test()],
    "ordereddict": OrderedDict([
        ("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
        ("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
    ]),
    "deque": deque([
        deque([1, 2]),
        deque([3, 4]),
    ]),
    'complex': complex(42, 13)
}
class LoggerLevel(Enum):
    # Mirrors the standard logging level names as string values.
    CRITICAL = 'CRITICAL'
    ERROR = 'ERROR'
    WARNING = 'WARNING'
    INFO = 'INFO'
    DEBUG = 'DEBUG'
    # NOTE(review): unlike the members above, NOTSET is bound to the
    # module-level `data` dict rather than a string -- presumably
    # deliberate for this serialization demo; confirm before reusing
    # this enum elsewhere.
    NOTSET = data
class Status(IntEnum):
    # Process-exit-style integer codes: 0 = success, 1 = failure.
    success = 0
    failure = 1
if __name__ == '__main__':
    # Demo driver: serialize a complex number to a JSON file, then
    # serialize a heterogeneous list to a JSON string.
    print("")
    SmartJson(complex(1, 2)).serializeToJsonFile()
    print(SmartJson(["LoggerLevel", 1, datetime.now()]).serialize())
    # print(SmartJson(Test()).serialize())
"""
class Test:
def __init__(self):
self.test = "none"
self.id = 2
self.date = datetime.now()
self.tuple = [((1, 'a'), (2, 'b'))]
data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1),
"datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
"pull": Test(),
"set": (["1", 12, datetime.now()]),
"list": [datetime.now(), Test()],
"ordereddict": OrderedDict([
("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
]),
"deque": deque([
deque([1, 2]),
deque([3, 4]),
])
}
class Pull:
def __init__(self):
self.id = 2
self.title = "Iam pull"
self.author = "<NAME>."
self.subPull = Pull.SubPull()
self.data = data
self.date = datetime.now()
self.list = [1, datetime.now(), Pull.SubPull()]
class SubPull:
def __init__(self):
self.subId = 3
self.subTitle = "I am sub title"
self.subAuthor = "OKJ."
self.date = date(2010, 1, 1)
class Jobs:
def __init__(self):
self.name = 'John'
self.url = "5444"
self.id = 1
self.job = Jobs.Job()
self.data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1)
}
def name(self, set=None):
if set != None:
self.name = set
return self.name
class Job:
def __init__(self):
self.job_name = 'Test'
self.job_url = "_blank"
self.date = datetime.now().strftime('%m/%d/%Y')
self.date2 = datetime.now()
self.item = Jobs.Item()
self.pull = Pull()
class Item:
def __init__(self):
self.item_name = 'item 1'
self.item_boof = datetime.now()
self.mylist = [1, 2, 3]
self.another = Jobs.Item.Another()
class Another:
def __init__(self):
self.age = 26
self.precision = 99.56
self.ville = "Lille"
self.meteo = Jobs.Item.Another.Meteo()
class Meteo:
def __init__(self):
self.pluie = True
self.complex = complex(12, 78)
self.tuple = [((1, 'a'), (2, 'b'))]
self.none = None
"""
| StarcoderdataPython |
3344701 | <reponame>creikey/DeepSpace2019
from wpilib.command import CommandGroup
from commands import updateodemetry, checkdiagnostics
class GlobalCommandGroup(CommandGroup):
    """Run at the start of all modes: background work shared by every robot mode."""
    def __init__(self):
        super().__init__('Global Program')
        # Continuously refresh robot odometry in parallel with the active mode.
        self.addParallel(updateodemetry.UpdateOdemetry())
        # Diagnostics polling is currently disabled:
        # self.addParallel(checkdiagnostics.CheckDiagnostics())
| StarcoderdataPython |
5192831 | <gh_stars>1-10
import copy
import logging
import numpy as np
import torch
import torch.utils.data as td
from sklearn.utils import shuffle
from PIL import Image
from torch.autograd import Variable
import torchvision.transforms.functional as trnF
class ResultLoader(td.Dataset):
    """
    Dataset over pre-computed samples.

    Entries of ``data`` may be (a) raw arrays convertible to PIL images,
    (b) keys into ``data_dict``, or (c) identifiers/paths resolvable by
    ``loader``; the three are tried in that order.
    """
    def __init__(self, data, labels, transform=None, loader=None, data_dict=None):
        self.data = data              # samples, dict keys, or paths
        self.labels = labels          # 1-D array-like of integer labels
        self.transform = transform    # optional callable applied to each image
        self.loader = loader          # fallback: loader(entry) -> image
        self.data_dict = data_dict    # optional mapping entry -> image

    def __len__(self):
        return self.labels.shape[0]

    def __getitem__(self, index):
        img = self.data[index]
        # Resolution order: raw array -> data_dict lookup -> loader call.
        # BUGFIX: use `except Exception` instead of a bare `except:` so that
        # KeyboardInterrupt/SystemExit are not silently swallowed while
        # falling through the chain.
        try:
            img = Image.fromarray(img)
        except Exception:
            try:
                img = self.data_dict[img]
            except Exception:
                img = self.loader(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, self.labels[index]
def make_ResultLoaders(data, labels, taskcla, transform=None, shuffle_idx=None, loader=None, data_dict=None):
    """
    Split (data, labels) into one ResultLoader per task.

    ``taskcla`` is a sequence of (task_id, n_classes) pairs. Labels are
    assumed to be globally numbered, so task t covers the label range
    [start, start + ncla); each task's labels are remapped to [0, ncla)
    via ``% ncla``.
    """
    # Optionally remap labels through a shuffled class-index table.
    if shuffle_idx is not None:
        labels = shuffle_idx[labels]
    # Sort samples by label so every task occupies one contiguous slice.
    sort_index = np.argsort(labels)
    data = data[sort_index]
    labels = np.array(labels)
    labels = labels[sort_index]
    loaders = []
    start = 0
    for t, ncla in taskcla:
        # argmin over (labels < start) yields the first index with a label
        # >= start; argmax over (labels > start+ncla-1) yields the first
        # index past this task. NOTE(review): both return 0 when no element
        # matches -- the end_idx == 0 case is handled below, but start_idx
        # relies on labels for earlier tasks being present; confirm.
        start_idx = np.argmin(labels<start) # start data index
        end_idx = np.argmax(labels>(start+ncla-1)) # end data index
        if end_idx == 0:
            end_idx = data.shape[0]
        loaders.append(ResultLoader(data[start_idx:end_idx],
                                    labels[start_idx:end_idx]%ncla,
                                    transform=transform,
                                    loader=loader,
                                    data_dict=data_dict))
        start += ncla
return loaders | StarcoderdataPython |
3290167 | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
from os import listdir
from os.path import isfile, join
import sys
import urllib.request
import tempfile
import shutil
import numpy as np
import open3d as o3d
import open3d.ml.tf as ml3d
from open3d.visualization.tensorboard_plugin import summary
import tensorflow as tf
from util import ensure_demo_data
BASE_LOGDIR = "demo_logs/tf/"
def semantic_segmentation(DEMO_DATA_DIR):
    """Example writing 3D TensorBoard summary data for semantic segmentation.

    Reads paired SemanticKITTI point clouds and labels from DEMO_DATA_DIR
    and logs one 3D summary per frame (positions, labels, noisy per-class
    scores and centroid-distance features) under BASE_LOGDIR/semseg-example.
    """
    # SemanticKITTI class id -> human-readable name, passed to the summary
    # so TensorBoard can display named labels.
    SEMANTIC_KITTI_LABELS = {
        0: 'unlabeled',
        1: 'car',
        2: 'bicycle',
        3: 'motorcycle',
        4: 'truck',
        5: 'other-vehicle',
        6: 'person',
        7: 'bicyclist',
        8: 'motorcyclist',
        9: 'road',
        10: 'parking',
        11: 'sidewalk',
        12: 'other-ground',
        13: 'building',
        14: 'fence',
        15: 'vegetation',
        16: 'trunk',
        17: 'terrain',
        18: 'pole',
        19: 'traffic-sign'
    }
    labels_dir = join(DEMO_DATA_DIR, 'SemanticKITTI', 'labels')
    label_files = tuple(
        join(labels_dir, fn)
        for fn in listdir(labels_dir)
        if isfile(join(labels_dir, fn)))
    points_dir = join(DEMO_DATA_DIR, 'SemanticKITTI', 'points')
    pcd_files = tuple(
        join(points_dir, fn)
        for fn in listdir(points_dir)
        if isfile(join(points_dir, fn)))
    # Point clouds and labels must pair up one-to-one.
    if len(pcd_files) == 0 or len(pcd_files) != len(label_files):
        print("No point cloud data or labels found.")
        sys.exit(1)
    rng = np.random.default_rng()
    logdir = join(BASE_LOGDIR, "semseg-example")
    writer = tf.summary.create_file_writer(logdir)
    for step in range(len(pcd_files)):
        # We will pretend these are the inputs and outputs of a Semantic
        # Segmentation model
        # float, shape (N, 3), or (B, N, 3) for a batch
        points = np.load(pcd_files[step])
        # int, shape (N, 1), or (B, N, 1) for a batch
        labels = np.load(label_files[step])
        # We can also visualize noisy scores (car, road, vegetation)
        scores = np.hstack((labels == 1, labels == 9, labels == 15))
        scores = np.clip(scores + rng.normal(0., 0.05, size=scores.shape), 0.,
                         1.)
        # and outputs of some pretend network layers. The first 3 dimensions
        # can be visualized as RGB colors. Here we will use distances from the
        # centroids of (all points, road, vegetation).
        centroid_all = np.mean(points, axis=0)
        d_all = np.linalg.norm(points - centroid_all, axis=1)
        centroid_road = np.mean(points[np.squeeze(labels) == 9, :], axis=0)
        d_road = np.linalg.norm(points - centroid_road, axis=1)
        centroid_vegetation = np.mean(points[np.squeeze(labels) == 15, :],
                                      axis=0)
        d_vegetation = np.linalg.norm(points - centroid_vegetation, axis=1)
        features = np.stack((d_all, d_road, d_vegetation), axis=1)
        with writer.as_default():
            # You can use TensorFlow tensors directly too.
            # Prefix the data with "vertex_" to indicate that this is per vertex
            # data.
            summary.add_3d(
                "semantic_segmentation",
                {
                    "vertex_positions": points,  # (N, 3)
                    "vertex_labels": labels,  # (N, 1)
                    "vertex_scores": scores,  # (N, 3)
                    "vertex_features": features  # (N, 3)
                },
                step,
                label_to_names=SEMANTIC_KITTI_LABELS,
                logdir=logdir)
def object_detection(DEMO_DATA_DIR):
    """Example writing 3D TensorBoard summary data for object detection.

    Loads the KITTI validation split from DEMO_DATA_DIR and logs, per frame,
    the input point cloud (positions + intensities) and its ground-truth
    bounding boxes under BASE_LOGDIR/objdet-example.
    """
    dset = ml3d.datasets.KITTI(dataset_path=join(DEMO_DATA_DIR, 'KITTI'))
    val_split = dset.get_split('validation')
    # Inverse of get_label_to_names(): class name -> integer label id.
    name_to_labels = {
        name: label for label, name in dset.get_label_to_names().items()
    }
    if len(val_split) == 0:
        print("No point cloud data or bounding boxes found.")
        sys.exit(1)
    logdir = join(BASE_LOGDIR, "objdet-example")
    writer = tf.summary.create_file_writer(logdir)
    for step in range(len(val_split)):  # one pointcloud per step
        data = val_split.get_data(step)
        with writer.as_default():
            # We will pretend these are the inputs and outputs of an Object
            # Detection model. You can use TensorFlow tensors directly too.
            summary.add_3d(
                "input_pointcloud",
                {  # float, shape (N, 3), or (B, N, 3) for a batch
                    "vertex_positions": data['point'][:, :3],
                    # Extra features: float, shape (N, 1), or (B, N, 1) for a batch
                    # [should not be (N,)]
                    "vertex_intensities": data['point'][:, 3:]
                },
                step,
                logdir=logdir)
        # We need label_class to be int, not str
        for bb in data['bounding_boxes']:
            if not isinstance(bb.label_class, int):
                bb.label_class = name_to_labels[bb.label_class]
        # Bounding boxes (pretend model output): (Nbb, ) or (B, Nbb) for a batch
        # Write bounding boxes in a separate call.
        summary.add_3d("object_detection",
                       {"bboxes": data['bounding_boxes']},
                       step,
                       label_to_names=dset.get_label_to_names(),
                       logdir=logdir)
if __name__ == "__main__":
    # Fetch/locate the demo dataset, then write both example summaries.
    DEMO_DATA_DIR = ensure_demo_data()
    print("Writing example summary for semantic segmentation...")
    semantic_segmentation(DEMO_DATA_DIR)
    print("Writing example summary for object detection...")
    object_detection(DEMO_DATA_DIR)
| StarcoderdataPython |
8188739 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'export_params.ui'
#
# Created: Fri Jun 17 13:29:29 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_ExportParams(object):
    """UI layout generated by pyside-uic from 'export_params.ui'.

    Do not edit by hand: regenerate from the .ui file instead, since
    manual changes are overwritten (see the file header warning).
    """
    def setupUi(self, ExportParams):
        """Build the form layout (pixel count, texture, export name/anim)
        on the given top-level widget."""
        ExportParams.setObjectName("ExportParams")
        ExportParams.resize(300, 200)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(ExportParams.sizePolicy().hasHeightForWidth())
        ExportParams.setSizePolicy(sizePolicy)
        ExportParams.setMinimumSize(QtCore.QSize(300, 200))
        self.exportParamsLayout = QtGui.QFormLayout(ExportParams)
        self.exportParamsLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
        self.exportParamsLayout.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
        self.exportParamsLayout.setContentsMargins(5, 5, 5, 5)
        self.exportParamsLayout.setObjectName("exportParamsLayout")
        # Row 0: pixels-per-unit label + edit
        self.perPixelLabel = QtGui.QLabel(ExportParams)
        self.perPixelLabel.setObjectName("perPixelLabel")
        self.exportParamsLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.perPixelLabel)
        self.perPixelEdit = QtGui.QLineEdit(ExportParams)
        self.perPixelEdit.setObjectName("perPixelEdit")
        self.exportParamsLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.perPixelEdit)
        # Row 1: texture label + edit
        self.textureLabel = QtGui.QLabel(ExportParams)
        self.textureLabel.setObjectName("textureLabel")
        self.exportParamsLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.textureLabel)
        self.textureEdit = QtGui.QLineEdit(ExportParams)
        self.textureEdit.setObjectName("textureEdit")
        self.exportParamsLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.textureEdit)
        # Rows 2-3: export name / export anim edits, then their labels
        self.exportNameEdit = QtGui.QLineEdit(ExportParams)
        self.exportNameEdit.setObjectName("exportNameEdit")
        self.exportParamsLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.exportNameEdit)
        self.exportAnimEdit = QtGui.QLineEdit(ExportParams)
        self.exportAnimEdit.setObjectName("exportAnimEdit")
        self.exportParamsLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.exportAnimEdit)
        self.exportNameLabel = QtGui.QLabel(ExportParams)
        self.exportNameLabel.setObjectName("exportNameLabel")
        self.exportParamsLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.exportNameLabel)
        self.exportAnimLabel = QtGui.QLabel(ExportParams)
        self.exportAnimLabel.setObjectName("exportAnimLabel")
        self.exportParamsLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.exportAnimLabel)
        self.retranslateUi(ExportParams)
        QtCore.QMetaObject.connectSlotsByName(ExportParams)
    def retranslateUi(self, ExportParams):
        """Apply (re)translated user-visible strings to the widgets."""
        ExportParams.setWindowTitle(QtGui.QApplication.translate("ExportParams", "Form", None, QtGui.QApplication.UnicodeUTF8))
        self.perPixelLabel.setText(QtGui.QApplication.translate("ExportParams", "Pixels", None, QtGui.QApplication.UnicodeUTF8))
        self.perPixelEdit.setText(QtGui.QApplication.translate("ExportParams", "256", None, QtGui.QApplication.UnicodeUTF8))
        self.textureLabel.setText(QtGui.QApplication.translate("ExportParams", "Texture", None, QtGui.QApplication.UnicodeUTF8))
        self.exportNameLabel.setText(QtGui.QApplication.translate("ExportParams", "Export Name", None, QtGui.QApplication.UnicodeUTF8))
        self.exportAnimLabel.setText(QtGui.QApplication.translate("ExportParams", "Export Anim", None, QtGui.QApplication.UnicodeUTF8))
| StarcoderdataPython |
1617912 |
# if/else demonstration: classify a cat by gender, then by age.
gender = input("Gender? ")
# Read the gender from the user.
gender = gender.lower()
# Lowercase the input so the comparison below is case-insensitive.
if gender == "male":
    # Runs when the (lowercased) input is exactly "male".
    print("Your cat is male")
elif gender == "female":
    # elif adds another condition; see "if elif.py" in this folder for details.
    print("Your cat is female")
else:
    # Falls through when the input is neither "male" nor "female".
    print("Invalid input")
# A second, simple if/else: classify the cat's age.
age = int(input("Age of your cat? "))
if age < 5:
    print("Your cat is young.")
else:
    print("Your cat is adult.")
#output :
#1]
#Gender? male
#Your cat is male
#Age of your cat? 4
#Your cat is young.
#2]
#Gender? female
#Your cat is female
#Age of your cat? 6
#Your cat is adult.
#3]
#Gender? 0
#Invalid input
#Age of your cat? 0
#Your cat is young.
| StarcoderdataPython |
3580563 | from collections import OrderedDict
from djoser.compat import get_user_email, get_user_email_field_name
from django.conf import settings
from django.db import connection
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from djoser.serializers import UserCreateSerializer
from .models import *
from .utils import get_enum_value_row_else_null
def filter_excluded_fields(serializer_instance, serializer_model):
    """
    Remove fields from a serializer instance that appear in the
    serializer model's ``SERIALIZER_EXCLUDED_FIELDS``.

    Used to hide non-public fields from public API users.
    BUGFIX: this function was previously defined twice, byte-identically;
    the redundant second definition has been removed.
    """
    for field in serializer_model.SERIALIZER_EXCLUDED_FIELDS:
        if field in serializer_instance.fields:
            serializer_instance.fields.pop(field)
class PolygonSerializer(geo_serializers.GeoFeatureModelSerializer):
    """
    Serializes Polygon objects into GeoJSON format, with extra model
    fields (plus the AHJID) placed in the GeoJSON 'properties' section.
    """
    AHJID = serializers.SerializerMethodField()
    class Meta:
        model = Polygon
        geo_field = 'Polygon'
        # Omit the GeoJSON 'id' member from the output.
        id_field = False
        fields = ['AHJID', 'LandArea', 'GEOID', 'InternalPLatitude', 'InternalPLongitude']
    def get_AHJID(self, instance):
        # AHJID is not stored on the Polygon model; it is supplied through
        # the serializer context by the caller (defaults to '').
        return self.context.get('AHJID', '')
class OrangeButtonSerializer(serializers.Field):
    """
    Custom serializer field that wraps each value in the Orange Button
    'Value' primitive:

    .. code-block:: json

        {
            "<field_name>": {
                "Value": "<value>"
            }
        }

    Currently only the 'Value' primitive is emitted.
    """
    def get_attribute(self, instance):
        """
        Resolve the attribute as usual, but map a missing/null attribute
        to {'Value': None} so that to_representation is still invoked by
        the calling serializer for null fields.
        """
        attribute = super().get_attribute(instance)
        return {'Value': None} if attribute is None else attribute
    def to_representation(self, value):
        """Wrap the value, unless it is already the null 'Value' wrapper."""
        is_null_wrapper = (
            type(value) is dict and 'Value' in value and value['Value'] is None
        )
        if is_null_wrapper:
            return value
        return {'Value': value}
class EnumModelSerializer(serializers.Serializer):
    """Serializes enum-table rows that expose a 'Value' attribute."""
    Value = serializers.CharField()
    def get_attribute(self, instance):
        # Map a missing/null enum row to {'Value': ''} so that
        # to_representation is still invoked for null fields.
        attribute = super().get_attribute(instance)
        if attribute is None:
            return {'Value': ''}
        else:
            return attribute
    def to_representation(self, value):
        # Pass the empty-value placeholder through unchanged; otherwise
        # defer to the normal Serializer behavior.
        if type(value) is dict and 'Value' in value and value['Value'] == '':
            return value
        return super().to_representation(value)
class FeeStructureSerializer(serializers.Serializer):
    """
    Serializes Orange Button FeeStructure object to OrderedDict
    """
    FeeStructurePK = OrangeButtonSerializer()
    FeeStructureID = OrangeButtonSerializer()
    FeeStructureName = OrangeButtonSerializer()
    FeeStructureType = EnumModelSerializer()
    Description = OrangeButtonSerializer()
    FeeStructureStatus = OrangeButtonSerializer()
    def to_representation(self, feestructure):
        """
        Returns an OrderedDict representing an FeeStructure object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        # Strip fields listed in FeeStructure.SERIALIZER_EXCLUDED_FIELDS
        # before delegating to the default serialization.
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, FeeStructure)
        return super().to_representation(feestructure)
class LocationSerializer(serializers.Serializer):
    """
    Serializes Orange Button Location object to OrderedDict
    """
    LocationID = OrangeButtonSerializer()
    Altitude = OrangeButtonSerializer()
    Elevation = OrangeButtonSerializer()
    Latitude = OrangeButtonSerializer()
    Longitude = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    # Enum-backed fields use EnumModelSerializer instead of the plain wrapper.
    LocationDeterminationMethod = EnumModelSerializer()
    LocationType = EnumModelSerializer()
    def to_representation(self, location):
        """
        Returns an OrderedDict representing an Location object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        # Strip non-public fields before default serialization.
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, Location)
        return super().to_representation(location)
class AddressSerializer(serializers.Serializer):
    """
    Serializes Orange Button Address object to OrderedDict
    """
    AddressID = OrangeButtonSerializer()
    AddrLine1 = OrangeButtonSerializer()
    AddrLine2 = OrangeButtonSerializer()
    AddrLine3 = OrangeButtonSerializer()
    City = OrangeButtonSerializer()
    Country = OrangeButtonSerializer()
    County = OrangeButtonSerializer()
    StateProvince = OrangeButtonSerializer()
    ZipPostalCode = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    AddressType = EnumModelSerializer()
    # Nested child object: the address's Location row.
    Location = LocationSerializer(source='LocationID')
    def to_representation(self, address):
        """
        Returns an OrderedDict representing an Address object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        # Strip non-public fields before default serialization.
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, Address)
        return super().to_representation(address)
class ContactSerializer(serializers.Serializer):
    """
    Serializes Orange Button Contact object to OrderedDict
    """
    ContactID = OrangeButtonSerializer()
    FirstName = OrangeButtonSerializer()
    MiddleName = OrangeButtonSerializer()
    LastName = OrangeButtonSerializer()
    HomePhone = OrangeButtonSerializer()
    MobilePhone = OrangeButtonSerializer()
    WorkPhone = OrangeButtonSerializer()
    ContactType = EnumModelSerializer()
    ContactTimezone = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    Email = OrangeButtonSerializer()
    Title = OrangeButtonSerializer()
    URL = OrangeButtonSerializer()
    PreferredContactMethod = EnumModelSerializer()
    # Nested child object: the contact's Address row.
    Address = AddressSerializer(source='AddressID')
    def to_representation(self, contact):
        """
        Returns an OrderedDict representing an Contact object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        # Strip non-public fields before default serialization.
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, Contact)
        return super().to_representation(contact)
class RecursiveField(serializers.Serializer):
    """
    Serializer that calls the caller serializer on the value
    of the field that was passed to it.
    Used for serializing Comments (replies nest arbitrarily deep).
    """
    def to_representation(self, value):
        """
        Calls the caller serializer that called this serializer.
        self.parent is the ListSerializer wrapper (many=True), so
        self.parent.parent is the serializer class that declared this field.
        """
        serializer = self.parent.parent.__class__(value, context=self.context)
        return serializer.data
class APITokenSerializer(serializers.Serializer):
    """
    Serializes APIToken to OrderedDict
    """
    # The underlying token model stores the token string in 'key'.
    auth_token = serializers.CharField(source='key')
    is_active = serializers.BooleanField()
    expires = serializers.DateTimeField()
class UserSerializer(serializers.Serializer):
    """
    Serializes User to OrderedDict
    """
    UserID = serializers.IntegerField(read_only=True)
    ContactID = ContactSerializer()
    Username = serializers.CharField()
    Email = serializers.CharField()
    PersonalBio = serializers.CharField()
    CompanyAffiliation = serializers.CharField()
    Photo = serializers.CharField()
    NumAPICalls = serializers.IntegerField()
    SignUpDate = serializers.DateField()
    # Computed fields sourced from User model methods.
    AcceptedEdits = serializers.IntegerField(source='get_num_accepted_edits')
    SubmittedEdits = serializers.IntegerField(source='get_num_submitted_edits')
    MaintainedAHJs = serializers.ListField(source='get_maintained_ahjs')
    APIToken = APITokenSerializer(source='get_API_token')
    is_superuser = serializers.BooleanField()
    def to_representation(self, user):
        """
        Returns an OrderedDict representing a User object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users. Unlike other
        serializers, the default is True to passively prevent
        sensitive data being serialized when it's not needed.
        """
        if self.context.get('is_public_view', True):
            filter_excluded_fields(self, User)
        return super().to_representation(user)
# NOTE: intentionally shadows djoser's imported UserCreateSerializer by
# subclassing it under the same name, extending it with Contact fields.
class UserCreateSerializer(UserCreateSerializer):
    """
    Serializes User to Ordered Dict.
    Used when a new user is created.
    """
    FirstName = serializers.CharField()
    MiddleName = serializers.CharField(allow_blank=True)
    LastName = serializers.CharField()
    Title = serializers.CharField(allow_blank=True)
    WorkPhone = serializers.CharField(allow_blank=True)
    PreferredContactMethod = serializers.CharField(allow_blank=True)
    ContactTimezone = serializers.CharField(allow_blank=True)
    def validate(self, attrs):
        """Validate the User-model portion of attrs with the parent serializer,
        translating PreferredContactMethod to its enum row first."""
        # Names of fields that belong to the Contact model, not the User model.
        contact_fields = {field.name for field in Contact._meta.get_fields()}
        pcm = get_enum_value_row_else_null('PreferredContactMethod', attrs['PreferredContactMethod'])
        if pcm is None:
            attrs.pop('PreferredContactMethod')
        else:
            attrs['PreferredContactMethod'] = pcm
        # Only pass User-model fields to the djoser parent's validate().
        user_dict = OrderedDict({k: v for k, v in attrs.items() if k not in contact_fields})
        super().validate(user_dict)
        return attrs
    def to_representation(self, user):
        # Serialize created users with the full read serializer.
        return UserSerializer(user).data
    # At class-body execution time this name still refers to the imported
    # djoser serializer, so Meta inherits from djoser's Meta.
    class Meta(UserCreateSerializer.Meta):
        model = User
        fields = ('UserID',
                  'ContactID',
                  'Username',
                  'password',
                  'Email',
                  'is_staff',
                  'is_active',
                  'SignUpDate',
                  'PersonalBio',
                  'URL',
                  'CompanyAffiliation',
                  'Photo',
                  'SecurityLevel',
                  'NumAPICalls',
                  'FirstName',
                  'MiddleName',
                  'LastName',
                  'Title',
                  'WorkPhone',
                  'PreferredContactMethod',
                  'ContactTimezone')
class CommentSerializer(serializers.Serializer):
    """
    Serializes Comment to OrderedDict.
    """
    CommentID = OrangeButtonSerializer()
    User = UserSerializer(source='UserID')
    CommentText = OrangeButtonSerializer()
    Date = OrangeButtonSerializer()
    # Replies are Comments themselves; RecursiveField re-invokes this serializer.
    Replies = RecursiveField(source='get_replies', many=True)
class DocumentSubmissionMethodUseSerializer(serializers.Serializer):
    """
    Serializes Orange Button DocumentSubmissionMethod object value to OrderedDict
    """
    UseID = serializers.IntegerField()
    Value = serializers.CharField(source='get_value')
    def to_representation(self, dsmu):
        """
        Returns an OrderedDict representing a DocumentSubmissionMethod object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, AHJDocumentSubmissionMethodUse)
        return super().to_representation(dsmu)
class PermitIssueMethodUseSerializer(serializers.Serializer):
    """
    Serializes Orange Button PermitIssueMethod object value to OrderedDict
    """
    UseID = serializers.IntegerField()
    Value = serializers.CharField(source='get_value')
    def to_representation(self, pimu):
        """
        Returns an OrderedDict representing a PermitIssueMethod object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, AHJPermitIssueMethodUse)
        return super().to_representation(pimu)
class AHJInspectionSerializer(serializers.Serializer):
    """
    Serializes Orange Button AHJInspection object
    """
    InspectionID = OrangeButtonSerializer()
    InspectionType = EnumModelSerializer()
    AHJInspectionName = OrangeButtonSerializer()
    AHJInspectionNotes = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    FileFolderURL = OrangeButtonSerializer()
    TechnicianRequired = OrangeButtonSerializer()
    InspectionStatus = OrangeButtonSerializer()
    # Confirmed vs. unconfirmed contacts come from separate model methods.
    Contacts = ContactSerializer(source='get_contacts', many=True)
    UnconfirmedContacts = ContactSerializer(source='get_uncon_con', many=True)
    def to_representation(self, inspection):
        """
        Returns an OrderedDict representing an AHJInspection object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, AHJInspection)
        return super().to_representation(inspection)
class EngineeringReviewRequirementSerializer(serializers.Serializer):
    """
    Serializes Orange Button EngineeringReviewRequirement object to OrderedDict
    """
    EngineeringReviewRequirementID = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    EngineeringReviewType = EnumModelSerializer()
    RequirementLevel = EnumModelSerializer()
    RequirementNotes = OrangeButtonSerializer()
    StampType = EnumModelSerializer()
    EngineeringReviewRequirementStatus = OrangeButtonSerializer()
    def to_representation(self, err):
        """
        Returns an OrderedDict representing an EngineeringReviewRequirement object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, EngineeringReviewRequirement)
        return super().to_representation(err)
class AHJSerializer(serializers.Serializer):
    """
    Serializes Orange Button AHJ (Authority Having Jurisdiction) object.
    """
    AHJPK = OrangeButtonSerializer()
    AHJID = OrangeButtonSerializer()
    AHJCode = OrangeButtonSerializer()
    AHJLevelCode = EnumModelSerializer()
    AHJName = OrangeButtonSerializer()
    Description = OrangeButtonSerializer()
    DocumentSubmissionMethodNotes = OrangeButtonSerializer()
    PermitIssueMethodNotes = OrangeButtonSerializer()
    EstimatedTurnaroundDays = OrangeButtonSerializer()
    FileFolderURL = OrangeButtonSerializer()
    URL = OrangeButtonSerializer()
    BuildingCode = EnumModelSerializer()
    BuildingCodeNotes = OrangeButtonSerializer()
    ElectricCode = EnumModelSerializer()
    ElectricCodeNotes = OrangeButtonSerializer()
    FireCode = EnumModelSerializer()
    FireCodeNotes = OrangeButtonSerializer()
    ResidentialCode = EnumModelSerializer()
    ResidentialCodeNotes = OrangeButtonSerializer()
    WindCode = EnumModelSerializer()
    WindCodeNotes = OrangeButtonSerializer()
    Address = AddressSerializer(source='AddressID')
    # Confirmed/unconfirmed child collections all come from model helper methods.
    Contacts = ContactSerializer(source='get_contacts', many=True)
    UnconfirmedContacts = ContactSerializer(source='get_unconfirmed', many=True)
    UnconfirmedInspections = AHJInspectionSerializer(source='get_unconfirmed_inspections', many=True)
    Polygon = serializers.SerializerMethodField()
    Comments = CommentSerializer(source='get_comments', many=True)
    AHJInspections = AHJInspectionSerializer(source='get_inspections', many=True)
    DocumentSubmissionMethods = DocumentSubmissionMethodUseSerializer(source='get_document_submission_methods', many=True)
    UnconfirmedDocumentSubmissionMethods = DocumentSubmissionMethodUseSerializer(source='get_uncon_dsm', many=True)
    PermitIssueMethods = PermitIssueMethodUseSerializer(source='get_permit_submission_methods', many=True)
    UnconfirmedPermitIssueMethods = PermitIssueMethodUseSerializer(source='get_uncon_pim', many=True)
    EngineeringReviewRequirements = EngineeringReviewRequirementSerializer(source='get_err', many=True)
    UnconfirmedEngineeringReviewRequirements = EngineeringReviewRequirementSerializer(source='get_uncon_err', many=True)
    FeeStructures = FeeStructureSerializer(source='get_fee_structures', many=True)
    UnconfirmedFeeStructures = FeeStructureSerializer(source='get_uncon_fs', many=True)
    def to_representation(self, ahj):
        """
        Returns an OrderedDict representing an AHJ object.
        Note not every AHJ has every child object.
        If 'is_public_view' is True, will not serialize fields
        that are not meant for public api users.
        """
        if self.context.get('is_public_view', False):
            filter_excluded_fields(self, AHJ)
        return super().to_representation(ahj)
    def get_Polygon(self, instance):
        """
        Helper method to serialize the polygon associated with an AHJ.
        Returns None when the AHJ has no polygon.
        """
        if instance.PolygonID is None:
            return None
        return PolygonSerializer(instance.PolygonID, context={'AHJID': instance.AHJID}).data
class EditSerializer(serializers.Serializer):
    """
    Serializes edits for the webpage AHJPage.
    """
    EditID = serializers.IntegerField(read_only=True)
    ChangedBy = UserSerializer()
    ApprovedBy = UserSerializer()
    AHJPK = serializers.IntegerField(source='AHJPK.AHJPK')
    SourceTable = serializers.CharField()
    SourceColumn = serializers.CharField()
    SourceRow = serializers.IntegerField()
    ReviewStatus = serializers.CharField()
    OldValue = serializers.CharField(read_only=True)
    NewValue = serializers.CharField()
    DateRequested = serializers.DateTimeField(read_only=True)
    DateEffective = serializers.DateTimeField(read_only=True)
    EditType = serializers.CharField()
    DataSourceComment = serializers.CharField()
    def to_representation(self, edit):
        """Serialize an Edit; with context['drop_users'] set, collapse the
        nested user objects down to just their usernames."""
        if self.context.get('drop_users', False):
            """
            This gives the option for callers of the serializer to only serialize the username of the user.
            """
            self.fields['ChangedBy'] = serializers.CharField(source='ChangedBy.Username')
            # NOTE(review): when ApprovedBy is None the full UserSerializer is
            # kept — presumably because CharField(source='ApprovedBy.Username')
            # would fail on a missing user. Confirm against callers.
            if edit.ApprovedBy is None:
                self.fields['ApprovedBy'] = UserSerializer()
            else:
                self.fields['ApprovedBy'] = serializers.CharField(source='ApprovedBy.Username')
        return super().to_representation(edit)
class WebpageTokenSerializer(serializers.Serializer):
    """
    Serializes webpage token and user info when a user logs
    into the webpage.
    """
    # The token model stores the token string in 'key'.
    auth_token = serializers.CharField(source='key')
    User = UserSerializer(source='get_user')
# Mixin used by serializers backing Djoser's password reset endpoint.
class UserFunctionsMixin:
    def get_user(self):
        """Look up the user by the email submitted to this serializer.

        Returns the user only when one exists with a usable password.
        Otherwise raises the serializer's 'email_not_found' error if the
        relevant Djoser settings say missing emails should be reported.
        """
        try:
            user = User._default_manager.get(
                **{self.email_field: self.data.get(self.email_field, "")},
            )
            if user.has_usable_password():
                return user
        except User.DoesNotExist:
            pass
        if (
            settings.PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND
            or settings.USERNAME_RESET_SHOW_EMAIL_NOT_FOUND
        ):
            self.fail("email_not_found")
# Serializer used in Djoser's password reset endpoint.
class SendEmailResetSerializer(serializers.Serializer, UserFunctionsMixin):
    default_error_messages = {
        "email_not_found": "User with given email does not exist."
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Dynamically add an EmailField named after the user model's
        # configured email field, so get_user() can look it up by name.
        self.email_field = get_user_email_field_name(User)
        self.fields[self.email_field] = serializers.EmailField()
| StarcoderdataPython |
5079090 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
class Token(object):
    """Base lexer token: a (line number, column number, text) triple.

    The position and text are captured at construction time and exposed
    through read-only properties.
    """

    def __init__(self, linenu, colnu, value):
        super(Token, self).__init__()
        # Single name-mangled slot keeps subclasses from clobbering state.
        self.__pos_and_text = (linenu, colnu, value)

    @property
    def linenu(self):
        """Line number the token was found on."""
        return self.__pos_and_text[0]

    @property
    def colnu(self):
        """Column of the token's first character."""
        return self.__pos_and_text[1]

    @property
    def value(self):
        """The matched source text."""
        return self.__pos_and_text[2]
class IntToken(Token):
    # Optionally signed integer with no leading zeros, followed by
    # whitespace, a closing paren, or end of line.
    pattern = re.compile(r"\s*([+-]?(?:[1-9][0-9]*|[0-9]))(?:\s+|\)|$)")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match an integer at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(IntToken, self).__init__(linenu, colnu, value)
class FloatToken(Token):
    """Token for a decimal float literal such as 12.5, 0.25 or -3.0."""
    # Fix: the original pattern's integer part was [1-9][0-9]*, which
    # rejected floats with a leading zero like "0.5". Allow "0" as well;
    # every previously matched literal still matches.
    pattern = re.compile(r"\s*([+-]?(?:[1-9][0-9]*|0)\.[0-9]*)(?:\s+|\)|$)")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a float at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(FloatToken, self).__init__(linenu, colnu, value)
class BoolToken(Token):
    # Scheme-style booleans: #t or #f.
    pattern = re.compile(r"\s*(#[tf])\b")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a boolean at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(BoolToken, self).__init__(linenu, colnu, value)
class CharToken(Token):
    # Scheme-style character literal: #\ followed by one or more characters
    # (non-greedy up to a word boundary, so named chars like #\space match).
    pattern = re.compile(r"\s*(#\\.+?)\b")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a character literal at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(CharToken, self).__init__(linenu, colnu, value)
class StringToken(Token):
    # Double-quoted string allowing escaped quotes, newlines and backslashes.
    # The captured group is the string body without the surrounding quotes.
    pattern = re.compile(r'\s*"((?:\\"|\n|\\|[^"])*)"')
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a string at *pos*; return (token, next_pos) or None.
        Unlike the other tokens, next_pos is past the closing quote (m.end())."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end())
    def __init__(self, linenu, colnu, value):
        super(StringToken, self).__init__(linenu, colnu, value)
class SymbolToken(Token):
    # Quoted symbol: a leading ' followed by an identifier-like name.
    pattern = re.compile(r"\s*'([a-zA-Z~!?@#$%^&*\-+=_./\<>][a-zA-Z~!?@#$%^&*\-+=_./\<>0-9]*)(?:\s+|\)|$)")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a quoted symbol at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(SymbolToken, self).__init__(linenu, colnu, value)
class ListLiteralToken(Token):
    """Token for a quoted list literal such as '(a b (c d)).

    The matcher consumes from the opening '( through the matching close
    paren, tracking nesting depth.
    """
    pattern = re.compile(r"\s*('\()")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a list literal at *pos*.

        Returns (token, next_pos) or None. Raises SyntaxError when the
        closing parenthesis is missing on this line.
        """
        m = cls.pattern.match(line, pos)
        if m:
            token = ["'("]
            l_brace_count = 1
            start_pos = m.start(1)
            pos = m.end() - 1  # index of the opening "("
            while l_brace_count > 0:
                pos += 1
                if pos == len(line):
                    raise SyntaxError("'( is lack of ).")
                # Fix: was line[++pos] — in Python "++" is just a double
                # unary plus (a no-op), not an increment; the index is
                # advanced by the pos += 1 above.
                ch = line[pos]
                if ch == "(":
                    l_brace_count += 1
                elif ch == ")":
                    l_brace_count -= 1
                token.append(ch)
            return (cls(linenu, start_pos, "".join(token)), pos + 1)
    def __init__(self, linenu, colnu, value):
        super(ListLiteralToken, self).__init__(linenu, colnu, value)
class IdentifierToken(Token):
    # Bare identifier: same character set as SymbolToken but without the quote.
    pattern = re.compile(r"\s*([a-zA-Z~!?@#$%^&*\-+=_./\<>][a-zA-Z~!?@#$%^&*\-+=_./\<>0-9]*)(?:\s+|\)|$)")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match an identifier at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(IdentifierToken, self).__init__(linenu, colnu, value)
class LeftBraceToken(Token):
    # Opening parenthesis.
    pattern = re.compile(r"\s*(\()")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match "(" at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(LeftBraceToken, self).__init__(linenu, colnu, value)
class RightBraceToken(Token):
    # Closing parenthesis.
    pattern = re.compile(r"\s*(\))")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match ")" at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(RightBraceToken, self).__init__(linenu, colnu, value)
class DottedPairToken(Token):
    """Token for the dot separator of a dotted pair, e.g. the "." in (a . b)."""
    # Fix: the dot was unescaped, so " . " matched ANY single character
    # surrounded by spaces (e.g. " x ") as a dotted-pair marker. Escape it
    # to match a literal "." only.
    pattern = re.compile(r"\s*( \. )\S+")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match " . " (with a following non-space) at *pos*;
        return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(DottedPairToken, self).__init__(linenu, colnu, value)
class SingleCommentToken(Token):
    # Line comment: everything after ";" to the end of the line.
    pattern = re.compile(r"\s*;(.*)")
    @classmethod
    def match(cls, linenu, pos, line):
        """Try to match a ";" comment at *pos*; return (token, next_pos) or None."""
        m = cls.pattern.match(line, pos)
        if m:
            return (cls(linenu, m.start(1), m.group(1)), m.end(1))
    def __init__(self, linenu, colnu, value):
        super(SingleCommentToken, self).__init__(linenu, colnu, value)
| StarcoderdataPython |
4810367 | <gh_stars>0
import csv
import logging
from datetime import datetime

from airflow import DAG
from airflow.hooks import S3Hook
from airflow.operators import BaseOperator

from algorithms.corpus_creators.basic import JobCategoryCorpusCreator
from datasets import job_postings
from utils.airflow import datetime_to_quarter
# some DAG args, please tweak for sanity
default_args = {
    'depends_on_past': False,
    'start_date': datetime(2011, 1, 1),
}
dag = DAG(
    'job_labeler',
    schedule_interval=None,  # no schedule: runs only when triggered manually
    default_args=default_args
)
class JobLabelOperator(BaseOperator):
    """Airflow operator that writes one job-category label per row to a
    quarterly CSV file (tmp/job_label_train_<quarter>.csv).

    Fix: the original body used csv.writer without importing csv anywhere
    in the file, raising NameError at task run time.
    """
    def execute(self, context):
        s3_conn = S3Hook().get_conn()
        quarter = datetime_to_quarter(context['execution_date'])
        job_label_filename = 'tmp/job_label_train_' + quarter + '.csv'
        with open(job_label_filename, 'w') as outfile:
            writer = csv.writer(outfile, delimiter=',')
            job_postings_generator = job_postings(s3_conn, quarter)
            corpus_generator = JobCategoryCorpusCreator().label_corpora(job_postings_generator)
            for label in corpus_generator:
                writer.writerow([label])
        logging.info('Done labeling job categories to %s', job_label_filename)
JobLabelOperator(task_id='job_labeling', dag=dag)
| StarcoderdataPython |
6613564 | <filename>1_beginner/chapter2/solutions/print_data_types.py
# Print Data Types
# Come up with 3 examples each of
# floating numbers, integers, and strings and print them.
# floats (note: 32.0 prints with its decimal point, distinguishing it from int 32)
print(1.56)
print(32.0)
print(-35.25)
# integers
print(25)
print(0)
print(-1)
# strings
print("Tahiti, it's a magical place")
print("May the Force be with you")
print("Hey guys")
| StarcoderdataPython |
759 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.
This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
{"less than high school":0.2,
"high school":0.4,
"more than high school but not college":0.2,
"college":0.2}
'''
import scipy.stats as stats
import numpy as np
import pandas as pd
df = pd.read_csv("./assets/NISPUF17.csv")
def proportion_of_education():
    """Return the proportion of children whose mother has each education level.

    EDUC1 codes: 1 = less than high school, 2 = high school,
    3 = more than high school but not college, 4 = college.
    Returns a dict keyed by the level names; values are unrounded
    proportions over all rows and sum to 1.

    Fixes: removed the unreachable ``raise NotImplementedError()`` left
    after the return, and replaced pandas chained assignment
    (``zero_df[edu][mask] = 1``) with ``.loc`` to avoid SettingWithCopy
    behavior.
    """
    df_edu = df.EDUC1
    edu_list = [1, 2, 3, 4]
    zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))), columns=edu_list)
    for edu in edu_list:
        # one-hot indicator column per education level
        zero_df.loc[df_edu == edu, edu] = 1
    sum_ret = zero_df.sum(axis=0)
    name_l = ["less than high school", "high school",
              "more than high school but not college", "college"]
    rat = sum_ret.values / sum(sum_ret.values)
    return {name_l[i]: rat[i] for i in range(4)}
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct"
'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those who know did not.
This function should return a tuple in the form (use the correct numbers:
(2.5, 0.1)
'''
def average_influenza_doses():
    """Return (avg_flu_doses_breastfed, avg_flu_doses_not_breastfed).

    CBF_01: 1 = child was fed breastmilk, 2 = was not. groupby sorts its
    keys, so group 0 is code 1 (yes) and group 1 is code 2 (no) —
    TODO confirm no other CBF_01 codes (e.g. 77/99) appear before 1 and 2
    in the real data. P_NUMFLU is the number of influenza doses; NaNs are
    dropped before averaging.

    Fix: removed the unreachable ``raise NotImplementedError()`` after
    the return.
    """
    fed_breastmilk = list(df.groupby(by='CBF_01'))
    be_fed_breastmilk = fed_breastmilk[0][1]
    not_fed_breastmilk = fed_breastmilk[1][1]
    num_be_fed = be_fed_breastmilk.P_NUMFLU.dropna().mean()
    num_not_fed = not_fed_breastmilk.P_NUMFLU.dropna().mean()
    return num_be_fed, num_not_fed
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.
This function should return a dictionary in the form of (use the correct numbers):
{"male":0.2,
"female":0.4}
Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''
def chickenpox_by_sex():
    """Per sex, the ratio of vaccinated children (>=1 varicella dose) who
    contracted chickenpox to vaccinated children who did not.

    HAD_CPOX: 1 = had chickenpox, 2 = did not. SEX groups sort ascending,
    so group 0 is male (code 1) and group 1 is female (code 2) — TODO
    confirm the SEX coding against the codebook.
    Returns {"male": ratio, "female": ratio}.

    Fixes: removed the unreachable ``raise NotImplementedError()`` and
    commented-out code, and collapsed the four copy-pasted count blocks
    into a helper.
    """
    def _vaccinated_count(frame):
        # rows with at least one varicella dose
        return frame[frame['P_NUMVRC'] > 0].count()['SEQNUMC']

    cpox_group = list(df.groupby(by='HAD_CPOX'))
    have_cpox = cpox_group[0][1]       # HAD_CPOX == 1
    not_have_cpox = cpox_group[1][1]   # HAD_CPOX == 2
    dic = {}
    for idx, sex_label in ((0, 'male'), (1, 'female')):
        have_sex = list(have_cpox.groupby(by='SEX'))[idx][1]
        not_have_sex = list(not_have_cpox.groupby(by='SEX'))[idx][1]
        dic[sex_label] = _vaccinated_count(have_sex) / _vaccinated_count(not_have_sex)
    return dic
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).
Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no’s) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.
Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number).
[1] This isn’t really the full picture, since we are not looking at when the dose was given. It’s possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''
def corr_chickenpox():
    """Pearson correlation between having had chickenpox (HAD_CPOX: 1=yes,
    2=no) and the number of varicella doses received (P_NUMVRC).

    Rows with missing P_NUMVRC are dropped; only HAD_CPOX codes 1 and 2
    are kept (other codes such as don't-know are excluded).
    A positive correlation means more "no" answers go with more doses.

    Fix: removed the unreachable ``raise NotImplementedError()`` after
    the return.
    """
    cpox = df[(df.P_NUMVRC).notnull()]
    have_cpox = cpox[(cpox.HAD_CPOX == 1) | (cpox.HAD_CPOX == 2)]
    df1 = pd.DataFrame({"had_chickenpox_column": have_cpox.HAD_CPOX,
                        "num_chickenpox_vaccine_column": have_cpox.P_NUMVRC})
    corr, pval = stats.pearsonr(df1["had_chickenpox_column"],
                                df1["num_chickenpox_vaccine_column"])
    return corr
| StarcoderdataPython |
1922401 | <gh_stars>0
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import nn
from ppocr.modeling.heads.rec_ctc_head import get_para_bias_attr
from ppocr.modeling.backbones.rec_svtrnet import Block, ConvBNLayer, trunc_normal_, zeros_, ones_
class Im2Seq(nn.Layer):
    """Flattens a height-1 feature map (N, C, 1, W) into a sequence (N, W, C)."""
    def __init__(self, in_channels, **kwargs):
        super().__init__()
        self.out_channels = in_channels
    def forward(self, x):
        B, C, H, W = x.shape
        # The backbone must have collapsed the height dimension to 1.
        assert H == 1
        x = x.squeeze(axis=2)
        x = x.transpose([0, 2, 1])  # (NTC)(batch, width, channels)
        return x
class EncoderWithRNN(nn.Layer):
    """Sequence encoder: 2-layer bidirectional LSTM; output is 2*hidden_size wide."""
    def __init__(self, in_channels, hidden_size):
        super(EncoderWithRNN, self).__init__()
        # bidirectional doubles the feature size
        self.out_channels = hidden_size * 2
        self.lstm = nn.LSTM(
            in_channels, hidden_size, direction='bidirectional', num_layers=2)
    def forward(self, x):
        x, _ = self.lstm(x)  # discard final hidden/cell states
        return x
class EncoderWithFC(nn.Layer):
    """Sequence encoder: a single linear projection to hidden_size features."""
    def __init__(self, in_channels, hidden_size):
        super(EncoderWithFC, self).__init__()
        self.out_channels = hidden_size
        weight_attr, bias_attr = get_para_bias_attr(
            l2_decay=0.00001, k=in_channels)
        self.fc = nn.Linear(
            in_channels,
            hidden_size,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            name='reduce_encoder_fea')
    def forward(self, x):
        x = self.fc(x)
        return x
class EncoderWithSVTR(nn.Layer):
    """Sequence encoder using SVTR global-attention blocks.

    Pipeline: reduce channels (conv1, conv2) -> flatten to a token sequence
    -> `depth` SVTR Blocks with global mixing -> restore to a feature map
    (conv3) -> concat with the input (optionally gradient-detached via
    use_guide) -> conv4 + 1x1 conv down to `dims` output channels.
    """
    def __init__(
            self,
            in_channels,
            dims=64,  # XS
            depth=2,
            hidden_dims=120,
            use_guide=False,
            num_heads=8,
            qkv_bias=True,
            mlp_ratio=2.0,
            drop_rate=0.1,
            attn_drop_rate=0.1,
            drop_path=0.,
            qk_scale=None):
        super(EncoderWithSVTR, self).__init__()
        self.depth = depth
        self.use_guide = use_guide
        self.conv1 = ConvBNLayer(
            in_channels, in_channels // 8, padding=1, act=nn.Swish)
        self.conv2 = ConvBNLayer(
            in_channels // 8, hidden_dims, kernel_size=1, act=nn.Swish)
        self.svtr_block = nn.LayerList([
            Block(
                dim=hidden_dims,
                num_heads=num_heads,
                mixer='Global',
                HW=None,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                act_layer=nn.Swish,
                attn_drop=attn_drop_rate,
                drop_path=drop_path,
                norm_layer='nn.LayerNorm',
                epsilon=1e-05,
                prenorm=False) for i in range(depth)
        ])
        self.norm = nn.LayerNorm(hidden_dims, epsilon=1e-6)
        self.conv3 = ConvBNLayer(
            hidden_dims, in_channels, kernel_size=1, act=nn.Swish)
        # last conv-nxn, the input is concat of input tensor and conv3 output tensor
        self.conv4 = ConvBNLayer(
            2 * in_channels, in_channels // 8, padding=1, act=nn.Swish)
        self.conv1x1 = ConvBNLayer(
            in_channels // 8, dims, kernel_size=1, act=nn.Swish)
        self.out_channels = dims
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; zeros/ones for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight)
        if isinstance(m, nn.Linear) and m.bias is not None:
            zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)
    def forward(self, x):
        # for use guide: detach so gradients don't flow into the backbone copy
        if self.use_guide:
            z = x.clone()
            z.stop_gradient = True
        else:
            z = x
        # for short cut
        h = z
        # reduce dim
        z = self.conv1(z)
        z = self.conv2(z)
        # SVTR global block: flatten spatial dims to a token sequence
        B, C, H, W = z.shape
        z = z.flatten(2).transpose([0, 2, 1])
        for blk in self.svtr_block:
            z = blk(z)
        z = self.norm(z)
        # last stage: back to (B, C, H, W), then fuse with the shortcut
        z = z.reshape([0, H, W, C]).transpose([0, 3, 1, 2])
        z = self.conv3(z)
        z = paddle.concat((h, z), axis=1)
        z = self.conv1x1(self.conv4(z))
        return z
class SequenceEncoder(nn.Layer):
    """Wraps a feature-map-to-sequence reshape plus an optional encoder.

    encoder_type selects the encoder: 'reshape' (none), 'fc', 'rnn', or
    'svtr'. For 'svtr' the encoder runs on the 4-D feature map *before*
    the reshape; the other encoders run on the reshaped sequence.
    """
    def __init__(self, in_channels, encoder_type, hidden_size=48, **kwargs):
        super(SequenceEncoder, self).__init__()
        self.encoder_reshape = Im2Seq(in_channels)
        self.out_channels = self.encoder_reshape.out_channels
        self.encoder_type = encoder_type
        if encoder_type == 'reshape':
            self.only_reshape = True
        else:
            support_encoder_dict = {
                'reshape': Im2Seq,
                'fc': EncoderWithFC,
                'rnn': EncoderWithRNN,
                'svtr': EncoderWithSVTR
            }
            assert encoder_type in support_encoder_dict, '{} must in {}'.format(
                encoder_type, support_encoder_dict.keys())
            if encoder_type == "svtr":
                # SVTR takes its own kwargs (dims, depth, ...), not hidden_size
                self.encoder = support_encoder_dict[encoder_type](
                    self.encoder_reshape.out_channels, **kwargs)
            else:
                self.encoder = support_encoder_dict[encoder_type](
                    self.encoder_reshape.out_channels, hidden_size)
            self.out_channels = self.encoder.out_channels
            self.only_reshape = False
    def forward(self, x):
        if self.encoder_type != 'svtr':
            x = self.encoder_reshape(x)
            if not self.only_reshape:
                x = self.encoder(x)
            return x
        else:
            # svtr encodes the (B, C, H, W) map first, then reshapes
            x = self.encoder(x)
            x = self.encoder_reshape(x)
            return x
| StarcoderdataPython |
6463222 | from Utils.Array import input_array
"""
In this question statement : The nearly sorted array thing, is very similar to one of the questions we did in heap
Problem Name : Sort nearly sorted array or Sort a K Sorted Array check in notion as well
One thing to remember is the difference bw [nearly_sorted / k_sorted] and sorted_rotated_array << Note
"""
"""
^^^^^^ Important Question ^^^^^^
We will modify the standard binary search implementation a little bit
"""
# nearly_sorted vs sorted_rotated_array
def find_key_in_nearly_sorted_array(A, key) -> int:
    """Binary-search *key* in a nearly sorted (k=1 sorted) array.

    Each element is at most one position away from its fully sorted index,
    so besides A[mid] we also probe A[mid-1] and A[mid+1], and can then
    skip two positions per iteration. Returns the index of *key*, or -1.

    Fixes vs. the original: 'next' no longer shadows the builtin, and the
    neighbour probes stay inside the current [left, right] window instead
    of wrapping around with modulo arithmetic to the opposite end of the
    array (which compared against elements outside the search space).
    """
    left, right = 0, len(A) - 1
    while left <= right:
        mid = left + (right - left) // 2  # overflow-safe midpoint idiom
        if A[mid] == key:
            return mid
        # probe immediate neighbours without leaving the window
        if mid - 1 >= left and A[mid - 1] == key:
            return mid - 1
        if mid + 1 <= right and A[mid + 1] == key:
            return mid + 1
        if A[mid] < key:
            left = mid + 2   # key cannot be at mid+1 either; skip two
        else:
            right = mid - 2
    return -1
if __name__ == '__main__':
array = input_array()
target = int(input())
index = find_key_in_nearly_sorted_array(array, target)
print("index ", index)
"""
10 30 40 20 50 80 70
40
2 << Output: Output is index of 40 in given array
10 30 40 20 50 80 70
90
-1 << Output -1 is returned to indicate element is not present
10 30 40 20 50 80 70
10
0 << Output
10 30 40 20 50 80 70
70
0 << Output
10 30 40 20 50 80 70
20
3 << Output
"""
| StarcoderdataPython |
3469059 | from django.db import models
class PlaceType(models.Model):
    """Lookup table of place categories (e.g. country, region)."""
    name = models.CharField(
        max_length=100,
        unique=True,
    )
class Place(models.Model):
    """A named place belonging to a PlaceType."""
    # Fix: models.ForeignField does not exist — the field class is ForeignKey.
    # on_delete=models.CASCADE matches Django's pre-2.0 implicit default.
    type = models.ForeignKey(
        PlaceType,
        on_delete=models.CASCADE,
        related_name='places',
    )
    name = models.CharField(
        max_length=100,
    )
class PlaceAltName(models.Model):
    """An alternative name for a Place; (place, alt_name) pairs are unique."""
    # Fix: models.ForeignField does not exist — the field class is ForeignKey.
    place = models.ForeignKey(
        Place,
        on_delete=models.CASCADE,
        related_name='place_alt_names'
    )
    alt_name = models.CharField(
        max_length=100,
    )
    class Meta:
        unique_together = (('place', 'alt_name',),)
# Place Type: Country -----------------------------------
class Country(models.Model):
    """Country-specific attributes attached one-to-one to a Place."""

    # ``on_delete`` is mandatory for relational fields since Django 2.0;
    # CASCADE mirrors the pre-2.0 implicit default.
    place = models.OneToOneField(
        Place,
        related_name='country',
        on_delete=models.CASCADE,
    )
    # ISO-style identifiers: top-level domain, alpha-2/alpha-3/numeric codes.
    tld = models.CharField(
        max_length=100,
    )
    cca2 = models.CharField(
        max_length=2,
    )
    cca3 = models.CharField(
        max_length=3,
    )
    ccn3 = models.CharField(
        max_length=3,
    )
    # BUG FIX: ``models.ForeignField`` does not exist in Django; the correct
    # field class is ``models.ForeignKey``.
    world_region = models.ForeignKey(
        Place,
        related_name='countries_world_region',
        on_delete=models.CASCADE,
    )
    world_sub_region = models.ForeignKey(
        Place,
        related_name='countries_world_sub_region',
        on_delete=models.CASCADE,
    )
class CountryCallingCode(models.Model):
    """A telephone calling code belonging to a Country."""

    # BUG FIX: ``models.ForeignField`` -> ``models.ForeignKey`` (with the
    # mandatory ``on_delete`` argument, Django >= 2.0).
    country = models.ForeignKey(
        Country,
        related_name='country_calling_codes',
        on_delete=models.CASCADE,
    )
    calling_code = models.CharField(
        max_length=100,
    )
class CountryCurrency(models.Model):
    """A currency used by a Country."""

    # BUG FIX: ``models.ForeignField`` -> ``models.ForeignKey`` (with the
    # mandatory ``on_delete`` argument, Django >= 2.0).
    country = models.ForeignKey(
        Country,
        related_name='country_currencies',
        on_delete=models.CASCADE,
    )
    currency = models.CharField(
        max_length=100,
    )
5184641 | import torch
from torch import nn
# List the entry points published by the msk-mind/luna-ml hub repository.
print (torch.hub.list("msk-mind/luna-ml:main"))
# Load the logistic-regression entry point twice with different pretrained
# weight tags and show that the weights actually differ.
model = torch.hub.load("msk-mind/luna-ml:main", "logistic_regression_model", weight_tag="main:logistic_regression_ones.pth")
print (model.lin1.weight)
model = torch.hub.load("msk-mind/luna-ml:main", "logistic_regression_model", weight_tag="main:logistic_regression_random.pth")
print (model.lin1.weight)
# 5-class tissue tile classifier with a sigmoid output activation.
model = torch.hub.load("msk-mind/luna-ml:main", "tissue_tile_net_model", weight_tag="main:tissue_net_2021-01-19_21.05.24-e17.pth",
    activation=nn.Sigmoid(), num_classes=5)
print (model)
# Matching input transform for the tissue tile model.
transform = torch.hub.load("msk-mind/luna-ml:main", "tissue_tile_net_transform")
print (transform) | StarcoderdataPython |
192496 | from app import database
from app.database import Database
import datetime
from configparser import ConfigParser
# Shared parser instance; rep() re-reads app/config.ini on every call.
config = ConfigParser()
def rep():
    """Mark out-of-stock items healthy again once their delivery time passed."""
    #Get delivery time from config
    config.read("app/config.ini")
    del_time = datetime.timedelta(days=int(config.get("Settings", "del_time")))
    today = datetime.datetime.now().date()
    ordered_stock = Database.get("Stock")
    stock_out = []
    for stock in ordered_stock:
        # BUG FIX: the original condition ``stock[3] == None and
        # stock[3] is not None`` is always False, so no row was ever
        # collected and the function was a no-op.  Assuming column 3 is the
        # ``stock_healthy`` flag, an out-of-stock row is one where it is
        # NULL.  TODO(review): confirm against the Stock table schema.
        if stock[3] is None:
            #Retrieve stock items that are out of stock
            stock_out.append(stock)
    for stock in stock_out:
        #If the delivery time has passed set the stock level as healthy
        # NOTE(review): ``stock[4] - today`` implies column 4 is a future
        # (expected delivery) date -- verify.
        print(stock[4])
        if stock[4] - today >= del_time:
            Database.update("Stock", "stock_healthy", 1, "stock_healthy", "Null")
rep()
| StarcoderdataPython |
1709105 | import collections
from unittest.mock import MagicMock
import bson
import pytest
from ihashmap.cache import Cache
from ihashmap.index import Index, IndexContainer
@pytest.fixture
def fake_cache():
    """Fresh in-memory cache dict with an empty index namespace."""
    store = {}
    store[Index.INDEX_CACHE_NAME] = {}
    return store
@pytest.fixture
def fake_get(fake_cache):
    """Cache.get replacement that reads from fake_cache."""
    def getter(self, name, key, default=None):
        bucket = fake_cache[name]
        return bucket.get(key, default)
    return getter
@pytest.fixture
def fake_set(fake_cache):
    """Cache.set replacement that writes through to fake_cache."""
    def setter(self, name, key, value):
        bucket = fake_cache.setdefault(name, {})
        bucket[key] = value
        return value
    return setter
@pytest.fixture
def fake_update(fake_cache):
    # NOTE(review): unlike the other fakes this returns the bound
    # ``dict.update`` method, whose signature differs from the
    # getter/setter/deleter fakes (no self/name/key arguments) -- confirm
    # this matches what Cache.register_update_method expects.
    return fake_cache.update
@pytest.fixture
def fake_delete(fake_cache):
    """Cache.delete replacement removing the key (KeyError when absent)."""
    def deleter(self, name, key):
        fake_cache[name].pop(key)
    return deleter
def test_Cache_simple(fake_cache, fake_get, fake_set, fake_update, fake_delete):
    # Wire the in-memory fakes into the Cache class-level backend registry.
    Cache.register_get_method(fake_get)
    Cache.register_set_method(fake_set)
    Cache.register_update_method(fake_update)
    Cache.register_delete_method(fake_delete)
    # Declaring the Index subclass presumably registers the _id and
    # _id+model indexes for the "test" cache (verified by the fake_cache
    # contents asserted below).
    class IndexByModel(Index):
        keys = ["_id", "model"]
        cache_name = "test"
    cache = Cache()
    entity = collections.UserDict({"_id": "1234", "model": 1, "release": "1.0"})
    cache.set("test", "1234", entity)
    assert cache.get("test", "1234") == entity
    assert cache.all("test") == [
        entity,
    ]
    # set() must also have maintained the index entries alongside the data.
    assert fake_cache == {
        "test": {"1234": entity},
        Index.INDEX_CACHE_NAME: {"test:_id": ["1234"], "test:_id_model": ["1234:1"]},
    }
    assert cache.search("test", {"model": 1}) == [
        entity,
    ]
    # A pipeline hook registered on a subclass must fire for that subclass
    # only, never for the parent Cache.
    class Cache2(Cache):
        pass
    mocked_func = MagicMock()
    Cache2.PIPELINE.get.before()(mocked_func)
    cache2 = Cache2()
    cache = Cache()
    cache2.get("test", "test")
    assert mocked_func.called
    assert mocked_func.call_count == 1
    cache.get("test", "1234")
    assert mocked_func.call_count == 1
    # search() also accepts callables as match predicates.
    entity2 = collections.UserDict({"_id": "3456", "model": 2})
    cache.set("test", "3456", entity2)
    assert cache.search("test", {"model": lambda model: int(model) in [2, 3]}) == [
        entity2,
    ]
def test_IndexContainer_append():
    # IndexContainer keeps its elements sorted on append: appending 4 after
    # 5 yields [4, 5], not [5, 4].
    container = IndexContainer()
    container.append(5)
    assert container == [5]
    container.append(4)
    assert container == [4, 5]
    # Same ordering for bson ObjectIds, which are expected to compare in
    # generation order here (id1 generated before id2 -> id1 < id2).
    container = IndexContainer()
    id1 = bson.ObjectId()
    id2 = bson.ObjectId()
    container.append(id2)
    assert container == [id2]
    container.append(id1)
    assert container == [id1, id2]
| StarcoderdataPython |
8109629 | #!/usr/bin/python
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
# StrictUndefined makes rendering fail loudly on any missing variable.
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader(['./templates'])
# Template context: VRF name, route distinguisher, enabled address families.
vrfs = {'vrf_name': 'blue',
        'rd_number': '100:1',
        'ipv4_enabled': True,
        'ipv6_enabled': True}
def main():
    """Render templates/ex3.j2 with the vrfs context and print the result."""
    template = env.get_template('ex3.j2')
    rendered = template.render(**vrfs)
    print(rendered)
main()
| StarcoderdataPython |
6576248 | <filename>mseg/dataset_apis/SunrgbdImageLevelDataset.py<gh_stars>1-10
#!/usr/bin/env python3
import argparse
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pdb
from typing import List, Optional
from mseg.utils.names_utils import (
get_dataloader_id_to_classname_map,
get_classname_to_dataloaderid_map
)
from mseg.utils.mask_utils import (
form_mask_triple_embedded_classnames,
save_binary_mask_double,
get_present_classes_in_img
)
"""
Note: We do not use this dataset API at training or inference time.
It is designed purely for generating the re-labeled masks of the
MSeg dataset (found in ground truth label maps) on disk, prior to
training/inference.
"""
class SunrgbdImageLevelDataset:
    def __init__(self, dataset_dir):
        """
        Record the image/label directory layout under ``dataset_dir`` and
        build the id<->classname lookup tables for the 37-class taxonomy.
        """
        self.id_to_classname_map = get_dataloader_id_to_classname_map(dataset_name='sunrgbd-37')
        self.classname_to_id_map = get_classname_to_dataloaderid_map(dataset_name='sunrgbd-37')
        self.img_dir = f'{dataset_dir}/image'
        self.label_dir = f'{dataset_dir}/semseg-label37'
    def get_class_masks(
        self,
        required_class_names: List[str],
        highlight_classname: str,
        condition,
        folder_prefix: str):
        """
        For every image (train and test splits) containing all of
        ``required_class_names``, save one RGB/binary-mask visualization per
        mask of ``highlight_classname`` under temp_files/.
        NOTE(review): the ``condition`` parameter is currently unused here.
        """
        for split in ['train', 'test']:
            rgb_fpaths = glob.glob(f'{self.img_dir}/{split}/*.jpg')
            num_split_imgs = len(rgb_fpaths)
            for i, rgb_fpath in enumerate(rgb_fpaths):
                print(f'On image {i}/{num_split_imgs-1}')
                fname_stem = Path(rgb_fpath).stem
                rgb_img, label_img = self.get_img_pair(fname_stem, split)
                present_classnames = get_present_classes_in_img(label_img, self.id_to_classname_map)
                # skip images that are missing any required class
                if not all([req_name in present_classnames for req_name in required_class_names]):
                    continue
                fname_stem = Path(rgb_fpath).stem
                for class_idx in np.unique(label_img):
                    instance_classname = self.id_to_classname_map[class_idx]
                    if instance_classname != highlight_classname: # not in required_class_names:
                        continue
                    label_mask = (label_img == class_idx).astype(np.uint8)
                    save_fpath = f'temp_files/{folder_prefix}_{split}/{fname_stem}_{class_idx}.jpg'
                    save_binary_mask_double(rgb_img, label_mask, save_fpath, save_to_disk=True)
    def get_img_pair(self, fname_stem, split):
        """
        Load the (rgb_img, label_img) pair for one image; label filenames
        use the numeric stem zero-padded to 8 characters.
        """
        if split == 'val':
            # SUNRGB has no val, only test
            split = 'test'
        fname_stem = fname_stem.replace('img-', '')
        rgb_fpath = f'{self.img_dir}/{split}/img-{fname_stem}.jpg'
        extended_fname_stem = fname_stem.zfill(8)
        label_fpath = f'{self.label_dir}/{split}/{extended_fname_stem}.png'
        label_img = imageio.imread(label_fpath)
        rgb_img = imageio.imread(rgb_fpath)
        return rgb_img, label_img
    def get_segment_mask(
        self,
        seq_id: str,
        query_segmentid: int,
        fname_stem: str,
        split: str
    ) -> Optional[np.ndarray]:
        """
        Return a binary uint8 mask for ``query_segmentid`` in one image, or
        None when that id is not present in the label map.
        seq_id is only provided so that all other datasets can share a common API.
        """
        if split == 'val':
            # SUNRGB has no val, only test
            split = 'test'
        rgb_img, label_img = self.get_img_pair(fname_stem, split)
        for class_idx in np.unique(label_img):
            if class_idx == query_segmentid:
                label_mask = (label_img == class_idx).astype(np.uint8)
                return label_mask
        return None
def visualize_sunrgbd_class_masks(d_api, classname):
    """
    Dump one image per mask, for all masks of a specific class.
    """
    d_api.get_class_masks(
        required_class_names=[classname],
        highlight_classname=classname,
        condition='intersection',
        folder_prefix=f'sunrgbd_{classname}',
    )
def main():
    """
    Visualize masks of a chosen category from SUN RGB-D.
    Usage:
        python SunrgbdImageLevelDataset.py \
            --dataroot /Users/johnlamb/Downloads/SUNRGBD-37-CLUSTER --classname lamp
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--classname", type=str, required=True, help="name of class to visualize")
    parser.add_argument("--dataroot", type=str, required=True, help="path to SUN RGB-D data root")
    args = parser.parse_args()
    dataset_api = SunrgbdImageLevelDataset(args.dataroot)
    visualize_sunrgbd_class_masks(dataset_api, args.classname)
| StarcoderdataPython |
11228023 | __author__ = 'brandonkelly'
import numpy as np
from sklearn import cross_validation, metrics
from sklearn.decomposition import PCA
import multiprocessing
import copy
import matplotlib.pyplot as plt
class SupervisedPCABase(object):
    def __init__(self, regressor, max_components=None, n_components=1, whiten=True):
        """
        Base class for performing supervised principal component regression,
        useful when the number of inputs (features) exceeds the number of
        data points.  NOTE: this module targets Python 2 (xrange, print
        statements elsewhere in the file).
        @param regressor: The object that will perform the regression. It must define:
            regressor.fit(X, y) : Fits the regression model y = f(X).
            regressor.predict(X) : Compute the prediction y = f(X).
            regressor.coef_score_ : The score of each parameter, used for ranking the
                most important features when computing the reduced feature space.
                Generally abs(coefficient) / standard error, excluding the intercept.
        @param max_components: Maximum number of components to search over. The default is p.
        @param n_components: The number of reduced data matrix PCA components to use in the regression.
        @param whiten: Remove differences in variance among the components, i.e.,
            principal components will have unit variance.
        """
        self.regressor = regressor
        self.max_components = max_components
        self.pca_object = PCA(n_components=n_components, whiten=whiten)
        self.n_components = n_components
        self.whiten = whiten
        # populated later: number of retained features and their column order
        self.n_reduced = 0
        self.sort_idx = np.zeros(1)
    def _compute_stnd_coefs(self, X, y):
        """
        Compute the standardized regression coefficients, up to a common scaling
        factor, by fitting the regressor to one feature column at a time.
        @param X: The matrix of inputs, shape (n,p).
        @param y: The array of response values, size n.
        @return: The standardized regression coefficients, size p.
        """
        p = X.shape[1]
        scoefs = np.zeros(p)
        for j in xrange(p):  # xrange: Python 2
            thisX = X[:, j]
            # univariate fit; coef_score_ ranks this feature's importance
            self.regressor.fit(thisX[:, np.newaxis], y)
            scoefs[j] = self.regressor.coef_score_
        return scoefs
    def _get_reduced_features(self, X, coefs, pmax):
        """
        Fit PCA on the top-scoring features and project the data onto it.
        @param X: The array of inputs, shape (n, p).
        @param coefs: The array of standardized coefficients, size p.
        @param pmax: The maximum number of features to use in the reduced feature space PCA.
        @return: Tuple (X_reduced, sort_idx): the data projected onto the reduced
            feature space PCA, shape (n, self.n_components), and the indices of
            the retained features.
        """
        # rank features by score (descending) and keep the top pmax
        sort_idx = np.argsort(coefs)[::-1]
        sort_idx = sort_idx[:pmax]
        self.pca_object.fit(X[:, sort_idx])
        X_reduced = self.pca_object.transform(X[:, sort_idx])
        return X_reduced, sort_idx
    def fit(self, X, y, n_reduced):
        """
        Perform the regression using the first self.n_components principal components
        from the reduced feature space.  Calls self.regressor.fit(X,y) internally.
        @param X: The array of inputs, shape (n, p).
        @param y: The array of response values, size n.
        @param n_reduced: The number of features to use in the reduced feature space.
        """
        scoefs = self._compute_stnd_coefs(X, y)
        X_reduced, sort_idx = self._get_reduced_features(X, scoefs, n_reduced)
        # remember the feature ordering so predict() applies the same projection
        # NOTE(review): self.n_reduced is initialized in __init__ but never
        # updated here -- confirm whether that is intentional.
        self.sort_idx = sort_idx
        self.regressor.fit(X_reduced, y)
    def predict(self, X):
        """
        Predict y = f(X) using the reduced-feature-space PCA from the most
        recent call to self.fit(X, y, n_reduced).
        @param X: The array of inputs, shape (n, p).
        @return: The predicted values of the response.
        """
        # project onto the PCA basis fit on the reduced feature set
        X_reduced = self.pca_object.transform(X[:, self.sort_idx])
        y_predict = self.regressor.predict(X_reduced)
        return y_predict
def launch_coef_scores(args):
    """
    Compute the standardized coefficient scores for one fold.  Defined at
    module level so multiprocessing.Pool can pickle it.
    @param args: Tuple (SupervisedPCABase instance, feature matrix, response array).
    @return: The standardized scores of the coefficients.
    """
    spca, X, y = args
    return spca._compute_stnd_coefs(X, y)
def compute_cv_prediction(args):
    """
    Compute predictions for one cross-validation fold.  Defined at module
    level so multiprocessing.Pool can pickle it.
    """
    spca, X_train, y_train, X_test, n_reduced, scoef = args
    model = SupervisedPCABase(copy.deepcopy(spca.regressor), spca.max_components, spca.n_components, spca.whiten)
    X_reduced, sort_idx = model._get_reduced_features(X_train, scoef, n_reduced)
    model.regressor.fit(X_reduced, y_train)
    X_test_reduced = model.pca_object.transform(X_test[:, sort_idx])
    return model.regressor.predict(X_test_reduced)
class SupervisedPCA(SupervisedPCABase):
    def __init__(self, regressor, max_components=None, n_components=1, whiten=True, n_jobs=1):
        """
        Supervised principal component regression with optional multiprocessing,
        useful when the number of inputs (features) exceeds the number of data points.
        @param regressor: The object that will perform the regression. It must define:
            regressor.fit(X, y) : Fits the regression model y = f(X).
            regressor.predict(X) : Compute the prediction y = f(X).
            regressor.coef_score_ : The score of each parameter, used for ranking the
                most important features when computing the reduced feature space.
        @param max_components: Maximum number of components to search over. The default is p.
        @param n_components: The number of reduced data matrix PCA components to use in the regression.
        @param whiten: Remove differences in variance among the components.
        @param n_jobs: The number of processes to use for parallel processing.
            If n_jobs < 0 then use the maximum number available.
        """
        super(SupervisedPCA, self).__init__(regressor, max_components, n_components, whiten)
        if n_jobs < 0:
            # any negative value means "use all available cores"
            n_jobs = multiprocessing.cpu_count()
        self.n_jobs = n_jobs
    def _compute_cv_prediction(self, args):
        """
        Compute predictions for one cross-validation fold on the
        single-process path; mirrors the module-level compute_cv_prediction()
        used on the multiprocessing path (whose args tuple also carries the
        instance).
        """
        X_train, y_train, X_test, n_reduced, scoef = args
        SPCA = SupervisedPCABase(copy.deepcopy(self.regressor), self.max_components, self.n_components, self.whiten)
        X_reduced, sort_idx = SPCA._get_reduced_features(X_train, scoef, n_reduced)
        SPCA.regressor.fit(X_reduced, y_train)
        X_test_reduced = SPCA.pca_object.transform(X_test[:, sort_idx])
        y_predict = SPCA.regressor.predict(X_test_reduced)
        return y_predict
    def _launch_coef_scores(self, args):
        """
        Compute the standardized coefficient scores for one fold on the
        single-process path; mirrors the module-level launch_coef_scores()
        used on the multiprocessing path.
        @param args: Tuple containing the feature matrix and response array.
        @return: The standardized scores of the coefficients.
        """
        X, y = args
        scoefs = self._compute_stnd_coefs(X, y)
        return scoefs
def choose_nreduced(self, X, y, lossfunc=None, cv=None, verbose=False, cvplot=False):
"""
Choose the number of features to use in the reduced feature set by minimizing the cross-validation error.
@param X: The feature matrix, shape (n,p)
@param y: The vector of response values, size n.
@param lossfunc: The loss function to use for the CV error, callable. The default is mean squared error.
@param cv: Number of CV folds (if int), or cross-validation iterator.
@param verbose: Print helpful information.
@param cvplot: Plot the CV error as a function of the number features in the reduced feature set.
@return: The number of features in the reduced feature set that minimized the CV error.
"""
if self.n_jobs > 1:
pool = multiprocessing.Pool(self.n_jobs)
pool.map(int, range(self.n_jobs)) # Trick to "warm up" the Pool
# setup cross-validation iterator
if cv is None:
K_folds = 8
if isinstance(cv, int):
K_folds = cv
cv = cross_validation.KFold(y.size, n_folds=K_folds)
if lossfunc is None:
lossfunc = metrics.mean_squared_error
if self.max_components is None:
self.max_components = X.shape[1]
if verbose:
print 'Searching over', self.max_components, ' features to include in the reduced feature space.'
print 'Computing univariate regression tests statistics for each feature...'
# first compute coefficients scores
sargs = []
for train_idx, test_idx in cv:
if self.n_jobs == 1:
sargs.append((X[train_idx, :], y[train_idx]))
else:
sargs.append((self, X[train_idx, :], y[train_idx]))
if self.n_jobs == 1:
scoefs = map(self._launch_coef_scores, sargs)
else:
scoefs = pool.map(launch_coef_scores, sargs)
# find optimal number of features to use in PCA on reduced feature set, do this by minimizing cross-validation
# error on a grid.
cverrors = np.zeros(self.max_components)
if verbose:
print 'Computing cross-validation errors on a grid of up to', self.max_components, 'features used in the', \
'reduced feature space...'
for k in xrange(self.max_components):
cverror_args = []
ytest = []
fold_idx = 0
for train_idx, test_idx in cv:
if self.n_jobs == 1:
cverror_args.append((X[train_idx, :], y[train_idx], X[test_idx, :], k + 1, scoefs[fold_idx]))
else:
cverror_args.append((self, X[train_idx, :], y[train_idx], X[test_idx, :], k + 1, scoefs[fold_idx]))
ytest.append(y[test_idx])
fold_idx += 1
if self.n_jobs == 1:
ypredictions = map(self._compute_cv_prediction, cverror_args)
else:
ypredictions = pool.map(compute_cv_prediction, cverror_args)
cverror_k = 0.0
for yt, yp in zip(ytest, ypredictions):
cverror_k += lossfunc(yt, yp) / K_folds
cverrors[k] = cverror_k
if cvplot:
plt.plot(np.arange(1, self.max_components + 1), cverrors)
plt.xlabel('# of features in reduced set')
plt.ylabel('CV Loss Function')
plt.show()
n_reduced = cverrors.argmin() + 1
if verbose:
print 'Selected', n_reduced, 'features to use in the reduced feature set.'
return n_reduced | StarcoderdataPython |
5059441 | '''
Experiment: DS18B20 temperature sensor
Version: v1.0
Date: 2020.3
Author: 01Studio
Description: sample the temperature in a loop and show it on the OLED.
'''
#Sensor lib
from w1thermsensor import W1ThermSensor
# luma libraries for driving the OLED
from luma.core.render import canvas
from luma.oled.device import ssd1306
from time import sleep
# Initialise the OLED: I2C bus 1, device address 0x3c
device = ssd1306(port=1, address=0x3C)
################# DS18B20 construction ###################################
# Option 1: requires the sensor's 1-wire address
#DS18B20 = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, "000005e35af8")
# Option 2: no address needed; only valid when a single sensor is connected
DS18B20=W1ThermSensor()
while True:
    temperature = DS18B20.get_temperature() # read the sensor
    # Show the reading on the OLED, keeping 2 decimal places
    with canvas(device) as draw:
        draw.text((0, 0), '01Studio', fill="white")
        draw.text((0, 15), 'Temp test:', fill="white")
        draw.text((0, 40), '%.2f'%temperature+' C', fill="white")
    print('%.2f'%temperature) # echo to stdout
    sleep(2) # sample every 2 seconds
11216978 | <reponame>docusign/eg-03-python-auth-code-grant
from docusign_rooms import RoomsApi
from datetime import datetime
from flask import session, request
from ..utils import create_rooms_api_client
class Eg005GetRoomsWithFiltersController:
    """Controller for listing DocuSign rooms, optionally filtered by date."""

    @staticmethod
    def get_args():
        """Collect the request arguments from the session and the form."""
        return {
            # Represents your {ACCOUNT_ID}
            "account_id": session["ds_account_id"],
            # Represents your {ACCESS_TOKEN}
            "access_token": session["ds_access_token"],
            "start_date": request.form.get("start_date"),
            "end_date": request.form.get("end_date"),
        }

    @staticmethod
    def get_rooms(args):
        """Return all rooms for the account (no date filter applied)."""
        client = create_rooms_api_client(access_token=args["access_token"])
        response = RoomsApi(client).get_rooms(account_id=args["account_id"])
        return response.rooms

    @staticmethod
    def worker(args):
        """Return rooms whose field data changed inside the requested window."""
        client = create_rooms_api_client(access_token=args["access_token"])
        window_start = datetime.strptime(args["start_date"], "%Y-%m-%d")
        window_end = datetime.strptime(args["end_date"], "%Y-%m-%d")
        return RoomsApi(client).get_rooms(
            account_id=args["account_id"],
            field_data_changed_start_date=window_start,
            field_data_changed_end_date=window_end,
        )
| StarcoderdataPython |
182449 | <reponame>prathimacode-hub/WIE-WoC
n = int(input())
# Right-aligned triangle: row i prints (n - 1 - i) spaces then (i + 1) stars.
for row in range(n):
    print(" " * (n - 1 - row) + "* " * (row + 1))
| StarcoderdataPython |
4932295 | <filename>authserver/mailauth/migrations/0005_auto_20170709_1844.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-09 16:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mailauth', '0004_auto_20170626_1731'),
]
operations = [
migrations.RenameModel(
old_name='MNGroups',
new_name='MNGroup',
),
]
| StarcoderdataPython |
9725606 | <gh_stars>1-10
import sim
import plot_gen
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing
import json
import os
import functools
def generate_star_data():
    """Integrate the stellar ODEs over a grid of central densities.

    For each step length, solves sim.sim_star for 100 log-spaced q_c values
    in parallel and returns a list of tuples
    (step_length, q_c values, per-star results, edge radii, edge masses).
    """
    q_c_range = [-9, 19]
    q_c_set = np.around(np.exp(np.linspace(*q_c_range, num=100)), 10)
    data_ret = []
    for step_length in [1e-6, 1e-5, 1e-4, 1e-3]:
        with multiprocessing.Pool(12) as w_pool:
            sim_star_partial = functools.partial(sim.sim_star, step_length)
            results = w_pool.imap(sim_star_partial, q_c_set)
            star_edge_list_x = []
            star_edge_list_mu = []
            star_result_set = []
            # last entries of each solution are the star's surface values
            for x_list, mu_list, q_list in tqdm(results, total=len(q_c_set), desc="Solve ODEs"):
                star_edge_list_x.append(x_list[-1])
                star_edge_list_mu.append(mu_list[-1])
                star_result_set.append([x_list, mu_list, q_list])
            data_ret.append((step_length, list(q_c_set), star_result_set, star_edge_list_x, star_edge_list_mu))
    return data_ret
if __name__ == "__main__":
    # Run the (expensive) simulation only when no cached results exist.
    if not os.path.exists("star_data.json"):
        out_data = generate_star_data()
        # drop the bulky per-star result set before caching to disk
        file_formatted_data = [
            {"step_length": step_length,
             "q_c_set": q_c_set,
             "star_radii": star_edge_list,
             "star_masses": star_mass_list}
            for step_length, q_c_set, _, star_edge_list, star_mass_list in out_data
        ]
        with open("star_data.json", "w") as json_obj:
            json.dump(file_formatted_data, json_obj, indent=4)
    with open("star_data.json", "r") as json_obj:
        saved_run_data = json.load(json_obj)
    # NOTE(review): q_c_vals is read but never used below.
    q_c_vals = saved_run_data[0]["q_c_set"]
    radial_edge_list = saved_run_data[0]["star_radii"]
    star_mass_list = saved_run_data[0]["star_masses"]
    mass_radius_step_length_tuple_list = [
        (simulation_data["step_length"], simulation_data["star_masses"], simulation_data["star_radii"])
        for simulation_data in saved_run_data
    ]
    plot_gen.plot_mass_radius_relation(radial_edge_list, star_mass_list)
    plot_gen.plot_mass_radius_relation_relations(mass_radius_step_length_tuple_list)
    # single-star diagnostic plot for q_c = 10 at the finest step length
    x_list, mu_list, q_list = sim.sim_star(1e-6, 10)
    plot_gen.plot_single_white_dwarf_values(x_list, q_list, mu_list, 10)
    exit(0)
| StarcoderdataPython |
269462 | <reponame>pashas2k3/find-relative
import json
import boto3
from src.relation import DynamoRelation
from src.search import findPath
class SearchAction:
    """Typed wrapper for a search request's start and end person names."""

    def __init__(self, start: str, end: str):
        self.start = start
        self.end = end
# Module-level AWS handles, created once per Lambda container so they are
# reused across warm invocations.
resource = boto3.resource('dynamodb')
dynamoRelation = DynamoRelation(resource, 'Person')
def lambda_handler(event, _):
    """AWS Lambda entry point: find a relation path between two people."""
    action = SearchAction(**event)
    path = findPath(action.start, action.end, dynamoRelation.neighbors)
    body = json.dumps(path) if path else "None"
    return {
        'body': body
    }
| StarcoderdataPython |
11376444 | <reponame>pyvain/niaki_solver<filename>rand_init.py
HEIGHT = 9      # grid rows
WIDTH = 6       # grid columns
NB_COLORS = 6   # distinct tile colors; NOTE(review): unused in this file
def test_ready(g):
for i in range(HEIGHT):
for j in range(WIDTH):
for vi, vj in [(i-1,j), (i+1,j), (i,j-1), (i,j+1)]:
if vi >= 0 and vj >= 0 and vi < HEIGHT and vj < WIDTH and g[vi][vj] == g[i][j]:
return False
return True
def print_grid(g):
    """Print a separator line followed by one row of ``g`` per line."""
    print("-----------")
    for row in g:
        print(row)
def rand_grid():
    """Shuffle rows, then columns, until no two adjacent cells match.

    Returns the grid as soon as test_ready() accepts it; randomness comes
    from random.shuffle, so the number of iterations is probabilistic.
    """
    from random import shuffle
    g = [[j for j in range(WIDTH)] for i in range(HEIGHT)]
    while not test_ready(g):
        # first try shuffling each row independently
        for i in range(HEIGHT):
            shuffle(g[i])
        if(test_ready(g)):
            return g
        # then shuffle each column independently
        for j in range(WIDTH):
            c = [g[i][j] for i in range(HEIGHT)]
            shuffle(c)
            for i in range(HEIGHT):
                g[i][j] = c[i]
        if(test_ready(g)):
            return g
    return g
if __name__ == "__main__":
    # Generate one conflict-free grid and print it row by row.
    print("Niak niak niak...")
    g = rand_grid()
    for l in g:
        print(l)
| StarcoderdataPython |
6694045 | import subprocess
import random
import threading
def worker():
    """Launch one client process with a randomly numbered test nickname."""
    # str() replaces the Python-2-only backtick (repr) syntax, keeping the
    # same output for ints while remaining valid on Python 3.
    subprocess.call(["Build_Client/Client", "-p", "2000", "-a", "127.0.0.1", "-n", "test" + str(random.randint(0, 1000000))])
random.seed()
# Launch 100 clients concurrently.  NOTE(review): the threads are never
# joined, so the main thread exits as soon as all have been started.
threads = [threading.Thread(target = worker) for _i in range(100)]
for thread in threads:
    thread.start()
| StarcoderdataPython |
1708861 | <gh_stars>1-10
#!/usr/bin/env python3
import rospy
import pygame
from std_msgs.msg import Int32
def handle_input():
    """Translate arrow-key presses into left/right motor speed messages."""
    left_pub = rospy.Publisher('left_motor', Int32, queue_size = 10)
    right_pub = rospy.Publisher('right_motor', Int32, queue_size = 10)
    left = Int32()
    right = Int32()
    while not(rospy.is_shutdown()):
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                # full speed forward/backward; opposite speeds to pivot
                if event.key == pygame.K_UP:
                    left.data = 255
                    right.data = 255
                if event.key == pygame.K_DOWN:
                    left.data = -255
                    right.data = -255
                if event.key == pygame.K_LEFT:
                    left.data = -100
                    right.data = 100
                if event.key == pygame.K_RIGHT:
                    left.data = 100
                    right.data = -100
            else:
                # NOTE(review): this resets the motors on ANY non-KEYDOWN
                # event (including mouse motion), and nothing resets them
                # when the event queue is empty -- confirm this is the
                # intended dead-man behaviour.
                left.data = 0
                right.data = 0
        left_pub.publish(left)
        right_pub.publish(right)
        # ``rate`` is a module-level name created in the __main__ block below
        rate.sleep()
if __name__=='__main__':
    rospy.init_node('simplest_teleop')
    pygame.init()
    # A window is required for pygame to receive keyboard focus/events.
    screen = pygame.display.set_mode((640, 480))
    rate = rospy.Rate(10)  # read by handle_input() as a module-level name
    handle_input()
| StarcoderdataPython |
class Node:
    """Binary tree node with an integer value and optional children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    def pathSum(self, nums: List[int]) -> int:
        """LeetCode 666: sum all root-to-leaf path sums of a tree whose
        nodes are encoded as three-digit ints DPV (depth, 1-based position
        within the level, value).  Assumes ancestors appear before their
        descendants in ``nums`` (problem guarantee -- TODO confirm)."""
        self.totalSum = 0
        root = Node(nums[0] % 10)
        for num in nums[1:]:
            # decode depth / position within the level / node value
            depth, position, val = num // 100, (num // 10) % 10, num % 10
            position -= 1
            currentNode = root
            for d in range(depth - 2, -1, -1):
                # Walk down one level: at this depth the high half of
                # ``position`` selects left vs right.  The chained
                # assignment evaluates left-to-right: it first creates the
                # child if missing, then moves ``currentNode`` onto it.
                if position < 2 ** d:
                    currentNode.left = currentNode = currentNode.left or Node(val)
                else:
                    currentNode.right = currentNode = currentNode.right or Node(val)
                position %= 2 ** d
        def preOrderTraversal(node, currentSum):
            # accumulate path sums; only leaves contribute to totalSum
            if node is not None:
                currentSum += node.val
                if node.left is None and node.right is None:
                    self.totalSum += currentSum
                else:
                    preOrderTraversal(node.left, currentSum)
                    preOrderTraversal(node.right, currentSum)
        preOrderTraversal(root, 0)
return self.totalSum | StarcoderdataPython |
6506988 | <gh_stars>100-1000
# Sets global variable notebook when line magic is called with "%torchbearer notebook"
notebook = {'nb': False}
def set_notebook(is_notebook):
    """Record whether torchbearer should render notebook-style output."""
    notebook.update(nb=is_notebook)
def torchbearer(line):
    """Line magic body: '%torchbearer notebook' / '%torchbearer normal'."""
    modes = {'notebook': True, 'normal': False}
    if line in modes:
        set_notebook(modes[line])
try:
    # Register the line magic when running under IPython; a successful
    # import also flips the default to notebook mode.
    import IPython.core.magic
    torchbearer = IPython.core.magic.register_line_magic(torchbearer)
    set_notebook(True)
except (NameError, ImportError) as e:
    # Not running inside IPython -- keep normal (non-notebook) behaviour.
    pass
def is_notebook():
    """Return the current notebook-mode flag."""
    current = notebook['nb']
    return current
| StarcoderdataPython |
345496 | <reponame>pengfei-chen/algorithm_qa
"""
问题描述:一棵二叉树原本是搜索二叉树,但是其中有两个节点调换了位置,使得这棵二叉树不再是搜索二叉树,
请找到这两个节点并返回。已知二叉树所有节点的值都不一样,给定二叉树的头结点head,返回一个长度为2的
二叉树节点类型的数组errs。errs[0]表示一个错误节点,errs[1]表示一个错误节点。
进阶:如果在原问题中得到了这两个错误节点,我们当然可以通过交换两个节点值的方式让整棵二叉树重新成为
搜索二叉树,但现在要求不能这么做,而是在结构上完全交换两个节点的位置,请实现调整的函数。
"""
from binarytree.toolcls import Node
# todo 完成进阶题目
class BSTChecker:
    """Collects an in-order traversal and reports the two swapped nodes."""

    def __init__(self):
        self.nodes = list()
        self.errors = list()

    def get_wrong_nodes(self):
        """Return the two nodes that break the BST ordering, in order."""
        in_order = self.nodes
        for idx in range(len(in_order) - 1):
            if in_order[idx].value > in_order[idx + 1].value:
                self.errors.append((in_order[idx], in_order[idx + 1]))
        if len(self.errors) == 1:
            # adjacent swap: both culprits sit in the single inversion
            return list(self.errors[0])
        if len(self.errors) == 2:
            # distant swap: first node of the first inversion and second
            # node of the second inversion
            return [self.errors[0][0], self.errors[1][1]]

    def get_all_nodes(self, head):
        """In-order traversal appending every node to ``self.nodes``."""
        if head is None:
            return None
        if head.left is not None:
            self.get_all_nodes(head.left)
        self.nodes.append(head)
        if head.right is not None:
            self.get_all_nodes(head.right)
if __name__ == '__main__':
    # Build a BST whose ordering was broken by swapping the nodes with
    # values 8 and 4 (in-order: 1 2 3 8 5 6 7 4), then report the two
    # offending nodes (prints 8 then 4).
    head14 = Node(5)
    head14.left = Node(3)
    head14.right = Node(7)
    head14.left.left = Node(2)
    head14.left.right = Node(8)
    head14.right.left = Node(6)
    head14.right.right = Node(4)
    head14.left.left.left = Node(1)
    bst = BSTChecker()
    bst.get_all_nodes(head14)
    err_nodes = bst.get_wrong_nodes()
    for err_node in err_nodes:
        print(err_node.value)
| StarcoderdataPython |
6441473 | from .facade_v1 import *
| StarcoderdataPython |
11338339 | """ Backend client handling data functions
"""
import json
import numpy as np
import pandas as pd
import plotly.graph_objs as go
# Purple sequential colorscale (light -> dark) for the state choropleth.
scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
    [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
class Client(object):
    def __init__(self, filters=None):
        """Load the data and keep both a raw and a filtered view of it.

        filtered_df starts as a full copy until filter_dataframe() is applied.
        """
        self.filters = filters
        self.unfiltered_df = self._load_data()
        self.filtered_df = self.unfiltered_df.copy()
        # values used to populate the UI filter controls
        self.unique_customers = self.unfiltered_df["Customer Name"].unique()
        self.years = sorted(self.unfiltered_df["Year"].unique())
    def _load_data(self):
        """Read the Superstore workbook and add State Code and Year columns."""
        # load data from xlsx file and add calculated cols
        df = pd.read_excel("data/Superstore Data.xlsx")
        # get 2-letter state abbreviations
        with open('data/states.json') as json_file:
            state_mapping = json.load(json_file)
        df["State Code"] = df["State"].map(state_mapping)
        # get year
        df["Year"] = df["Order Date"].apply(lambda x: x.year)
        return df
def filter_dataframe(self, filters):
self.filters = filters
df = self.unfiltered_df.copy()
if not self.filters["customers"]:
customer_bool = np.ones(len(df),dtype=bool)
else:
# convert unicode to strings
customers = [str(i) for i in self.filters["customers"]]
customer_bool = (df["Customer Name"].isin(customers))
start_bool = (df["Year"] >= self.filters["years"][0])
end_bool = (df["Year"] <= self.filters["years"][1])
filters = customer_bool & start_bool & end_bool
dff = df.loc[filters].reset_index(drop=True)
self.filtered_df = dff
def make_total_sales_chart(self):
# show total sales by category
grouped = self.filtered_df.groupby("Category").sum()
traces = [
dict(
type="bar",
x=grouped.index.values,
y=grouped["Sales"].values,
)
]
layout = {'title': 'Total Sales by Category'}
figure = dict(data=traces, layout=layout)
return figure
    def make_choropleth_chart(self):
        """US state choropleth of total sales, with profit in the hover text."""
        # heatmap by state
        df = self.filtered_df.copy().groupby("State Code").sum()
        # hover text: state code plus aggregated sales and profit
        df["text"] = df.index \
            + "<br>" + "Sales: " + df["Sales"].astype(str) \
            + "<br>" + "Profit: " + df["Profit"].astype(str)
        data = [ dict(
            type='choropleth',
            colorscale = scl,
            autocolorscale = False,
            locations = df.index,
            z = df['Sales'].astype(float),
            locationmode = 'USA-states',
            hoverinfo="text",
            text = df['text'],
            marker = dict(
                line = dict (
                    color = 'rgb(255,255,255)',
                    width = 2
                ) ),
            colorbar = dict(
                title = "Total Sales")
            ) ]
        layout = dict(
            title = 'State Heatmap by Total Sales (Hover for Profit)',
            geo = dict(
                scope='usa',
                projection=dict( type='albers usa' ),
                showlakes = True,
                lakecolor = 'rgb(255, 255, 255)'),
        )
        figure = dict(data=data, layout=layout)
        return figure
def make_scatterplot(self):
# scatterplot showing sales vs profits
# show by subcategory and color by category
categories = sorted(self.filtered_df["Category"].unique())
data = []
for category in categories:
dff = self.filtered_df.copy().loc[self.filtered_df["Category"] == category]
grouped = dff.groupby("Sub-Category")["Sales","Profit"].sum()
data.append(
go.Scatter(
x=grouped["Sales"].values,
y=grouped["Profit"].values,
text=grouped.index,
mode='markers',
opacity=0.7,
marker={
'size': 15,
'line': {'width': 0.5, 'color': 'white'}
},
name=category
)
)
layout = {
'title': 'Profit vs Sales by Sub-Category',
'xaxis': {'title': 'Sales'},
'yaxis': {'title': 'Profit'},
'hovermode': 'closest',
}
figure = {
'data': data,
'layout': layout,
}
return figure
def make_piechart(self):
# make a pie chart showing sales by region
layout = {
'title': 'Sales by Region',
'showlegend': False,
}
grouped = self.filtered_df.copy().groupby(["Region"])["Sales"].sum()
data = [{
'values': grouped.values,
'text': grouped.index,
'type': 'pie',
'hoverinfo': 'text+value',
}]
figure = {
'data': data,
'layout': layout,
}
return figure
| StarcoderdataPython |
6585805 | # -*- coding: utf-8 -*-
__author__ = 'bernardo'
import sys
import re
from server.handlers import errorHandler
from xml.sax.saxutils import escape, unescape
import codecs
from server.utils.httpMessages import messages
import config
from server.utils.bcolors import bcolors
from server.utils import info
from subprocess import Popen, PIPE
import subprocess
import sys
if sys.version_info >= (3, 0):
import io as strIO
else:
import StringIO as strIO
# Extra entities for xml.sax.saxutils.escape/unescape: quotes are not escaped
# by default and must be listed explicitly. The stored text had the entities
# decoded back into literal quotes (which even broke the syntax via an
# accidental ''' ); restore the proper entity strings.
html_escape_table = {'"': '&quot;',
                     "'": '&apos;'}
# Inverse mapping used by html_unescape().
html_unescape_table = {v: k for k, v in html_escape_table.items()}
def html_escape(text):
    """Escape XML specials (&, <, >) plus the quote characters in html_escape_table."""
    return escape(text, html_escape_table)
def html_unescape(text):
    """Reverse html_escape(): restore XML specials and quote characters."""
    return unescape(text, html_unescape_table)
def server_version():
    """Return the server version string declared in the info module."""
    return info.__SRV_VERSION__
def replaceAll(self, response, getNpost):
    """Render the server-side template tags embedded in a response body.

    Three tag styles are processed, in order:
      * ``<% ... %>``     -- Python code run via ``exec`` with stdout captured;
      * ``<pl% ... %pl>`` -- Perl code run through a subprocess;
      * ``!% ... %!``     -- Python expressions substituted via ``eval``.

    NOTE(review): exec/eval of template content executes arbitrary code; this
    is only acceptable if all templates are fully trusted.

    ``self`` is accepted but never used -- presumably kept for a handler-style
    call signature; confirm against callers before removing.
    """
    # Merge GET and POST parameter lists so template code can inspect `requests`.
    __GET__ = getNpost[0]
    __POST__ = getNpost[1]
    requests = []
    if __GET__ is not None:
        requests.extend(__GET__)
    if __POST__ is not None:
        requests.extend(__POST__)
    match = re.compile('<%(.+?)%>', flags=re.DOTALL)
    results = match.findall(response)
    response_content = response
    exception = None
    for res in results:
        # Capture everything the snippet prints by temporarily swapping stdout.
        if sys.version_info >= (3, 0):
            codeOut = strIO.BytesIO()
        else:
            codeOut = strIO.StringIO()
        sys.stdout = codeOut
        sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
        try:
            exec('# -*- coding: utf-8 -*-\n\r\n\r' + res)
        except Exception as e:
            # Remember the last failure; reported once after the loop.
            exception = e
        sys.stdout = sys.__stdout__
        response2 = codeOut.getvalue()
        if sys.version_info >= (3, 0):
            response2 = response2.decode('UTF-8')
        response_content = response_content.replace('<%' + res + '%>',
                                                    str(response2))
    if exception != None:
        # Any failed snippet replaces the whole page with a 500-style message.
        response_content = messages.InternalError
        errorHandler.handle(" --InternalError:\n\t\t" + str(exception), color=bcolors.BACK_LRED, level = 9)
    match = re.compile('<pl%(.+?)%pl>',
                       flags=re.DOTALL)
    results = match.findall(response)
    # Prepend a `use lib` so Perl snippets can load modules from the www dir.
    perlUseLib = "use lib '"+config.__WWW_DIR__+"';"
    for res in results:
        p = subprocess.Popen(["perl", "-e "+perlUseLib +res+""],
                             stdout=subprocess.PIPE)
        out, err = p.communicate()
        response_content = response_content.replace('<pl%' + res + '%pl>',
                                                    str(out.decode('UTF-8')))
    match = re.compile('!%(.+?)%!',
                       flags=re.DOTALL)
    results = match.findall(response)
    for res in results:
        # NOTE(review): eval result is assumed to already be a string.
        response_content = response_content.replace('!%' + res + '%!',
                                                    eval(res))
    return response_content
| StarcoderdataPython |
6451138 | <reponame>roskenet/kopf
"""
All the functions to keep track of the last seen state.
The "state" is a snapshot of meaningful fields, which must be tracked
to identify the actual changes on the object (or absence of such).
Used in the handling routines to check if there were significant changes at all
(i.e. not the internal and system changes, like the uids, links, etc),
and to get the exact per-field diffs for the specific handler functions.
Conceptually similar to how ``kubectl apply`` stores the applied state
on any object, and then uses that for the patch calculation:
https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/
"""
import copy
import hashlib
import json
from kopf.structs import dicts
from kopf.structs import diffs
LAST_SEEN_ANNOTATION = 'kopf.zalando.org/last-handled-configuration'
""" The annotation name for the last stored state of the resource. """
def get_state(body, extra_fields=None):
    """
    Extract only the relevant fields for the state comparisons.
    The framework ignores all the system fields (mostly from metadata)
    and the status stanza completely. Except for some well-known and useful
    metadata, such as labels and annotations (except for sure garbage).
    A special set of fields can be provided even if they are supposed
    to be removed. This is used, for example, for handlers which react
    to changes in the specific fields in the status stanza,
    while the rest of the status stanza is removed.
    """
    # Always use a copy, so that future changes do not affect the extracted state.
    orig = copy.deepcopy(body)
    body = copy.deepcopy(body)
    # The top-level identifying fields never change, so there is no need to track them.
    if 'apiVersion' in body:
        del body['apiVersion']
    if 'kind' in body:
        del body['kind']
    # Purge the whole stanzas with system info (extra-fields are restored below).
    if 'metadata' in body:
        del body['metadata']
    if 'status' in body:
        del body['status']
    # We want some selected metadata to be tracked implicitly.
    dicts.cherrypick(src=orig, dst=body, fields=[
        'metadata.labels',
        'metadata.annotations',  # but not all of them! deleted below.
    ])
    # But we do not want all of the annotations, only the potentially useful ones.
    annotations = body.get('metadata', {}).get('annotations', {})
    for annotation in list(annotations):
        # Our own bookkeeping annotation and kubectl's applied-config are noise.
        if annotation == LAST_SEEN_ANNOTATION:
            del annotations[annotation]
        if annotation == 'kubectl.kubernetes.io/last-applied-configuration':
            del annotations[annotation]
    # Restore all explicitly whitelisted extra-fields from the original body.
    dicts.cherrypick(src=orig, dst=body, fields=extra_fields)
    # Cleanup the parent structs if they have become empty, for consistent state comparison.
    if 'annotations' in body.get('metadata', {}) and not body['metadata']['annotations']:
        del body['metadata']['annotations']
    if 'metadata' in body and not body['metadata']:
        del body['metadata']
    if 'status' in body and not body['status']:
        del body['status']
    return body
def has_state(body):
    """Tell whether the body already carries a last-handled-configuration annotation."""
    meta = body.get('metadata', {})
    return LAST_SEEN_ANNOTATION in meta.get('annotations', {})
def get_state_diffs(body, extra_fields=None):
    """Return (old, new, diff) between the stored and the freshly computed state."""
    old = retreive_state(body)
    new = get_state(body, extra_fields=extra_fields)
    return old, new, diffs.diff(old, new)
def retreive_state(body):
    """Parse the last-handled state from the annotation; None if absent.

    (The misspelled name is kept as-is for compatibility with existing callers.)
    """
    state_str = body.get('metadata', {}).get('annotations', {}).get(LAST_SEEN_ANNOTATION, None)
    state_obj = json.loads(state_str) if state_str is not None else None
    return state_obj
def refresh_state(*, body, patch, extra_fields=None):
    """Patch the last-seen annotation when the essential state has changed.

    If a frozen snapshot exists (see freeze_state), it takes priority over
    the freshly computed state, and is cleared once applied.
    """
    frozen_state = body.get('status', {}).get('kopf', {}).get('frozen-state')
    stored_state = retreive_state(body)
    actual_state = get_state(body, extra_fields=extra_fields)
    if stored_state is None or stored_state != actual_state:
        annotations = patch.setdefault('metadata', {}).setdefault('annotations', {})
        annotations[LAST_SEEN_ANNOTATION] = frozen_state or json.dumps(actual_state)
    if frozen_state is not None:
        # The frozen snapshot is one-shot: clear it after it has been used.
        storage = patch.setdefault('status', {}).setdefault('kopf', {})
        storage['frozen-state'] = None
def freeze_state(*, body, patch, extra_fields=None):
    """Snapshot the current essential state into status.kopf.frozen-state.

    Only the first call freezes anything; subsequent calls are no-ops until
    the snapshot is consumed and cleared by refresh_state().
    """
    frozen_state = body.get('status', {}).get('kopf', {}).get('frozen-state')
    actual_state = get_state(body, extra_fields=extra_fields)
    if frozen_state is None:
        storage = patch.setdefault('status', {}).setdefault('kopf', {})
        storage['frozen-state'] = json.dumps(actual_state)
def compute_digest(body, extra_fields=None):
    """Return an MD5 hex digest of the object's essential state.

    The digest is used purely for change detection, not for security.
    """
    state = get_state(body, extra_fields=extra_fields)
    # Any digest with a short str/int result is sufficient. Even CRC. No security is needed.
    digest = hashlib.md5()  # renamed from `hash` to avoid shadowing the builtin
    digest.update(json.dumps(state).encode('utf-8'))
    return digest.hexdigest()
| StarcoderdataPython |
1773904 | <reponame>parkerwray/smuthi-1
import sys
from smuthi.version import __version__
# Determine the MPI rank so that, in parallel runs, only one process prints
# banners. Falls back to rank 0 when mpi4py is not installed or fails to load.
try:
    from mpi4py import MPI
    mpi_comm = MPI.COMM_WORLD
    mpi_rank = mpi_comm.Get_rank()
except:
    mpi_rank = 0
def print_smuthi_header():
    """Write the SMUTHI version banner to stdout and flush it immediately."""
    border = "*" * 32
    sys.stdout.write("\n%s\n SMUTHI version %s\n%s\n" % (border, __version__, border))
    sys.stdout.flush()
#if mpi_rank == 0:
# print_smuthi_header()
| StarcoderdataPython |
5087654 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Formatting functions for printing pipes
"""
from __future__ import annotations
from meerschaum.utils.typing import PipesDict, Dict
def pprint_pipes(pipes : PipesDict) -> None:
    """
    Print a stylized tree of a Pipes dictionary.
    Supports ANSI and UNICODE global settings.
    This code is pretty unreadable. Just a warning. But it's thoroughly tested,
    so things *shouldn't* break.
    """
    from meerschaum.utils.warnings import error
    from meerschaum.utils.packages import attempt_import, import_rich
    from meerschaum.utils.misc import sorted_dict, replace_pipes_in_dict
    from meerschaum.utils.formatting import UNICODE, ANSI, pprint, colored, get_console
    from meerschaum.config import get_config
    import copy
    rich = import_rich('rich', warn=False)
    Text = None
    if rich is not None:
        rich_text = attempt_import('rich.text')
        Text = rich_text.Text
    # Icons and styles per key type; populated below only when the global
    # UNICODE / ANSI feature flags are enabled, otherwise left empty.
    icons = {'connector' : '', 'metric' : '', 'location' : '', 'key' : ''}
    styles = {'connector' : '', 'metric' : '', 'location' : '', 'key' : ''}
    guide_style, none_style = '', ''
    if UNICODE:
        icons['connector'] = get_config('formatting', 'emoji', 'connector', patch=True) + ' '
        icons['metric'] = get_config('formatting', 'emoji', 'metric', patch=True) + ' '
        icons['location'] = get_config('formatting', 'emoji', 'location', patch=True) + ' '
        icons['key'] = get_config('formatting', 'emoji', 'key', patch=True) + ' '
    if ANSI:
        styles['connector'] = 'green'
        styles['metric'] = 'bright_blue'
        styles['location'] = 'magenta'
        guide_style = 'dim'
        none_style = 'black on magenta'
    print()
    def ascii_print_pipes():
        """
        Print the dictionary with no unicode allowed. Also works in case rich fails to import
        (though rich should auto-install when `attempt_import()` is called).
        """
        asciitree = attempt_import('asciitree')
        # NOTE(review): this helper appears to be unused in the code below.
        def _replace_pipe_ascii_tree(pipe):
            return {str(pipe) : {}}
        # replace_dict maps the colored key strings back to rich Text objects
        # so replace_tree_text() can restore styling after asciitree rendering.
        ascii_dict, replace_dict = {}, {'connector' : {}, 'metric' : {}, 'location' : {}}
        for conn_keys, metrics in pipes.items():
            _colored_conn_key = colored(icons['connector'] + conn_keys, style=styles['connector'])
            if Text is not None:
                replace_dict['connector'][_colored_conn_key] = (
                    Text(conn_keys, style=styles['connector'])
                )
            ascii_dict[_colored_conn_key] = {}
            for metric, locations in metrics.items():
                _colored_metric_key = colored(icons['metric'] + metric, style=styles['metric'])
                if Text is not None:
                    replace_dict['metric'][_colored_metric_key] = (
                        Text(metric, style=styles['metric'])
                    )
                ascii_dict[_colored_conn_key][_colored_metric_key] = {}
                for location, pipe in locations.items():
                    if location is None:
                        _location_style = none_style
                    else:
                        _location_style = styles['location']
                    pipe_addendum = '\n ' + str(pipe)
                    _colored_location = colored(
                        icons['location'] + str(location), style=_location_style
                    )
                    _colored_location_key = _colored_location + pipe_addendum
                    if Text is not None:
                        replace_dict['location'][_colored_location] = (
                            Text(str(location), style=_location_style)
                        )
                    ascii_dict[_colored_conn_key][_colored_metric_key][_colored_location_key] = {}
        tree = asciitree.LeftAligned()
        output = ''
        cols = []
        ### This is pretty terrible, unreadable code.
        ### Please know that I'm normally better than this.
        key_str = (
            (Text(" ") if Text is not None else " ") +
            (
                Text("Key", style='underline') if Text is not None else
                colored("Key", style='underline')
            ) + (Text('\n\n ') if Text is not None else '\n\n ') +
            (
                Text("Connector", style=styles['connector']) if Text is not None else
                colored("Connector", style=styles['connector'])
            ) + (Text('\n +-- ') if Text is not None else '\n +-- ') +
            (
                Text("Metric", style=styles['metric']) if Text is not None else
                colored("Metric", style=styles['metric'])
            ) + (Text('\n +-- ') if Text is not None else '\n +-- ') +
            (
                Text("Location", style=styles['location']) if Text is not None else
                colored("Location", style=styles['location'])
            ) + (Text('\n\n') if Text is not None else '\n\n')
        )
        output += str(key_str)
        cols.append(key_str)
        def replace_tree_text(tree_str : str) -> Text:
            """
            Replace the colored words with stylized Text instead.
            Is not executed if ANSI and UNICODE are disabled.
            """
            tree_text = Text(tree_str) if Text is not None else None
            for k, v in replace_dict.items():
                for _colored, _text in v.items():
                    parts = []
                    lines = tree_text.split(_colored)
                    for part in lines:
                        parts += [part, _text]
                    if lines[-1] != Text(''):
                        parts = parts[:-1]
                    _tree_text = Text('')
                    for part in parts:
                        _tree_text += part
                    tree_text = _tree_text
            return tree_text
        tree_output = ""
        for k, v in ascii_dict.items():
            branch = {k : v}
            tree_output += tree(branch) + '\n\n'
            if not UNICODE and not ANSI:
                _col = (Text(tree(branch)) if Text is not None else tree(branch))
            else:
                _col = replace_tree_text(tree(branch))
            cols.append(_col)
        if len(output) > 0:
            tree_output = tree_output[:-2]
        output += tree_output
        if rich is None:
            return print(output)
        rich_columns = attempt_import('rich.columns')
        Columns = rich_columns.Columns
        columns = Columns(cols)
        # rich.print(columns)
        get_console().print(columns)
    # Without unicode support, fall back to the plain ASCII tree renderer.
    if not UNICODE:
        return ascii_print_pipes()
    rich_panel, rich_tree, rich_text, rich_columns, rich_table = attempt_import(
        'rich.panel',
        'rich.tree',
        'rich.text',
        'rich.columns',
        'rich.table',
    )
    from rich import box
    Panel = rich_panel.Panel
    Tree = rich_tree.Tree
    Text = rich_text.Text
    Columns = rich_columns.Columns
    Table = rich_table.Table
    # Legend panel explaining the icon/colour coding of the trees.
    key_panel = Panel(
        (
            Text("\n") +
            Text(icons['connector'] + "Connector", style=styles['connector']) + Text("\n\n") +
            Text(icons['metric'] + "Metric", style=styles['metric']) + Text("\n\n") +
            Text(icons['location'] + "Location", style=styles['location']) + Text("\n")
        ),
        title = Text(icons['key'] + "Keys", style=guide_style),
        border_style = guide_style,
        expand = True
    )
    cols = []
    conn_trees = {}
    metric_trees = {}
    pipes = sorted_dict(pipes)
    # Build one rich Tree per connector, nesting metrics and then locations.
    for conn_keys, metrics in pipes.items():
        conn_trees[conn_keys] = Tree(
            Text(
                icons['connector'] + conn_keys,
                style = styles['connector'],
            ),
            guide_style = styles['connector']
        )
        metric_trees[conn_keys] = dict()
        for metric, locations in metrics.items():
            metric_trees[conn_keys][metric] = Tree(
                Text(
                    icons['metric'] + metric,
                    style = styles['metric']
                ),
                guide_style = styles['metric']
            )
            conn_trees[conn_keys].add(metric_trees[conn_keys][metric])
            for location, pipe in locations.items():
                if location is None:
                    _location = Text(str(location), style=none_style)
                else:
                    _location = Text(location, style=styles['location'])
                _location = Text(icons['location']) + _location + Text("\n" + str(pipe) + "\n")
                metric_trees[conn_keys][metric].add(_location)
    cols += [key_panel]
    for k, t in conn_trees.items():
        cols.append(t)
    columns = Columns(cols)
    # rich.print(columns)
    get_console().print(columns)
def pprint_pipe_columns(
        pipe : meerschaum.Pipe,
        nopretty : bool = False,
        debug : bool = False,
    ) -> None:
    """
    Pretty-print a pipe's columns.

    With ``nopretty`` set, emits machine-readable JSON lines instead of the
    rich table.
    """
    import json
    from meerschaum.utils.warnings import info
    from meerschaum.utils.formatting import pprint, print_tuple, get_console
    from meerschaum.utils.formatting._shell import make_header
    from meerschaum.utils.packages import attempt_import, import_rich
    # Registered index columns vs. the actual table's column-to-type mapping.
    _cols = pipe.columns
    _cols_types = pipe.get_columns_types(debug=debug)
    def _nopretty_print():
        # One JSON document per line: pipe state, registered columns, types.
        print(json.dumps(pipe.__getstate__()))
        print(json.dumps(_cols))
        print(json.dumps(_cols_types))
    def _pretty_print():
        rich = import_rich()
        rich_table = attempt_import('rich.table')
        table = rich_table.Table(title=f"Column types for pipe '{pipe}'")
        table.add_column('Column')
        table.add_column('Type', justify='right')
        info(make_header(f"\nColumns for pipe '{pipe}':"), icon=False)
        if _cols:
            pprint(_cols, nopretty=nopretty)
            print()
        else:
            print_tuple((False, f"No registered columns for pipe '{pipe}'."))
        for c, t in _cols_types.items():
            table.add_row(c, t)
        if _cols_types:
            # rich.print(table)
            get_console().print(table)
            # print(f"\nTable columns and types:")
            # pprint(_cols_types, nopretty=nopretty)
        else:
            print_tuple((False, f"No table columns for pipe '{pipe}'. Does the pipe exist?"))
    if nopretty:
        _nopretty_print()
    else:
        _pretty_print()
| StarcoderdataPython |
6516841 | <reponame>mehak-mann/mehak-mann.github.io<gh_stars>1-10
import os
from flask import Flask
from flask import render_template
from flask import request
from instance.scripts.analysis import main
app = Flask(__name__)
# Uploaded data files are stored next to the analysis scripts in the instance dir.
uploads_dir = os.path.join(app.instance_path, 'scripts')
app.config['FLASK_APP'] = 'app'
# NOTE(review): development/debug settings are hard-coded on; disable for production.
app.config['ENV'] = 'development'
app.config['DEBUG'] = True
app.config['TESTING'] = True
@app.route('/')
def home():
    """Landing page."""
    return render_template('index.html')
@app.route('/analysis')
def analysis():
    """Analysis results page (normally populated via POST to /get_data)."""
    return render_template('analysis.html')
@app.route('/info')
def info():
    """Static information page."""
    return render_template('info.html')
@app.route('/charts')
def charts():
    """Charts page for the default dataset."""
    return render_template('charts.html')
@app.route('/chartsm')
def chartsm():
    """Charts page variant for the 'matt' dataset."""
    return render_template('charts-matt.html')
@app.route('/get_data', methods=['POST'])
def get_data():
    """Accept an uploaded chat export, analyse it, and render the analysis page.

    The upload is saved into the instance scripts directory, passed to
    ``main()``, and the resulting metrics tuple is unpacked for the template.
    """
    dataFile = request.files['file_path']
    dataFileName = dataFile.filename
    dataFile.save(os.path.join(uploads_dir, dataFileName))
    # Infer the parser from the file name.
    # NOTE(review): fileType stays unbound if the name contains neither "csv"
    # nor "json", which raises UnboundLocalError below -- confirm uploads are
    # restricted to those extensions.
    if "csv" in dataFileName:
        fileType = "csv"
    if "json" in dataFileName:
        fileType = "json"
    # Files with "matt" in the name use their own charts page.
    if "matt" in dataFileName:
        which_url = "/chartsm"
    else:
        which_url ="/charts"
    filePath = 'scripts/' + dataFileName
    result = main(os.path.join(app.instance_path, filePath), fileType)
    # Unpack the analysis tuple (index 5, caps-lock ratio, is currently unused).
    numberOfMessages = result[0]
    numberOfResponses = result[1]
    averageResponseTimes = result[2]
    conversationsStarted = result[3]
    numberOfWords = result[4]
    # capsLockRatio = result[5]
    userSentiment = result[6]
    userKeywords = result[7]
    conversationSentiment = result[8]
    # Bucket the overall sentiment score into a human-readable verdict.
    if (conversationSentiment < -0.25):
        sentiment = "toxic. Try harder to get along with this person."
        overallSent = "😠"
    elif (conversationSentiment > 0.25):
        sentiment = "healthy. You are great friends!"
        overallSent = "😊"
    else:
        sentiment = "neutral. This may indicate that the conversation didn't have much value. There could have been a mix of positive and negative emotions."
        overallSent = "😐"
    # Format the two participants' average response timedeltas for display.
    aRT=averageResponseTimes
    aRTKeys=list(averageResponseTimes.keys())
    aRTLeft=aRT[aRTKeys[0]]
    aRTRight=aRT[aRTKeys[1]]
    secondsLeft = aRTLeft.seconds
    secondsRight = aRTRight.seconds
    aRTLeft=[aRTLeft.days, secondsLeft//3600, (secondsLeft//60)%60, secondsLeft%60]
    aRTRight=[aRTRight.days, secondsRight//3600, (secondsRight//60)%60, secondsRight%60]
    aRTLeftStr = "{} days {} hr {} min {} secs".format(aRTLeft[0], aRTLeft[1], aRTLeft[2], aRTLeft[3])
    aRTRightStr = "{} days {} hr {} min {} secs".format(aRTRight[0], aRTRight[1], aRTRight[2], aRTRight[3])
    return render_template('analysis.html', nM=numberOfMessages, nMKeys=list(numberOfMessages.keys()),
                                            nR=numberOfResponses, nRKeys=list(numberOfResponses.keys()),
                                            cS=conversationsStarted, cSKeys=list(conversationsStarted.keys()),
                                            aRTKeys=aRTKeys, aRTLeft=aRTLeftStr, aRTRight=aRTRightStr,
                                            nW=numberOfWords, nWKeys=list(numberOfWords.keys()),
                                            #cLR=capsLockRatio, cLRKeys=list(capsLockRatio.keys()),
                                            uS=userSentiment, uSKeys=list(userSentiment.keys()),
                                            uK=userKeywords, uKKeys=list(userKeywords.keys()),
                                            cSe=conversationSentiment, sentiment=sentiment, overallSent=overallSent,
                                            which_url=which_url
                                            )
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
60265 | <filename>SimCalorimetry/EcalSimProducers/python/esEcalLiteDTUPedestalsProducer_cfi.py
import FWCore.ParameterSet.Config as cms
# Empty event-setup source: registers the EcalLiteDTUPedestalsRcd record with a
# single run-based validity interval, since no database payload exists for it.
EcalLiteDTUPedestalsRcd = cms.ESSource("EmptyESSource",
    recordName = cms.string("EcalLiteDTUPedestalsRcd"),
    firstValid = cms.vuint32(1),
    iovIsRunNotTime = cms.bool(True)
)
# Producer filling that record with constant pedestal mean/RMS values
# (in ADC counts) for the two CATIA gains (x10 and x1).
EcalLiteDTUPedestals = cms.ESProducer(
    "EcalLiteDTUPedestalsESProducer",
    ComponentName = cms.string('EcalLiteDTUPedestalProducer'),
    MeanPedestalsGain10 = cms.double(12),
    RMSPedestalsGain10 = cms.double(2.5),
    MeanPedestalsGain1 = cms.double(12.),
    RMSPedestalsGain1 = cms.double(2.)
)
| StarcoderdataPython |
1713949 | import requests_mock
from openregister.client import RegisterClient
def test_client_url():
    """RegisterClient construction and a mocked GET round-trip return the mocked body."""
    client = RegisterClient()
    assert client is not None
    with requests_mock.Mocker() as mock:
        mock.get("https://example.com", text="data")
        assert "data" == client.get("https://example.com").text
| StarcoderdataPython |
11219547 | <reponame>BenOsborn/Cerci
class AddElementwise:
    """Elementwise addition op with derivative helpers for the autograd graph."""

    @staticmethod
    def forward(matrix_left, matrix_right, backwards=False):
        assert(matrix_left.shape == matrix_right.shape)
        summed = [x + y for x, y in zip(matrix_left.tensor, matrix_right.tensor)]
        if backwards:
            # Gradient-pass arithmetic must not extend the autograd graph.
            return Tensor(summed, matrix_left.shape, left=None, right=None,
                          track_grad=False, operator=None)
        return Tensor(summed, matrix_left.shape, left=matrix_left, right=matrix_right,
                      track_grad=(matrix_left.track_grad or matrix_right.track_grad),
                      operator=AddElementwise)

    @staticmethod
    def ddleft(matrix_left, matrix_right):
        """d(l+r)/dl: a detached tensor of ones."""
        assert(matrix_left.shape == matrix_right.shape)
        ones = [1] * matrix_left.size
        return Tensor(ones, matrix_left.shape, left=None, right=None,
                      track_grad=False, operator=None)

    @staticmethod
    def ddright(matrix_left, matrix_right):
        """d(l+r)/dr: a detached tensor of ones."""
        assert(matrix_left.shape == matrix_right.shape)
        ones = [1] * matrix_left.size
        return Tensor(ones, matrix_left.shape, left=None, right=None,
                      track_grad=False, operator=None)
class MultiplyElementwise:
    """Hadamard-product op with derivative helpers for the autograd graph."""

    @staticmethod
    def forward(matrix_left, matrix_right, backwards=False):
        assert(matrix_left.shape == matrix_right.shape)
        product = [x * y for x, y in zip(matrix_left.tensor, matrix_right.tensor)]
        if backwards:
            # Gradient-pass arithmetic must not extend the autograd graph.
            return Tensor(product, matrix_left.shape, left=None, right=None,
                          track_grad=False, operator=None)
        return Tensor(product, matrix_left.shape, left=matrix_left, right=matrix_right,
                      track_grad=(matrix_left.track_grad or matrix_right.track_grad),
                      operator=MultiplyElementwise)

    @staticmethod
    def ddleft(matrix_left, matrix_right):
        """d(l*r)/dl equals r (returned as a detached copy)."""
        assert(matrix_left.shape == matrix_right.shape)
        return Tensor(matrix_right.tensor.copy(), matrix_right.shape.copy(), left=None, right=None,
                      track_grad=False, operator=None)

    @staticmethod
    def ddright(matrix_left, matrix_right):
        """d(l*r)/dr equals l (returned as a detached copy)."""
        assert(matrix_left.shape == matrix_right.shape)
        return Tensor(matrix_left.tensor.copy(), matrix_left.shape.copy(), left=None, right=None,
                      track_grad=False, operator=None)
class TensorBase:
    """Flat-storage n-dimensional tensor with pretty printing and elementwise ops.

    Data lives in a flat list ``tensor``; ``shape`` gives the dimension sizes.
    """
    def __init__(self, tensor, shape): # The left and the right will contain the links to the other nodes in the tree
        self.dims = len(shape)
        self.size = len(tensor)
        # Sanity check: flat buffer length must equal the product of the dims.
        check_length = 1
        for i in range(self.dims):
            check_length *= shape[i]
        assert(check_length == self.size)
        self.tensor = tensor
        self.shape = shape
    def __str__(self):
        return self.__string()
    # There is probably a more efficient way to do this
    def __string(self, index=-1, position=0):
        # Recursive renderer: `index` counts dimensions from the outside
        # (negative), `position` is the flat offset of the current slice.
        # The innermost rows are printed inline between brackets.
        if (abs(index) == self.dims):
            mat = "[ "
            for i in range(self.shape[0]):
                mat += f"{self.tensor[position + i]} "
            mat += "]"
            return mat
        mat_final = "[ "
        # Stride of one step along this dimension in the flat buffer.
        product = 1
        for i in range(self.dims + index):
            product *= self.shape[i]
        for i in range(self.shape[index]):
            mat_final += f"\n{abs(index) * ' '}{ self.__string(index-1, position+product*i)} "
        return f"{mat_final}\n{(abs(index) - 1) * ' '}]" if (index != -1) else f"{mat_final}\n]"
    def __add__(self, other):
        # Elementwise addition; builds an autograd node (see AddElementwise).
        return AddElementwise.forward(self, other)
    def __mul__(self, other):
        # Elementwise (Hadamard) product, not matrix multiplication.
        return MultiplyElementwise.forward(self, other)
class Tensor(TensorBase):
    """TensorBase plus reverse-mode autograd bookkeeping.

    Fix from review: the original only assigned ``.operator``/``.left``/
    ``.right``/``.grad`` when ``track_grad`` was True, yet ``zeroGrad()`` and
    ``backwards()`` read ``.left``/``.right`` unconditionally, raising
    AttributeError on non-tracked tensors. The graph attributes are now always
    initialised, and gradient accumulation is guarded by ``track_grad``.
    """

    def __init__(self, tensor, shape, left=None, right=None, track_grad=False, operator=None):
        super().__init__(tensor, shape)
        self.track_grad = track_grad
        self.operator = operator
        self.left = left
        self.right = right
        # The gradient accumulator exists only for tracked tensors; creating it
        # unconditionally would recurse forever (the grad tensor would need its
        # own grad, and so on).
        self.grad = Tensor([0 for _ in range(self.size)], self.shape) if track_grad else None

    def zeroGrad(self):
        """Reset accumulated gradients for this node and everything below it."""
        if self.track_grad:
            self.grad = Tensor([0 for _ in range(self.size)], self.shape)
        if self.left is not None:
            self.left.zeroGrad()
        if self.right is not None:
            self.right.zeroGrad()

    def backwards(self, factors=None):
        """Backpropagate: accumulate d(output)/d(self) into .grad via the chain rule."""
        if factors is None:
            # Seed the backward pass with ones (d(out)/d(out) == 1).
            factors = Tensor([1 for _ in range(self.size)], self.shape)
        if self.track_grad:
            self.grad = AddElementwise.forward(self.grad, factors, backwards=True)
        # Recurse into the children, scaling the incoming factors by the
        # operator's partial derivative w.r.t. each side.
        if self.left is not None:
            self.left.backwards(factors=MultiplyElementwise.forward(factors, self.operator.ddleft(self.left, self.right)))
        if self.right is not None:
            self.right.backwards(factors=MultiplyElementwise.forward(factors, self.operator.ddright(self.left, self.right)))
6410309 | <gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, re
class IOCommon(object):
    """Shared input/output helpers for Spark benchmark workloads.

    Configuration is loaded once from the file named by the
    SPARKBENCH_PROPERTIES_FILES environment variable into the class-level
    ``conf`` dict.

    Fixes from review: ``save()``'s first parameter was misspelled
    ``selfself``, and its Sequence branch built ``sequence_data`` but then
    saved the raw ``data`` instead of it.
    """
    conf = {}

    def __init__(self, sc):
        # sc: the SparkContext used for all reads and writes.
        self.sc = sc

    @classmethod
    def getProperty(cls, key, default = None):
        """Look up a benchmark property, falling back to ``default``."""
        return cls.conf.get(key, default)

    @classmethod
    def getPropertiesFromFile(cls):
        """Populate ``cls.conf`` from the SPARKBENCH_PROPERTIES_FILES file."""
        def split(x):
            # Split "key<whitespace>value"; a key without a value maps to ''.
            ret = re.split(r"\s", x.strip(), 1)
            if len(ret) < 2: return (ret[0], '')
            return tuple(ret)
        prop_file = os.environ.get("SPARKBENCH_PROPERTIES_FILES", None)
        assert prop_file, "SPARKBENCH_PROPERTIES_FILES undefined!"
        with open(prop_file) as f:
            cls.conf = dict([split(x.strip()) for x in f.readlines() if x.strip() and x.strip()[0]!="#"])

    def load(self, filename, force_format=None):
        """Read ``filename`` as an RDD of lines, honoring the configured input format."""
        input_format = force_format if force_format else IOCommon.getProperty("sparkbench.inputformat", "Text")
        if input_format == "Text":
            return self.sc.textFile(filename)
        elif input_format == "Sequence":
            return self.sc.sequenceFile(filename, "org.apache.hadoop.io.NullWritable, org.apache.hadoop.io.Text")\
                .map(lambda x:x[1])
        else:
            raise Exception("Unknown input format: %s" % input_format)

    def save(self, filename, data, PropPrefix = "sparkbench.outputformat"):
        """Write RDD ``data`` to ``filename`` in the configured output format."""
        output_format = IOCommon.getProperty(PropPrefix, "Text")
        output_format_codec = IOCommon.getProperty(PropPrefix+".codec")
        if output_format == "Text":
            if not output_format_codec: # isEmpty
                data.saveAsTextFile(filename)
            else:
                # Codecs are not supported by the Python text-save API.
                print("Warning, save as text file with a format codec is unsupported in python api")
                data.saveAsTextFile(filename)
        elif output_format == "Sequence":
            # Sequence files need (key, value) pairs; use a null key per record.
            sequence_data = data.map(lambda x:(None, x))
            if not output_format_codec: # isEmpty
                sequence_data.saveAsHadoopFile(filename, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                                               "org.apache.hadoop.io.NullWritable", "org.apache.hadoop.io.Text")
            else:
                sequence_data.saveAsHadoopFile(filename, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                                               "org.apache.hadoop.io.NullWritable", "org.apache.hadoop.io.Text",
                                               compressionCodecClass = output_format_codec)
IOCommon.getPropertiesFromFile() | StarcoderdataPython |
3426922 | <gh_stars>1-10
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_video_unique_count(path):
    """Return a length-100 sequence of per-frame difference scores for a video.

    Each score is the number of unique pixel values in the absolute difference
    between consecutive (grayscale, resized, blurred) frames -- low values
    indicate a stationary scene. Videos yielding more than 100 scores are
    reported as all zeros.
    """
    vidcap = cv2.VideoCapture(path)
    frame_index = 0
    previous = None
    unique_elements = []
    while True:
        success, frame = vidcap.read()
        if not success:
            break
        # Preprocess: grayscale, fixed size, blur to suppress sensor noise.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = cv2.resize(frame, (640, 480))
        frame = cv2.blur(frame, (5, 5))
        if previous is not None:
            diff_image = cv2.absdiff(previous, frame)
            unique = len(np.unique(diff_image))
            print("Frame No:", frame_index, " | ", unique)
            unique_elements.append(unique)
        previous = frame
        frame_index += 1
    # Release the capture handle instead of leaking it.
    vidcap.release()
    print(len(unique_elements))
    # NOTE(review): the long-video early return yields a plain list while the
    # padded path returns a numpy array -- confirm callers accept either.
    if len(unique_elements) > 100:
        return [0 for i in range(0, 100)]
    return padarray(unique_elements, 100)
def padarray(A, size):
    """Right-pad 1-D sequence ``A`` with zeros up to ``size`` elements."""
    shortfall = size - len(A)
    return np.pad(A, pad_width=(0, shortfall), mode='constant')
if __name__ == "__main__":
    # Gather all sample videos; only the first few are analysed and plotted.
    VIDEO_LIST = glob.glob("/home/nj/HBRS/Studies/Sem-3/CV/Dataset/Videos/*.avi")
    print(len(VIDEO_LIST))
    VIDEO = "/home/nj/HBRS/Studies/Sem-3/CV/Dataset/Videos/CV19_video_6.avi" #20
    # VIDEO = "/home/nj/HBRS/Studies/Sem-3/CV/Dataset/Videos/CV19_video_250.avi"
    Y_LIST = []
    X_LIST = [i for i in range(0,100)]
    for i,pth in enumerate(VIDEO_LIST):
        # print(get_video_unique_count(pth))
        # Limit the run to the first 4 videos to keep processing quick.
        if i == 4:
            break
        print(pth)
        Y_LIST.append(get_video_unique_count(pth))
    # Overlay all per-video difference curves on one figure.
    for y_list in Y_LIST:
        # plt.plot(X_LIST, y_list, color=np.random.rand(3,))
        plt.plot(X_LIST, y_list, color='red',alpha=0.3)
        plt.scatter(X_LIST, y_list,label='Frame')
    plt.title("Stable frame detection")
    plt.xlabel("Number of frames ")
    plt.ylabel("Number of unique elements")
    # Reference lines marking the stable-frame region (hand-tuned thresholds).
    plt.axhline(y=15, color='r', linestyle='dotted')
    plt.axhline(y=150, color='r', linestyle='dotted')
    plt.axvline(x=2, color='r', linestyle='dotted')
    plt.axvline(x=13, color='r', linestyle='dotted')
    # Add ticks
    locs, labels = plt.xticks()
    print(locs)
    plt.legend()
    plt.show()
| StarcoderdataPython |
5000093 | <gh_stars>0
import sys, pdb, os
import time
import numpy as np
import tensorflow as tf
class CB_VEC:
    """CBOW-style embedding model over medical visit sequences (TensorFlow 1.x).

    Learns diagnosis-code (dx) and prescription-code (rx) embeddings jointly.
    The whole computation graph is built in ``__init__``; inputs are fed
    through the ``dx_var`` / ``rx_var`` / ``dx_label`` / ``dx_mask``
    placeholders.

    NOTE(review): the original source lost its indentation; branch placement
    below was reconstructed from the code's logic -- confirm against the
    upstream repository.
    """
    def __init__(self,v_window_size=3,c_window_size=3,num_dx=200,num_rx=20000,dx_emb_size=64,rx_emb_size=64,
                 max_visit_per_person=34,max_rx_per_dx=1000,visit_emb_size=16
                 ,l2=0.001,batch_size=16,max_dx_per_visit=1):
        # --- hyper-parameters ------------------------------------------------
        self.v_window_size=v_window_size
        self.c_window_size=c_window_size
        self.num_dx=num_dx
        self.num_rx=num_rx
        self.dx_emb_size=dx_emb_size
        self.rx_emb_size=rx_emb_size
        self.max_visit_per_person=max_visit_per_person
        self.max_dx_per_visit=max_dx_per_visit#
        self.max_rx_per_dx=max_rx_per_dx
        self.visit_emb_size=visit_emb_size
        self.L2=l2
        self.batch_size=batch_size
        self.one_label=1
        # --- embedding tables (ReLU keeps embeddings non-negative) -----------
        W_emb_dx = tf.get_variable('W_emb_dx', shape=(self.num_dx, self.dx_emb_size), dtype=tf.float32)
        W_emb_rx = tf.get_variable('W_emb_rx', shape=(self.num_rx, self.rx_emb_size), dtype=tf.float32)
        self.W_emb_rx_ = tf.nn.relu(W_emb_rx)
        self.W_emb_dx_ = tf.nn.relu(W_emb_dx)#
        # self.W_emb_rx_ = tf.nn.l2_normalize(W_emb_rx)#
        # self.W_emb_dx_ = tf.nn.l2_normalize(W_emb_dx)
        #label
        # --- input placeholders ----------------------------------------------
        self.dx_var = tf.placeholder(tf.int32, shape=(None, self.max_visit_per_person, self.max_dx_per_visit), name='dx_var')
        self.rx_var = tf.placeholder(tf.int32, shape=(None, self.max_visit_per_person, self.max_dx_per_visit, self.max_rx_per_dx), name='rx_var')
        self.dx_label=tf.placeholder(tf.int32, shape=(None, self.max_visit_per_person, self.max_dx_per_visit), name='dx_label')
        self.dx_mask=tf.placeholder(tf.int32, shape=(None, self.max_visit_per_person, self.max_dx_per_visit), name='dx_mask')
        # --- embedding lookups and per-visit aggregation ----------------------
        self.dx_visit = tf.nn.embedding_lookup(self.W_emb_dx_, tf.reshape(self.dx_var, (-1, self.max_dx_per_visit)))
        dx_visit = tf.reshape(self.dx_visit, (-1, self.dx_emb_size))#
        rx_visit = tf.nn.embedding_lookup(self.W_emb_rx_, tf.reshape(self.rx_var, (-1, self.max_rx_per_dx)))
        preVec=tf.reshape(rx_visit,(-1,self.max_visit_per_person,self.max_dx_per_visit*self.max_rx_per_dx,self.rx_emb_size))
        rx_visit = tf.reduce_sum(rx_visit, axis=1)
        # Project dx embeddings into rx-embedding space and combine.
        W_dr = tf.Variable(tf.truncated_normal([self.dx_emb_size,self.rx_emb_size], stddev=0.1), name="W_dr")
        b_dr = tf.Variable(tf.constant(0.1, shape=[self.rx_emb_size]), name="b_dr")
        dr_visit=tf.matmul(dx_visit,W_dr) + b_dr
        dx_visit=tf.nn.relu(dr_visit)
        dr_visit = dx_visit * rx_visit
        dx_obj = dx_visit + dr_visit#
        # dx_obj =dr_visit #
        # Project combined dx/rx features to the visit-embedding space.
        W_dx = tf.Variable(tf.truncated_normal([self.rx_emb_size,self.visit_emb_size], stddev=0.1), name="W_dx")
        b_dx = tf.Variable(tf.constant(0.1, shape=[self.visit_emb_size]), name="b_dx")
        dx_obj=tf.matmul(dx_obj,W_dx) + b_dx
        dx_obj=tf.nn.relu(dx_obj)
        #
        # Sum per-diagnosis features into a single visit embedding.
        pre_visit = tf.reshape(dx_obj, (-1,self.max_dx_per_visit, self.visit_emb_size))
        visit = tf.reduce_sum(pre_visit, axis=1)
        # Output projection over the dx vocabulary.
        W_output = tf.Variable(tf.truncated_normal([self.visit_emb_size,self.num_dx], stddev=0.1), name="W_output")
        b_output = tf.Variable(tf.constant(0.1, shape=[self.num_dx]), name="b_outpt")
        dx_output=tf.matmul(dx_obj,W_output) + b_output
        dx_output=tf.nn.relu(dx_output)
        seq_visit = tf.reshape(visit, (-1, self.max_visit_per_person, self.visit_emb_size))
        dx_output=tf.reshape(dx_output,(-1, self.max_visit_per_person, self.num_dx))
        #
        # Softmax denominators for the code-level (skip-gram style) losses.
        all_rx_exp=tf.reduce_sum(tf.exp(tf.matmul(self.W_emb_rx_,tf.transpose(self.W_emb_rx_,[1,0]))),axis=1)
        rx_exp_visit=tf.nn.embedding_lookup(all_rx_exp,tf.reshape(self.rx_var,(-1,self.max_rx_per_dx)))
        rx_exp_visit=tf.reshape(rx_exp_visit,(-1,self.max_visit_per_person,self.max_dx_per_visit*self.max_rx_per_dx))
        all_dx_exp = tf.reduce_sum(tf.exp(tf.matmul(self.W_emb_dx_, tf.transpose(self.W_emb_dx_, [1, 0]))), axis=1)
        dx_exp_visit = tf.nn.embedding_lookup(all_dx_exp, tf.reshape(self.dx_var, (-1, self.max_dx_per_visit)))
        dx_exp_visit = tf.reshape(dx_exp_visit, (-1, self.max_visit_per_person))
        with tf.name_scope("outputs"):
            self.output=tf.reshape(visit, (-1, self.max_visit_per_person*self.visit_emb_size))
            dx_target=tf.one_hot(self.dx_label,self.num_dx)
            dx_target=tf.reduce_sum(dx_target,axis=2)
            self.prediction=tf.nn.softmax(dx_output)
        # NOTE(review): self.loss / self.loss1 are assigned only inside the
        # window-size branches below; any other v_window_size / c_window_size
        # value would raise AttributeError at the final loss line -- confirm
        # that only the supported window sizes are ever passed.
        if self.v_window_size==3:
            # NOTE(review): softmax_cross_entropy_with_logits_v2 is fed
            # self.prediction, which is *already* softmaxed -- double softmax;
            # confirm whether dx_output (the raw logits) was intended.
            self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.prediction, labels=dx_target)
            # self.loss1 = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.prediction[:,1:], labels=dx_target[:,1:])
            # self.loss2 = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.prediction[:,:-1], labels=dx_target[:,:-1])
            # self.loss=tf.concat([self.loss,self.loss1],1)
            # self.loss = tf.concat([self.loss, self.loss2],1)
            self.loss = self.loss / self.max_visit_per_person
            self.loss = tf.reduce_mean(self.loss)
        # Code-level context loss: compare neighbouring rx vectors within a
        # visit, window size controls how many neighbour offsets are used.
        if self.c_window_size==3:
            norms1=tf.exp(tf.reduce_sum(preVec[:,:,1:,:]*preVec[:,:,:-1,:],axis=3))
            norms2=tf.exp(tf.reduce_sum(preVec[:,:,:-1,:]*preVec[:,:,1:,:],axis=3))
            # norms1=-tf.log(tf.clip_by_value(norms1/tf.clip_by_value(rx_exp_visit[:,:,:-1],1e-12,1.0),1e-12,1.0))
            # norms2=-tf.log(tf.clip_by_value(norms2/tf.clip_by_value(rx_exp_visit[:,:,1:],1e-12,1.0),1e-12,1.0))
            # norms1 = -tf.log(tf.clip_by_value(norms1 / tf.clip_by_value(rx_exp_visit[:, :, :-1], 1e-12, 1.0), 1e-12, 1.0))
            # norms2 = -tf.log(tf.clip_by_value(norms2 / tf.clip_by_value(rx_exp_visit[:, :, 1:], 1e-12, 1.0), 1e-12, 1.0))
            norms1 = -tf.log(norms1 / (rx_exp_visit[:, :, :-1]+ 1e-8)+1e-8)
            norms2 = -tf.log(norms2 / (rx_exp_visit[:, :, 1:]+1e-8)+1e-8)
            self.loss1=tf.reduce_mean(norms1+norms2)
        elif self.c_window_size==5:
            norms1=tf.exp(tf.reduce_sum(preVec[:,:,1:,:]*preVec[:,:,:-1,:],axis=3))
            norms2=tf.exp(tf.reduce_sum(preVec[:,:,:-1,:]*preVec[:,:,1:,:],axis=3))
            norms3=tf.exp(tf.reduce_sum(preVec[:,:,2:,:]*preVec[:,:,:-2,:],axis=3))
            norms4=tf.exp(tf.reduce_sum(preVec[:,:,:-2,:]*preVec[:,:,2:,:],axis=3))
            norms1 = -tf.log(norms1 / (rx_exp_visit[:, :, :-1] + 1e-8) + 1e-8)
            norms2 = -tf.log(norms2 / (rx_exp_visit[:, :, 1:] + 1e-8) + 1e-8)
            norms3 = -tf.log(norms3 / (rx_exp_visit[:, :, :-2] + 1e-8) + 1e-8)
            norms4 = -tf.log(norms4 / (rx_exp_visit[:, :, 2:] + 1e-8) + 1e-8)
            self.loss1=tf.reduce_mean(norms1+norms2+norms3+norms4)
        elif self.c_window_size==7:
            norms1=tf.exp(tf.reduce_sum(preVec[:,:,1:,:]*preVec[:,:,:-1,:],axis=3))
            norms2=tf.exp(tf.reduce_sum(preVec[:,:,:-1,:]*preVec[:,:,1:,:],axis=3))
            norms3=tf.exp(tf.reduce_sum(preVec[:,:,2:,:]*preVec[:,:,:-2,:],axis=3))
            norms4=tf.exp(tf.reduce_sum(preVec[:,:,:-2,:]*preVec[:,:,2:,:],axis=3))
            norms5=tf.exp(tf.reduce_sum(preVec[:,:,3:,:]*preVec[:,:,:-3,:],axis=3))
            norms6=tf.exp(tf.reduce_sum(preVec[:,:,:-3,:]*preVec[:,:,3:,:],axis=3))
            norms1 = -tf.log(norms1 / (rx_exp_visit[:, :, :-1] + 1e-8) + 1e-8)
            norms2 = -tf.log(norms2 / (rx_exp_visit[:, :, 1:] + 1e-8) + 1e-8)
            norms3 = -tf.log(norms3 / (rx_exp_visit[:, :, :-2] + 1e-8) + 1e-8)
            norms4 = -tf.log(norms4 / (rx_exp_visit[:, :, 2:] + 1e-8) + 1e-8)
            norms5 = -tf.log(norms5 / (rx_exp_visit[:, :, :-3] + 1e-8) + 1e-8)
            norms6 = -tf.log(norms6 / (rx_exp_visit[:, :, 3:] + 1e-8) + 1e-8)
            self.loss1=tf.reduce_mean(norms1+norms2+norms3+norms4+norms5+norms6)
        with tf.name_scope("loss"):
            # Total loss = prediction loss + context loss + L2 regularisation.
            self.loss+=self.loss1+ self.L2 * tf.reduce_mean(preVec ** 2)+ self.L2 * tf.reduce_sum(seq_visit ** 2)
| StarcoderdataPython |
12834200 | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) <NAME> 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# schedule_lib/schedexport.py
#
# system imports
import datetime
# framework imports
from google.appengine.ext import ndb
import cloudstorage
import xlsxwriter
# app imports
import schedule
from reports import exportexcel
def worksheet_write_wrapper(wksheet, row, col, text):
    """Write ``text`` into cell (``row``, ``col``) of ``wksheet``."""
    write_cell = wksheet.write
    write_cell(row, col, text)
def worksheet_merge_wrapper(wksheet, row_start, col_start, row_end, col_end, text):
    """Merge the given cell range on ``wksheet`` and write ``text`` across it."""
    merge_cells = wksheet.merge_range
    merge_cells(row_start, col_start, row_end, col_end, text)
def write_title_row(sched, day, worksheet):
    """Write the track names of ``day`` as the header row (row 0, from column 2)."""
    for offset, track in enumerate(sched.tracks(day)):
        worksheet_write_wrapper(worksheet, 0, 2 + offset, track)
def write_tracks(row, col, day, slot, sched, worksheet):
    """Write the talk title of every track of ``day``/``slot`` along ``row``."""
    for offset, track in enumerate(sched.tracks(day)):
        title = schedule.talkTitle(sched.get_assignment(day, track, slot))
        worksheet_write_wrapper(worksheet, row, col + offset, title)
def write_plenary(row, col, description, track_count, worksheet):
    """Write a plenary ``description`` merged across all ``track_count`` track columns."""
    last_col = col + track_count - 1
    worksheet_merge_wrapper(worksheet, row, col, row, last_col, description)
def write_slots_and_content(sched, day, worksheet):
    """Write one row per slot of ``day``: start/end time, then track or plenary content."""
    for row, slot in enumerate(sched.orderd_slot_keys(day), start=1):
        slot_info = sched.slots(day)[slot]
        worksheet_write_wrapper(worksheet, row, 0, slot_info.start_time.strftime("%H:%M"))
        worksheet_write_wrapper(worksheet, row, 1, slot_info.end_time.strftime("%H:%M"))
        if slot_info.slot_type == "Tracks":
            # One cell per track for this slot.
            write_tracks(row, 2, day, slot, sched, worksheet)
        else:
            # Plenary sessions span all track columns.
            write_plenary(row, 2,
                          schedule.talkTitle(sched.get_assignment(day, "Plenary", slot)),
                          len(sched.tracks(day)),
                          worksheet)
def write_days(sched, workbook):
    """Add one worksheet per conference day and fill in its header and slots."""
    for day in sched.day_names():
        sheet = workbook.add_worksheet(name=day)
        write_title_row(sched, day, sheet)
        write_slots_and_content(sched, day, sheet)
def schedule_to_excel(sched):
    """Export ``sched`` as an .xlsx file in cloud storage.

    Returns:
        str: Publicly readable URL of the generated workbook.
    """
    fullname, url = exportexcel.mk_filename("Schedule", datetime.datetime.now())
    with cloudstorage.open(fullname, "w",
                           content_type="text/plain; charset=utf-8",
                           options={'x-goog-acl': 'public-read'}) as output:
        workbook = xlsxwriter.Workbook(output, {'in_memory': True})
        write_days(sched, workbook)
        # workbook.close() flushes the in-memory workbook to the stream.
        workbook.close()
        # FIX: the explicit output.close() was removed -- the "with" block
        # already closes the stream on exit, so closing it here was redundant.
    return url
| StarcoderdataPython |
import os
import shutil

# Directory counts that mark a *complete* simulation run; any other count is
# treated as a failed run and removed wholesale.
COMPLETE_RUN_DIR_COUNTS = (47, 44)

# FIX: write the log through an explicit file handle instead of redirecting
# and then closing sys.stdout (the original left the interpreter without a
# usable standard output after the script body ran).
with open("deletion_result.txt", "a") as log:
    for name in range(1500):
        run_dir = 'simulation_data/' + str(name)
        # First element after the root itself: its immediate sub-directories.
        directory_list = next(os.walk(run_dir))[1]
        if len(directory_list) not in COMPLETE_RUN_DIR_COUNTS:
            # Incomplete run: record its name and delete the whole directory.
            print(name, file=log)
            shutil.rmtree(run_dir)
        else:
            # Complete run: keep only the VTK output, delete everything else.
            directory_list.remove('VTK')
            for subdir in directory_list:
                shutil.rmtree(run_dir + '/' + subdir)
8062781 | from typing import Any
class ExtraError(Exception):
    """Base exception for all extra-related errors."""

    def __init__(self, extra_name: str, *args: Any) -> None:
        super().__init__(*args)
        self._extra_name = extra_name

    @property
    def extra_name(self) -> str:
        """Name of the extra that caused this error (read-only accessor)."""
        return self._extra_name
class ExtraImportError(ExtraError, ImportError):
    """The extra depends on a component that isn't available."""

    def __init__(self, extra_name: str) -> None:
        # Assemble the user-facing hint from its three sentences.
        sentences = (
            f'The "{extra_name}" extra depends on a component that isn\'t available. ',
            f'Did you forget to specify the "{extra_name}" extra during install? ',
            f'Try again with, e.g., "poetry install --extras {extra_name}"',
        )
        super().__init__(extra_name, ''.join(sentences))
| StarcoderdataPython |
3281073 | import requests
'''
responce = requests.get('http://127.0.0.1:5000/status')
print(responce.status_code)
print(responce.text)
print(responce.json())
message = { 'username': 'Ju', 'text': 'Hello'}
responce = requests.post('http://127.0.0.1:5000/send', json = message)
print(responce.status_code)
print(responce.text)
print(responce.json())
'''
def send_message(username, password, text):
    """POST one chat message; return True when the server answers HTTP 200."""
    payload = {'username': username, 'password': password, 'text': text}
    reply = requests.post('http://127.0.0.1:5000/send', json=payload)
    return reply.status_code == 200
# Simple chat REPL: prompt once for credentials, then forward every typed
# line to the chat server until the process is interrupted.
username = input('Enter your Name: ')
password = input('Enter password: ')
while True:
    text = input()
    # FIX: use truthiness instead of the "is False" identity comparison --
    # send_message returns a plain bool, so "not result" is the idiomatic
    # (and equivalent) check.
    if not send_message(username, password, text):
        print('Error')
3350288 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-19 17:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds nullable 'armor' and 'others'
    foreign keys to the Player model. Do not edit applied migrations."""

    dependencies = [
        ('armery', '0003_armor_others'),
        ('heroquest', '0002_auto_20160819_1747'),
    ]

    operations = [
        migrations.AddField(
            model_name='player',
            name='armor',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='armor', to='armery.Armor', verbose_name='Armaduras'),
        ),
        migrations.AddField(
            model_name='player',
            name='others',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='others', to='armery.Others', verbose_name='Otros'),
        ),
    ]
| StarcoderdataPython |
4988815 | """
fujitsu
Supports Fujitsu compiler function.
This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
"""
from numpy.distutils.fcompiler import FCompiler
compilers = ['FujitsuFCompiler']
class FujitsuFCompiler(FCompiler):
    """numpy.distutils plugin describing the Fujitsu ``frt`` Fortran compiler
    (used e.g. on A64FX / Fugaku)."""
    compiler_type = 'fujitsu'
    description = 'Fujitsu Fortran Compiler'
    possible_executables = ['frt']
    # Parsed against the first line of `frt --version` output.
    version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
    # $ frt --version
    # frt (FRT) x.x.x yyyymmdd
    executables = {
        'version_cmd'  : ["<F77>", "--version"],
        'compiler_f77' : ["frt", "-Fixed"],
        'compiler_fix' : ["frt", "-Fixed"],
        'compiler_f90' : ["frt"],
        'linker_so'    : ["frt", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-KPIC']
    module_dir_switch = '-M'
    module_include_switch = '-I'

    def get_flags_opt(self):
        """Optimization flags passed for release builds."""
        return ['-O3']
    def get_flags_debug(self):
        """Flags passed for debug builds."""
        return ['-g']
    def runtime_library_dir_option(self, dir):
        """Linker option embedding a runtime library search path (rpath)."""
        return f'-Wl,-rpath={dir}'
    def get_libraries(self):
        """Fujitsu Fortran runtime libraries to link against."""
        return ['fj90f', 'fj90i', 'fjsrcinfo']
if __name__ == '__main__':
    # Manual check: detect the Fujitsu compiler and print its version.
    from distutils import log
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler('fujitsu').get_version())
| StarcoderdataPython |
1923676 | <reponame>mehrdad-shokri/retdec-regression-tests-framework
"""
Tests for the
:module`regression_tests.parsers.c_parser.exprs.unary_ops.pre_increment_op`
module.
"""
from tests.parsers.c_parser import WithModuleTests
class PreIncrementOpExprTests(WithModuleTests):
    """Tests for `PreIncrementOpExpr`."""

    # Every other expression predicate that must be False for `++a`.
    _OTHER_PREDICATES = (
        'is_eq_op', 'is_neq_op', 'is_gt_op', 'is_gt_eq_op', 'is_lt_op',
        'is_lt_eq_op', 'is_add_op', 'is_sub_op', 'is_mul_op', 'is_mod_op',
        'is_div_op', 'is_and_op', 'is_or_op', 'is_bit_and_op', 'is_bit_or_op',
        'is_bit_xor_op', 'is_bit_shl_op', 'is_bit_shr_op', 'is_not_op',
        'is_neg_op', 'is_assign_op', 'is_address_op', 'is_deref_op',
        'is_array_index_op', 'is_comma_op', 'is_ternary_op', 'is_call',
        'is_cast', 'is_post_increment_op', 'is_pre_decrement_op',
        'is_post_decrement_op', 'is_compound_assign_op', 'is_struct_ref_op',
        'is_struct_deref_op',
    )

    def test_pre_increment_op_expr_is_pre_increment_op(self):
        expr = self.get_expr('++a', 'int')
        self.assertTrue(expr.is_pre_increment_op())

    def test_pre_increment_op_expr_is_no_other_expr(self):
        expr = self.get_expr('++a', 'int')
        for predicate in self._OTHER_PREDICATES:
            self.assertFalse(getattr(expr, predicate)(), predicate)

    def test_repr_returns_correct_repr(self):
        expr = self.get_expr('++a', 'int')
        self.assertEqual(repr(expr), '<PreIncrementOpExpr op=a>')

    def test_str_returns_correct_str(self):
        expr = self.get_expr('++a', 'int')
        self.assertEqual(str(expr), '++a')
| StarcoderdataPython |
1902544 | <reponame>Danielhiversen/aiotractive
class DataObject:
    """Lightweight wrapper around one API resource payload."""

    def __init__(self, api, data):
        # Keep a handle to the API client and lift the standard fields
        # out of the raw payload.
        self._api = api
        self.id = data["_id"]
        self.type = data["_type"]
        self.version = data["_version"]

    def __repr__(self):
        cls_name = type(self).__name__
        return f"<{cls_name} id={self.id} type={self.type} version={self.version}>"
| StarcoderdataPython |
6633798 | <gh_stars>0
# Alternate scraping of https://getmyvaccine.org/zips/99301
import requests
import json
from ScraperBase import ScraperBase
from Common import Status, SaveHtmlToTable
import logging
from typing import List
class RiteAidAlternate(ScraperBase):
    """Checks Rite Aid vaccine availability via the getmyvaccine.org zip API."""

    def __init__(self):
        self.URL = "https://www.riteaid.com/pharmacy/covid-qualifier"
        self.API_URL = "https://vaccine-zip.herokuapp.com/api/zips?zip=99301"
        self.FailureCase = ""
        self.Keys = None  # Set before saving to table
        self.LocationName = "Rite Aid"
        self.store_numbers = []
        # FIX: open the store-number list inside a context manager -- the
        # original iterated open(...) directly and never closed the handle.
        with open('RiteAidStoreNumbers.csv') as store_file:
            for line in store_file:
                self.store_numbers.append(line.strip('\n'))

    def MakeGetRequest(self) -> None:
        """
        This function does NOT save anything to the table. It checks statuses, and calls
        self.SaveToTable multiple times for each key & status. It's a little bit hacky because
        there's no easy way to do multiple keys & statuses yet.
        """
        resp = requests.get(self.API_URL)
        d = json.loads(resp.text)
        # Collect the store numbers the API currently reports as available.
        available_store_numbers = []
        for location in d['availability']['rite_aid']['data']:
            available_store_numbers.append(location['attributes']['store_number'])
        # Record YES/NO for every known store, one table row per store.
        for store_number in self.store_numbers:
            self.Keys = [f'riteaid_{store_number}']
            status = Status.YES if store_number in available_store_numbers else Status.NO
            self.SaveToTable(status, resp.text)
            if status == Status.YES:
                print(f'Rite Aid #{store_number} is scheduling')
            else:
                print(f'Rite Aid #{store_number} is not scheduling')

    @SaveHtmlToTable
    def SaveToTable(self, status, html):
        """This function actually saves to the table, is called for each location"""
        return self.Keys, status, html
if __name__ == "__main__":
    # Standalone run: verbose logging exposes the HTTP request/response flow.
    logging.basicConfig(level=logging.DEBUG)
    RiteAidAlternate().MakeGetRequest()
| StarcoderdataPython |
3384015 | <gh_stars>1-10
import random
from tqdm import tqdm
import gym
import gym_numberworld
# Baseline: measure how often a uniformly random agent stumbles onto the
# correct object in the numberworld environment.
env = gym.make('numberworld-v0',
               grid_size=10, # pass environment arguments to gym.make
               n_objects=10,
               removed_objects=[('red', '3')]) # red 3 will not appear in environment
env.seed(1) # seed environment
n_episodes = 100_000
successful_episodes = 0
for episode in tqdm(range(n_episodes)):
    # environment must be reset at the start of every episode
    (observation, instruction) = env.reset()
    done = False
    while not done:
        # pick random action
        action = random.randint(0, 3)
        # perform action
        (observation, instruction), reward, done, info = env.step(action)
        if done:
            # Only count episodes that ended on the positive reward.
            if reward == env.positive_reward:
                successful_episodes += 1
success_percent = successful_episodes/n_episodes
# ":.3" formats to 3 significant digits.
print(f'Successfully found the correct object {success_percent*100:.3}% of the time')
| StarcoderdataPython |
6645425 | #!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class TestGoodFeaturesToTrack_test(NewOpenCVTests):

    def test_goodFeaturesToTrack(self):
        """goodFeaturesToTrack must not modify its input, must be repeatable,
        and a higher quality threshold must only truncate the result list."""
        arr = self.get_sample('samples/data/lena.jpg', 0)
        original = arr.copy(True)
        threshes = [ x / 100. for x in range(1,10) ]
        numPoints = 20000
        results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
        # Check that GoodFeaturesToTrack has not modified input image.
        # FIX: ndarray.tostring() was deprecated and removed in NumPy 1.23;
        # tobytes() is the drop-in replacement with identical output.
        self.assertTrue(arr.tobytes() == original.tobytes())
        # Check for repeatability
        for i in range(1):
            results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
            for t in threshes:
                self.assertTrue(len(results2[t]) == len(results[t]))
                for i in range(len(results[t])):
                    self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0)
        for t0,t1 in zip(threshes, threshes[1:]):
            r0 = results[t0]
            r1 = results[t1]
            # Increasing thresh should make result list shorter
            self.assertTrue(len(r0) > len(r1))
            # Increasing thresh should only truncate result list
            for i in range(len(r1)):
                self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0)
if __name__ == '__main__':
    # Delegate to the OpenCV test harness, which discovers and runs the tests.
    NewOpenCVTests.bootstrap()
| StarcoderdataPython |
1828198 | <filename>src/Translations.py
from constants import FONT_BIG, FONT_SMALL
# Maps a language code to the row index used in TR_FONTS / TRANSLATIONS.
TRANSLATION_ID = {'en': 0, 'zh': 1}
# Per-language (big font, small font) pairs; row index matches TRANSLATION_ID.
TR_FONTS = (
    ( # en
        FONT_BIG, FONT_SMALL),
    ( # cht
        ('Microsoft JhengHei', '12'), ('Segoe UI', '9')))
# Parallel tuples of UI strings. TRANSLATIONS[0] (English) serves as the
# lookup key for every other language row; rows must stay the same length
# and order.
TRANSLATIONS = (
    ( # en
        'Kitava-Touched', 'Innocence-Touched', 'Shakari-Touched', 'Abberath-Touched',
        'Tukohama-Touched', 'Brine King-Touched', 'Arakaali-Touched', 'Solaris-Touched',
        'Lunaris-Touched', 'Effigy', 'Empowered Elements', 'Crystal-Skinned', 'Invulnerable',
        'Corrupter', 'Mana Siphoner', 'Storm Strider', 'Mirror Image', 'Magma Barrier',
        'Evocationist', 'Corpse Detonator', 'Flame Strider', 'Soul Eater', 'Ice Prison',
        'Frost Strider', 'Treant Horde', 'Temporal Bubble', 'Entangler', 'Drought Bringer', 'Hexer',
        'Executioner', 'Rejuvenating', 'Necromancer', 'Trickster', 'Assassin', 'Empowered Minions',
        'Heralding Minions', 'Arcane Buffer', 'Berserker', 'Bloodletter', 'Bombardier',
        'Bonebreaker', 'Chaosweaver', 'Consecrator', 'Deadeye', 'Dynamo', 'Echoist', 'Flameweaver',
        'Frenzied', 'Frostweaver', 'Gargantuan', 'Hasted', 'Incendiary', 'Juggernaut',
        'Malediction', 'Opulent', 'Overcharged', 'Permafrost', 'Sentinel', 'Soul Conduit',
        'Steel-Infused', 'Stormweaver', 'Toxic', 'Vampiric', 'Trash'),
    ( # cht
        '奇塔弗之觸', '善之觸', '夏卡莉之觸', '艾貝拉斯之觸', '圖克哈瑪之觸', '海洋王之觸', '艾爾卡莉之觸', '日神之觸', '月神之觸', '雕像',
        '強化元素', '晶瑩剔透', '刀槍不入', '腐化者', '魔靈吸取', '風行者', '鏡像幻影', '熔岩屏障', '招魂師', '陰屍爆破', '炎行者', '嗜魂者',
        '冰牢', '霜行者', '樹人部落', '短暫幻想', '尾隨魔', '乾旱先鋒', '咒術師', '劊子手', '振興', '死靈師', '詐欺師', '刺客', '增幅召喚物',
        '先鋒召喚物', '奧術緩衝', '狂戰士', '放血者', '投彈手', '裂骨者', '混沌編織', '奉獻使徒', '銳眼', '發電機', '回聲者', '烈焰編織',
        '喪心病狂', '冰霜編織', '龐然大物', '急速', '縱火', '勇士', '憎惡', '豐饒', '超負荷', '永凍土', '哨兵', '魂靈牽引', '鑄鋼',
        '風暴編織', '毒素', '吸血魔', '垃圾'))
class Translation:
    """Translates the fixed UI strings in TRANSLATIONS for one language."""

    def __init__(self, id='en') -> None:
        # Unknown language codes fall back to English (index 0).
        self._tr_idx = TRANSLATION_ID.get(id, 0)
        if self._tr_idx == 0:
            # English is the key language itself -- no lookup table needed.
            self._tr_tbl = None
        else:
            self._tr_tbl = dict(
                zip(TRANSLATIONS[0], TRANSLATIONS[self._tr_idx]))

    def get_text(self, text) -> str:
        """Translate *text*; strings starting with 'x' keep their prefix word."""
        if self._tr_tbl is None:
            return text
        if text[0] == 'x':
            # "x<count> <name>" -- translate only the name part.
            pieces = text.split(' ', 1)
            pieces[1] = self._tr_tbl.get(pieces[1], 'N/A')
            return ' '.join(pieces)
        return self._tr_tbl.get(text, 'N/A')

    def get_font_big(self):
        """Large UI font tuple for the active language."""
        return TR_FONTS[self._tr_idx][0]

    def get_font_small(self):
        """Small UI font tuple for the active language."""
        return TR_FONTS[self._tr_idx][1]
| StarcoderdataPython |
9632962 | <filename>moar/_compat.py
# coding=utf-8
"""
Utilities for writing code that runs on Python 2 and 3.
"""
import sys
try:
    # Python 3 locations.
    from urllib.parse import urlparse
    from urllib.parse import quote as url_quote
    from urllib.request import urlopen
except ImportError:
    # Python 2 fallbacks.
    # NOTE(review): on Python 2 this binds the urlparse *module*, while on
    # Python 3 the same name is the urlparse *function* -- callers must cope
    # with both shapes; confirm this asymmetry is intended.
    import urlparse
    from urllib import quote as url_quote
    from urllib import urlopen

# True when running under Python 2.
PY2 = sys.version_info[0] == 2
# Base string type(s) for isinstance() checks across versions
# (basestring is only evaluated on Python 2, so no NameError on 3).
string_types = (basestring, ) if PY2 else (str, )
| StarcoderdataPython |
3555297 | <gh_stars>1-10
"""
footing.ls
~~~~~~~~~~
Lists all footing templates and projects spun up with those templates
"""
import footing.forge
@footing.utils.set_cmd_env_var('ls')
def ls(forge, template=None):
    """Lists all templates under a forge path, or all projects spun up under
    a forge path and a template path.

    The ``forge`` path must be either a Github organization/user
    (e.g. github.com/organization) or a Gitlab group (e.g. gitlab.com/my/group).

    Note that the `footing.constants.FOOTING_ENV_VAR` is set to 'ls' for the
    duration of this function.

    Args:
        forge (str): A forge git storage path. For example, a Github
            organization (github.com/Organization) or a gitlab group
            (gitlab.com/my/group). (The original docstring documented this
            parameter under the name ``root``; the actual parameter is
            ``forge``.)
        template (str, default=None): An optional template path. If provided,
            the returned values are projects under ``forge`` created using
            the template.

    Returns:
        dict: A dictionary of repository information keyed on the url.

    Raises:
        `InvalidForgeError`: When ``forge`` is invalid
    """
    # NOTE(review): the decorator uses footing.utils, but this module only
    # imports footing.forge -- presumably importing footing.forge also loads
    # footing.utils; confirm.
    client = footing.forge.from_path(forge)
    return client.ls(forge, template)
| StarcoderdataPython |
9602041 | from ssr.surface_rec.tasks.task import Task
class ReductionTask(Task):
    """Task bundling the input/output paths of a mesh-reduction step."""

    def __init__(self, colmap_idp, mesh_odp):
        # Assign each argument explicitly instead of dumping locals() into
        # __dict__ and deleting the circular 'self' entry afterwards.
        self.colmap_idp = colmap_idp
        self.mesh_odp = mesh_odp
| StarcoderdataPython |
9797414 | from __future__ import absolute_import
import unittest
from main.maincontroller import MainController
from tests.xroad_parse_users_inputs import xroad_parse_user_inputs
class XroadDisableWsdlInputs(unittest.TestCase):
    """
    UC SERVICE_11 (UC SERVICE 13/4) Parse User Input (WSDL URL)
    RIA URL: https://jira.ria.ee/browse/XT-270, https://jira.ria.ee/browse/XTKB-54
    Depends on finishing other test(s): None
    Requires helper scenarios: None
    X-Road version: 6.16.0
    """
    def __init__(self, methodName='test_parse_user_input_SS_41'):
        # Default the runnable method so the case can be constructed directly.
        unittest.TestCase.__init__(self, methodName)

    def test_parse_user_input_SS_41(self):
        # Drives the shared UI-test controller through the disable-WSDL
        # input-parsing scenario against security server 2.
        main = MainController(self)
        '''Set test name and number'''
        main.test_number = 'SERVICE_11'
        main.test_name = self.__class__.__name__
        main.log('TEST: SERVICE 13/4 PARSE DISABLE WSDL INPUTS')
        main.url = main.config.get('ss2.host')
        main.username = main.config.get('ss2.user')
        # NOTE(review): '<PASSWORD>' looks like anonymization residue from the
        # published source -- the real config key is presumably 'ss2.pass';
        # confirm before running.
        main.password = main.config.get('<PASSWORD>')
        try:
            '''Open webdriver'''
            main.reset_webdriver(main.url, main.username, main.password)
            '''Run the test'''
            test_func = xroad_parse_user_inputs.test_disable_wsdl_inputs()
            test_func(main)
        except:
            # Capture a screenshot/diagnostics before failing the test.
            main.log('XroadDisableWsdlInputs: Failed to to parse user inputs')
            main.save_exception_data()
            assert False
        finally:
            '''Test teardown'''
            main.tearDown()
| StarcoderdataPython |
1754929 | <gh_stars>0
import typing
from mixin import mixin
from .codegen import Codegen
from .expression import ExpressionMixin
class Let(Codegen):
    """A ``let`` binding: associates a name with an expression value."""
    def __init__(self, name: str, value: ExpressionMixin):
        self._name: str = name  # identifier being bound
        self._expr: ExpressionMixin = value  # right-hand-side expression
| StarcoderdataPython |
class UserError(Exception):
    """Base class for all user-related errors.

    FIX: calls ``Exception.__init__`` so that ``str(err)`` returns the
    message and the exception pickles correctly (the original only stored
    ``self.message``, leaving ``str(err)`` empty).
    """
    def __init__(self, message):
        super().__init__(message)
        self.message = message
class UserNotExistError(UserError):
    """Raised when no user matches the given identifier."""
    pass
# NOTE(review): class name typo ("Aready") kept -- renaming would break callers.
class UserAreadyRegisteredError(UserError):
    """Raised when registering an account that already exists."""
    pass
# NOTE(review): class name typo ("Invalide") kept -- renaming would break callers.
class InvalideEmailError(UserError):
    """Raised when the supplied e-mail address is not valid."""
    pass
class IncorrectPasswordError(UserError):
    """Raised when the supplied password does not match."""
    pass
# NOTE(review): class name typo ("Statment") kept -- renaming would break callers.
class IncorrectDatabaseStatment(UserError):
    """Raised when a database statement is malformed or fails."""
    pass
4922839 | <gh_stars>0
# long_repeat
# Created by JKChang
# 18/04/2018, 10:53
# Tag:
# Description: This mission is the first one of the series. Here you should find the length of the longest substring
# that consists of the same letter. For example, line "aaabbcaaaa" contains four substrings with the same letters "aaa",
# "bb","c" and "aaaa". The last substring is the longest one which makes it an answer.
# Input: String.
# Output: Int.
def long_repeat(line):
    """
    Return the length of the longest substring of ``line`` consisting of a
    single repeated character (0 for the empty string).

    Uses itertools.groupby instead of the original hand-rolled run-length
    scan: groupby yields one group per maximal run of equal characters, so
    the answer is simply the size of the largest group.
    """
    from itertools import groupby
    return max((sum(1 for _ in run) for _, run in groupby(line)), default=0)
if __name__ == '__main__':
    # Self-check examples from the kata description.
    # These "asserts" using only for self-checking and not necessary for auto-testing
    assert long_repeat('sdsffffse') == 4, "First"
    assert long_repeat('ddvvrwwwrggg') == 3, "Second"
    assert long_repeat('abababaab') == 2, "Third"
    assert long_repeat('') == 0, "Empty"
    print('"Run" is good. How is "Check"?')
| StarcoderdataPython |
6513387 | <reponame>jacobmas/btrdb-python<gh_stars>0
import btrdb
from btrdb.conn import BTrDB
from functools import partial
def register_serializer(conn_str=None, apikey=None, profile=None):
    """
    Register serializer for BTrDB Object
    Parameters
    ----------
    conn_str: str, default=None
        The address and port of the cluster to connect to, e.g. `192.168.1.1:4411`.
        If set to None, will look in the environment variable `$BTRDB_ENDPOINTS`
        (recommended).
    apikey: str, default=None
        The API key used to authenticate requests (optional). If None, the key
        is looked up from the environment variable `$BTRDB_API_KEY`.
    profile: str, default=None
        The name of a profile containing the required connection information as
        found in the user's predictive grid credentials file
        `~/.predictivegrid/credentials.yaml`.
    """
    # Import lazily so ray/semver stay optional dependencies of this package.
    try:
        import ray
    except ImportError:
        raise ImportError("must pip install ray to register custom serializer")
    try:
        import semver
    except ImportError:
        raise ImportError("must pip install semver to register custom serializer")
    assert ray.is_initialized(), "Need to call ray.init() before registering custom serializer"
    # TODO: check the version using the 'semver' package?
    ver = semver.VersionInfo.parse(ray.__version__)
    if ver.major == 0:
        # Old (pre-1.0) ray API.
        ray.register_custom_serializer(
            BTrDB, serializer=btrdb_serializer, deserializer=partial(btrdb_deserializer, conn_str=conn_str, apikey=apikey, profile=profile))
    elif ver.major == 1 and ver.minor in range(2, 4):
        # TODO: check different versions of ray?
        ray.util.register_serializer(
            BTrDB, serializer=btrdb_serializer, deserializer=partial(btrdb_deserializer, conn_str=conn_str, apikey=apikey, profile=profile))
    else:
        # NOTE(review): the message promises ">= 1.2.0" works, yet this branch
        # also rejects ray >= 1.4 -- confirm whether newer ray versions should
        # go through ray.util.register_serializer as well.
        raise Exception("Ray version %s does not have custom serialization. Please upgrade to >= 1.2.0" % ray.__version__)
def btrdb_serializer(_):
    """
    Serializer half of the pair: a live connection is not transferable, so
    nothing is serialized; btrdb_deserializer rebuilds a fresh connection
    on the receiving worker.
    """
    return None
def btrdb_deserializer(_, conn_str=None, apikey=None, profile=None):
    """
    deserialize function
    Parameters
    ----------
    conn_str: str, default=None
        The address and port of the cluster to connect to, e.g. `192.168.1.1:4411`.
        If set to None, will look in the environment variable `$BTRDB_ENDPOINTS`
        (recommended).
    apikey: str, default=None
        The API key used to authenticate requests (optional). If None, the key
        is looked up from the environment variable `$BTRDB_API_KEY`.
    profile: str, default=None
        The name of a profile containing the required connection information as
        found in the user's predictive grid credentials file
        `~/.predictivegrid/credentials.yaml`.
    Returns
    -------
    db : BTrDB
        An instance of the BTrDB context to directly interact with the database.
    """
    # The serialized payload (first argument) is ignored; a brand-new
    # connection is opened from the bound credentials instead.
    return btrdb.connect(conn_str=conn_str, apikey=apikey, profile=profile)
| StarcoderdataPython |
1753312 | <filename>lib/surface/storage/ls.py
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list Cloud Storage resources."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import cloud_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.storage import encryption_util
from googlecloudsdk.command_lib.storage import errors
from googlecloudsdk.command_lib.storage import flags
from googlecloudsdk.command_lib.storage import storage_url
from googlecloudsdk.command_lib.storage.tasks import task_executor
from googlecloudsdk.command_lib.storage.tasks.ls import cloud_list_task
class Ls(base.Command):
    """List Cloud Storage buckets and objects."""

    # Help text rendered by the gcloud help system; ``{command}`` is
    # substituted with the invoked command path at render time.
    # pylint:disable=g-backslash-continuation
    detailed_help = {
        'DESCRIPTION': """\
List your Cloud Storage buckets in a project and objects in a bucket.
This command treats forward slashes in object names as directories. See
below for examples of how to use wildcards to get the listing behavior
you want.
""",
        'EXAMPLES': """\
The following command lists the buckets in the default project:
  $ {command}
The following command lists the buckets in the specified project:
  $ {command} --project=my-project
The following command lists the contents of a bucket:
  $ {command} gs://my-bucket
You can use wildcards to match multiple paths (including multiple
buckets). Bucket wildcards are expanded to match only buckets contained in
your current project. The following command matches ``.txt'' objects that
begin with ``log'' and that are stored in buckets in your project that
begin with ``my-b'':
  $ {command} gs://my-b*/log*.txt
The following wildcards are valid and match only within the current
directory:
  *: Matches zero or more characters
  ?: Matches zero or one characters
  []: Matches a character range (ex. [a-z] or [0-9])
You can use double-star wildcards to match zero or more directory levels
in a path. The following command matches all ``.txt'' objects in a bucket.
  $ {command} gs://my-bucket/**/*.txt
Double-star expansion can not be combined with other expressions in a
given path segment and operates as a single star in that context. For
example:
gs://my-bucket/dir**/log.txt is treated as:
gs://my-bucket/dir*/log.txt and instead should be written as:
gs://my-bucket/dir*/**/log.txt to get the recursive behavior.
The following command lists all items recursively with formatting by
using `--recursive`:
  $ {command} --recursive gs://bucket
Recursive listings are similar to ``**'' except recursive listings include
line breaks and header formatting for each subdirectory.
"""
    }
    # pylint:enable=g-backslash-continuation

    @staticmethod
    def Args(parser):
        """Edit argparse.ArgumentParser for the command."""
        # Zero or more paths: no path means "list buckets in the project".
        parser.add_argument(
            'path',
            nargs='*',
            help='The path of objects and directories to list. The path must begin'
                 ' with gs:// and is allowed to contain wildcard characters.')
        parser.add_argument(
            '-a', '--all-versions',
            action='store_true',
            help='Include non-current object versions in the listing. This flag is'
                 ' typically only useful for buckets with'
                 ' [object versioning](https://cloud.google.com/storage/docs/object-versioning)'
                 ' enabled. If combined with the `--long` option, the metageneration'
                 ' for each listed object is also included.'
        )
        parser.add_argument(
            '-b',
            '--buckets',
            action='store_true',
            help='When given a bucket URL, only return buckets. Useful for'
                 ' avoiding the rule that prints the top-level objects of buckets'
                 ' matching a query. Typically used in combination with `--full` to get'
                 ' the full metadata of buckets.')
        parser.add_argument(
            '-e',
            '--etag',
            action='store_true',
            help='Include ETag metadata in listings that use the `--long` flag.')
        parser.add_argument(
            '--readable-sizes',
            action='store_true',
            help='When used with `--long`, print object sizes in human'
                 ' readable format, such as 1 KiB, 234 MiB, or 2 GiB.')
        parser.add_argument(
            '-R',
            '-r',
            '--recursive',
            action='store_true',
            help='Recursively list the contents of any directories that match the'
                 ' path expression.')
        # Output-format flags are mutually exclusive with each other.
        output_styles = parser.add_group(mutex='True')
        output_styles.add_argument(
            '-l',
            '--long',
            action='store_true',
            help='For objects only. List size in bytes, creation time, and URL.'
                 ' Note: Creation time not available for S3.')
        output_styles.add_argument(
            '-L',
            '--full',
            action='store_true',
            help='List all available metadata about items in rows.')
        output_styles.add_argument(
            '-j',
            '--json',
            action='store_true',
            help='List all available metadata about items as a JSON dump.')
        flags.add_encryption_flags(parser)

    def Run(self, args):
        """Command execution logic."""
        encryption_util.initialize_key_store(args)
        if args.path:
            # Parse each path; anything that is not a cloud URL is rejected.
            storage_urls = [storage_url.storage_url_from_string(path)
                            for path in args.path]
            for url in storage_urls:
                if not isinstance(url, storage_url.CloudUrl):
                    raise errors.InvalidUrlError('Ls only works for cloud URLs.'
                                                 ' Error for: {}'.format(url.url_string))
        else:
            # No path given: list buckets for the default provider/project.
            storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]
        # Mutually-exclusive output-format flags; only one can be set.
        if args.full:
            display_detail = cloud_list_task.DisplayDetail.FULL
        elif args.json:
            display_detail = cloud_list_task.DisplayDetail.JSON
        elif args.long:
            display_detail = cloud_list_task.DisplayDetail.LONG
        else:
            display_detail = cloud_list_task.DisplayDetail.SHORT
        # One list task per URL, executed sequentially to keep output ordered.
        tasks = []
        for url in storage_urls:
            tasks.append(
                cloud_list_task.CloudListTask(
                    url,
                    all_versions=args.all_versions,
                    buckets_flag=args.buckets,
                    display_detail=display_detail,
                    include_etag=args.etag,
                    readable_sizes=args.readable_sizes,
                    recursion_flag=args.recursive))
        task_executor.execute_tasks(tasks, parallelizable=False)
| StarcoderdataPython |
4891470 | <filename>src/LianjiaSpider/spiders/LianjiaSpider.py
# -*- coding=utf-8 -*-
import scrapy
from ..items import LianjiaspiderItem
class LianjiaSpider(scrapy.spiders.Spider):
    """Spider scraping Beijing rental listings from bj.lianjia.com."""

    name = 'LianjiaSpider'  # spider name
    # Pre-build the URLs for the first 100 result pages of Beijing rentals.
    start_urls = []
    for i in range(1, 101):
        start_urls.append('https://bj.lianjia.com/zufang/pg' + str(i))
    # BUG FIX: was "linajia.com" (transposed letters), which made the offsite
    # middleware filter out every followed request; the real domain is
    # lianjia.com.
    allowed_domains = ["lianjia.com"]

    def parse(self, response):
        """Extract one item per listing from a result page.

        Scrapes parallel column lists (title, location, layout, area,
        orientation, monthly rent) and zips them into items.
        """
        item = LianjiaspiderItem()  # item object used to hold scraped data
        # Listing titles.
        title_list = response.xpath("//li[@data-el='zufang']/div[2]/h2/a/text()").extract()
        # Geographic locations.
        location_list = response.xpath("//li[@data-el='zufang']/div[2]/div[1]/div[1]/a/span/text()").extract()
        # Room/hall layout descriptions.
        zone_list = response.xpath("//li[@data-el='zufang']/div[2]/div[1]/div[1]/span[1]/span/text()").extract()
        # Floor areas in square meters.
        meters_list = response.xpath("//li[@data-el='zufang']/div[2]/div[1]/div[1]/span[2]/text()").extract()
        # Orientations (which way the unit faces).
        direction_list = response.xpath("//li[@data-el='zufang']/div[2]/div[1]/div[1]/span[3]/text()").extract()
        # Monthly rents.
        money_list = response.xpath("//li[@data-el='zufang']/div[2]/div[2]/div[1]/span/text()").extract()
        # Combine the parallel lists field-by-field and yield one item each.
        for i, j, k, l, m, n in zip(title_list, location_list, zone_list,
                                    meters_list, direction_list, money_list):
            item['title'] = i
            item['location'] = j
            item['zone'] = k
            item['meters'] = l
            item['direction'] = m
            item['money'] = n
            yield item
| StarcoderdataPython |
9624918 | <reponame>maw41/FRIBs_Voting
import struct
import socket
import ssl
import sys
import thread
import random
import copy #TEMP
import time
from Queue import Queue
from time import sleep
class Server:
    """Base class for the vote-tally servers.

    Concrete servers (RemoteServer, LocalServer) extend this; LocalServer
    overrides :meth:`loadResultLut` with a real implementation.
    """

    def loadResultLut(self, path):
        """Load a result lookup table from *path*.

        Base implementation is a placeholder that loads nothing and
        returns 0.

        BUG FIX: the original signature was ``loadResultLut(path)`` --
        missing ``self`` -- so calling it on an instance raised TypeError.
        """
        return 0
class RemoteServer(Server):
    """Client-side handle for one remote tally server (Python 2 code).

    Holds the connection endpoints, a seeded PRNG shared with the remote
    side, and an obfuscation lookup table loaded from disk.
    """

    def __init__(self, ip=None, seed=None, rport=None, lport=None, sport=None):
        self.ip = ip
        self.rport = rport
        # Local port defaults to the remote port when not given.
        self.lport = lport if lport else rport
        self.scheduler_port = sport
        # PRNG seeded identically on both ends so shuffles/masks agree.
        self.random = random.Random()
        self.random.seed(seed)

    def obfuscate(self, state):
        """Map a plain state to its obfuscated byte via the loaded LUT."""
        return self.olut[state]

    def send(self, b):
        """Send raw bytes over the established TLS connection."""
        self.ssl_sock.sendall(b)

    def connect(self):
        """Open a TLS connection to the remote server.

        Returns True on success, False on any failure (errors swallowed).
        """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Disable Nagle: the protocol exchanges many tiny messages.
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.ssl_sock = ssl.wrap_socket(self.sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs='server_cert.pem')
        try:
            self.ssl_sock.connect((self.ip, self.rport))
            print 'Connected'
            return True
        except:
            # NOTE(review): bare except hides the failure reason; returns
            # False for any error, including programming errors.
            return False

    def stop(self):
        """Alias for close()."""
        return self.close()

    def close(self):
        """Close the TLS socket."""
        return self.ssl_sock.close()

    def loadLut(self, path):
        """Load the obfuscation LUT: one single-byte entry per file byte."""
        self.olut = []
        f = open(path, 'rb')
        while True:
            b = f.read(1)
            if not b:
                break
            self.olut.append(b)
        f.close()
class LocalServer(Server):
    """Local coordinator of the three-party secure tally protocol (Python 2).

    Holds handles to three servers a/b/c, listens for b and c on two TLS
    sockets, and drives vote addition through obfuscated state-machine
    reductions.  All inter-thread hand-off goes through Queues.
    """

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c
        # Obfuscated-state queues feeding the B and C listener threads.
        self.qosb = Queue()
        self.qosc = Queue()
        # Incoming votes and the per-vote masking randoms.
        self.voteaddQ = Queue()
        self.randomQ = Queue()
        self.conn_sockets = []
        # Tally split into 4-bit windows plus bookkeeping/statistics.
        self.tally_window = []
        self.votes_processed = 0
        self.states_reached = []
        self.states_trans = []

    def stop(self):
        """Close listener sockets and force-close all accepted connections."""
        self.ssl_bsock.close()
        self.ssl_csock.close()
        self.bsock.close()
        self.csock.close()
        for s in self.conn_sockets:
            try:
                s.shutdown(socket.SHUT_RDWR)
            except:
                # Socket may already be closed/reset; best-effort shutdown.
                pass
            s.close()

    def initiateTally(self, value, length):
        """Set the starting tally *value* and its bit *length* (multiple of 3)."""
        assert(length % 3 == 0)
        self.tally_length = length
        self.tally = value

    def listeningB(self):
        """Thread body serving server B's side of each reduction.

        For each received row index, shuffles candidate rows with B's
        shared PRNG, locates B's obfuscated state, XOR-masks it with a
        fresh random (also queued for parreduce to unmask), and replies
        with (row index, masked value).
        """
        self.ssl_bsock.listen(10)
        conn, addr = self.ssl_bsock.accept()
        self.conn_sockets.append(conn)
        print 'Listen Connected'
        while True:
            b1 = conn.recv(1)
            if len(b1) == 0:
                break
            ri = struct.unpack("<B", b1)[0]
            osb = self.qosb.get()
            osbf = 0
            for i in range(4):
                row_bytes = []
                for j in range(32):
                    row_bytes.append(j)
                # Shuffle with B's seeded PRNG so B can mirror the permutation.
                self.b.random.shuffle(row_bytes)
                osb_new = row_bytes.index(osb)
                for j in range(32):
                    tmp_r = self.b.random.randint(0,31)
                    # Keep only the random drawn at (chosen row, our slot).
                    if i == ri and j == osb_new:
                        osbf = tmp_r
                        osbi = osb_new
            # Extra local mask; parreduce pops the same value to unmask.
            ran = random.randint(0,31)
            self.randomQ.put(ran)
            osbf = osbf ^ ran
            conn.sendall(struct.pack("<B", osbi) + struct.pack("<B", osbf))
        self.ssl_bsock.close()
        self.bsock.close()
        conn.close()
        thread.exit()

    def listeningC(self):
        """Thread body serving server C's side of each reduction.

        For each 4-byte request, selects and shuffles result-LUT rows with
        C's shared PRNG, masks each byte, and returns the 128-byte blob.
        """
        self.ssl_csock.listen(10)
        conn, addr = self.ssl_csock.accept()
        self.conn_sockets.append(conn)
        print 'Listen Connected'
        while True:
            b4 = conn.recv(4)
            if len(b4) == 0:
                break
            osc = self.qosc.get() * 32
            rbs = ''
            for i in range(4):
                # NOTE: loop variable ``i`` is immediately reused as the
                # LUT offset computed from the i-th request byte.
                i = (struct.unpack("<B",b4[i])[0] * 32 * 32) + osc
                row_bytes = self.result_lut[i:i+32]
                self.c.random.shuffle(row_bytes)
                for rb in row_bytes:
                    tmp_r = self.c.random.randint(0,31)
                    rbs += struct.pack("<B", struct.unpack("<B", rb)[0] ^ tmp_r)
            conn.sendall(rbs)
        self.ssl_csock.close()
        self.csock.close()
        conn.close()
        thread.exit()

    def listen(self):
        """Bind the B and C listener sockets and start the worker threads."""
        self.bsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Low-latency, reusable listening sockets.
        self.bsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.bsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.csock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.bsock.bind(('0.0.0.0', self.b.lport))
        self.csock.bind(('0.0.0.0', self.c.lport))
        self.ssl_bsock = ssl.wrap_socket(self.bsock, keyfile='server_key.pem', certfile='server_cert.pem', server_side=True)
        self.ssl_csock = ssl.wrap_socket(self.csock, keyfile='server_key.pem', certfile='server_cert.pem', server_side=True)
        thread.start_new_thread(self.listeningB, ())
        thread.start_new_thread(self.listeningC, ())
        thread.start_new_thread(self.addVote, ())

    def loadResultLut(self, path):
        """Load the result LUT: one single-byte entry per byte of *path*."""
        self.result_lut = []
        f = open(path, 'rb')
        while True:
            b = f.read(1)
            if not b:
                break
            self.result_lut.append(b)
        f.close()

    def parreduces(self, pstates):
        """Reduce several lists of states in one batch (list of lists).

        Same protocol as :meth:`parreduce`, batched per sub-list.
        """
        pvis = []
        for states in pstates:
            vis = []
            sendb = ""
            sendc = ""
            for state in states:
                osa = self.a.obfuscate(state)
                self.qosb.put(struct.unpack("<B", self.b.obfuscate(state))[0])
                self.qosc.put(struct.unpack("<B", self.c.obfuscate(state))[0])
                # Hide A's obfuscated state among 3 distinct random decoys.
                vec = [osa]
                i = 0
                while i < 3:
                    r = struct.pack("<B", random.randint(0, 31))
                    if r not in vec:
                        vec.append(r)
                        i += 1
                random.shuffle(vec)
                vi = vec.index(osa)
                vis.append(vi)
                sendb += vec[0]+vec[1]+vec[2]+vec[3]
                sendc += struct.pack("<B", vi)
            self.b.ssl_sock.sendall(sendb)
            self.c.ssl_sock.sendall(sendc)
            pvis.append(vis)
        presults = []
        for pi in range(len(pstates)):
            results = []
            for si in range(len(pstates[pi])):
                small_lut = []
                rows = self.b.ssl_sock.recv(128)
                # NOTE(review): this indexes ``vis`` (the LAST batch's
                # indices) rather than ``pvis[pi]`` -- looks like a bug for
                # batches with differing indices; confirm against protocol.
                row = rows[32 * vis[si]: 32 * vis[si] + 32]
                for by in row:
                    small_lut.append(by)
                index = struct.unpack("<B", self.c.ssl_sock.recv(1))[0]
                flip = struct.unpack("<B", self.c.ssl_sock.recv(1))[0]
                # Unmask with the random that listeningB queued for this state.
                ran = self.randomQ.get()
                results.append(struct.unpack("<B", small_lut[index])[0] ^ flip ^ ran)
            presults.append(results)
        return presults

    def parreduce(self, states):
        """Reduce a list of states through the three-party exchange.

        Sends each obfuscated state (hidden among decoys) to B and C and
        combines their masked replies into the next plain states.
        """
        vis = []
        sendb = ""
        sendc = ""
        for state in states:
            osa = self.a.obfuscate(state)
            self.qosb.put(struct.unpack("<B", self.b.obfuscate(state))[0])
            self.qosc.put(struct.unpack("<B", self.c.obfuscate(state))[0])
            # Hide A's obfuscated state among 3 distinct random decoys.
            vec = [osa]
            i = 0
            while i < 3:
                r = struct.pack("<B", random.randint(0, 31))
                if r not in vec:
                    vec.append(r)
                    i += 1
            random.shuffle(vec)
            vi = vec.index(osa)
            vis.append(vi)
            sendb += vec[0]+vec[1]+vec[2]+vec[3]
            sendc += struct.pack("<B", vi)
        self.b.ssl_sock.sendall(sendb)
        self.c.ssl_sock.sendall(sendc)
        results = []
        for si in range(len(states)):
            small_lut = []
            rows = self.b.ssl_sock.recv(128)
            row = rows[32 * vis[si]: 32 * vis[si] + 32]
            for by in row:
                small_lut.append(by)
            index = struct.unpack("<B", self.c.ssl_sock.recv(1))[0]
            flip = struct.unpack("<B", self.c.ssl_sock.recv(1))[0]
            # Unmask with the random that listeningB queued for this state.
            ran = self.randomQ.get()
            results.append(struct.unpack("<B", small_lut[index])[0] ^ flip ^ ran)
        return results

    def addVote(self):
        """Thread body: consume votes and ripple them through the tally.

        The tally is held as 4-bit windows; each vote enters as a carry at
        window 0 and propagates one window per processed vote.
        """
        # Testing Vars
        prev_rstates = []
        for i in range(32):
            self.states_reached.append(0)
            self.states_trans.append(0)
        carry_window = []
        vote_window_len = self.tally_length / 4
        tand = 15
        for i in range(vote_window_len):
            # NOTE(review): masks with a fixed 0xF before shifting; for i > 0
            # this looks like it should be (tally & (15 << (i*4))) >> (i*4).
            # Confirm intended initial window extraction.
            self.tally_window.append((self.tally & tand) >> (i * 4))
            carry_window.append(None)
        while True:
            # Block until the next vote (0/1) arrives.
            carry_window[0] = self.voteaddQ.get()
            states = []
            for i in range(vote_window_len):
                carry = carry_window[i]
                if carry == None:
                    continue
                # State = 5 bits: 4-bit window value shifted left, plus carry.
                states.append((self.tally_window[i] << 1) + carry)
            #rstates = self.parreduces([states, states, states])[0] # Example for multiple tallies
            rstates = self.parreduce(states)
            # Statistics: which reduced states were reached / transitioned to.
            for s in rstates:
                self.states_reached[s] += 1
            for s in range(len(prev_rstates)):
                if prev_rstates[s] == 1:
                    self.states_trans[rstates[s]] += 1
            prev_rstates = rstates
            rsi = 0
            for i in range(vote_window_len):
                if carry_window[i] == None:
                    continue
                # Bit 4 of the reduced state is the carry out; low 4 bits are
                # the new window value.
                carry_window[i] = (rstates[rsi] & 16) >> 4
                self.tally_window[i] = (rstates[rsi] & 15)
                rsi += 1
            # Shift carries one window toward the high end for the next round.
            for i in range(vote_window_len-1, 0, -1):
                carry_window[i] = carry_window[i-1]
            self.votes_processed += 1
        return

    def getTally(self):
        """Return (votes processed so far, current 4-bit tally windows)."""
        return self.votes_processed, self.tally_window

    def getStats(self):
        """Return (state-reached counters, state-transition counters)."""
        return self.states_reached, self.states_trans
| StarcoderdataPython |
335536 | #!/router/bin/python-2.7.4
import trex_root_path
from client.trex_client import *
from common.trex_exceptions import *
import cmd
import termstyle
import os
from argparse import ArgumentParser
from pprint import pprint
import json
import time
import socket
import errno
class InteractiveTRexClient(cmd.Cmd):
    """Interactive shell wrapping the TRex client API (Python 2 code).

    Each ``do_*`` method is a shell command; its docstring is shown by the
    built-in ``help`` command of :mod:`cmd`.
    """

    intro = termstyle.green("\nInteractive shell to play with Cisco's TRex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
    prompt = '> '

    def __init__(self, trex_host, max_history_size = 100, trex_port = 8090, verbose_mode = False ):
        cmd.Cmd.__init__(self)
        self.verbose = verbose_mode
        self.trex = CTRexClient(trex_host, max_history_size, trex_daemon_port = trex_port, verbose = verbose_mode)
        # Baseline options forwarded as **kwargs to start_trex(); keys mirror
        # TRex CLI flags (traffic profile file, duration, multiplier, ...).
        self.DEFAULT_RUN_PARAMS = dict( m = 1.5,
                                        nc = True,
                                        p = True,
                                        d = 100,
                                        f = 'avl/sfr_delay_10_1g.yaml',
                                        l = 1000)
        # Working copy; mutated by update_run_params, restorable to defaults.
        self.run_params = dict(self.DEFAULT_RUN_PARAMS)
        self.decoder = json.JSONDecoder()

    def do_push_files (self, filepaths):
        """Pushes a custom file to be stored locally on TRex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
        try:
            filepaths = filepaths.split(' ')
            print termstyle.green("*** Starting pushing files ({trex_files}) to TRex. ***".format(trex_files = ', '.join(filepaths)))
            ret_val = self.trex.push_files(filepaths)
            if ret_val:
                print termstyle.green("*** End of TRex push_files method (success) ***")
            else:
                print termstyle.magenta("*** End of TRex push_files method (failed) ***")
        except IOError as inst:
            print termstyle.magenta(inst)

    def do_show_default_run_params(self,line):
        """Outputs the default TRex running parameters"""
        pprint(self.DEFAULT_RUN_PARAMS)
        print termstyle.green("*** End of default TRex running parameters ***")

    def do_show_run_params(self,line):
        """Outputs the currently configured TRex running parameters"""
        pprint(self.run_params)
        print termstyle.green("*** End of TRex running parameters ***")

    def do_update_run_params(self, json_str):
        """Updates provided parameters on TRex running configuration. Provide using JSON string"""
        if json_str:
            try:
                upd_params = self.decoder.decode(json_str)
                self.run_params.update(upd_params)
                print termstyle.green("*** End of TRex parameters update ***")
            except ValueError as inst:
                print termstyle.magenta("Provided illegal JSON string. Please try again.\n[", inst,"]")
        else:
            print termstyle.magenta("JSON configuration string is missing. Please try again.")

    def do_show_status (self, line):
        """Prompts TRex current status"""
        print self.trex.get_running_status()
        print termstyle.green("*** End of TRex status prompt ***")

    def do_show_trex_files_path (self, line):
        """Prompts the local path in which files are stored when pushed to trex server from client"""
        print self.trex.get_trex_files_path()
        print termstyle.green("*** End of trex_files_path prompt ***")

    def do_show_reservation_status (self, line):
        """Prompts if TRex is currently reserved or not"""
        if self.trex.is_reserved():
            print "TRex is reserved"
        else:
            print "TRex is NOT reserved"
        print termstyle.green("*** End of reservation status prompt ***")

    def do_reserve_trex (self, user):
        """Reserves the usage of TRex to a certain user"""
        try:
            # No user argument: reserve under the client's default identity.
            if not user:
                ret = self.trex.reserve_trex()
            else:
                ret = self.trex.reserve_trex(user.split(' ')[0])
            print termstyle.green("*** TRex reserved successfully ***")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_cancel_reservation (self, user):
        """Cancels a current reservation of TRex to a certain user"""
        try:
            if not user:
                ret = self.trex.cancel_reservation()
            else:
                ret = self.trex.cancel_reservation(user.split(' ')[0])
            print termstyle.green("*** TRex reservation canceled successfully ***")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_restore_run_default (self, line):
        """Restores original TRex running configuration"""
        self.run_params = dict(self.DEFAULT_RUN_PARAMS)
        print termstyle.green("*** End of restoring default run parameters ***")

    def do_run_until_finish (self, sample_rate):
        """Starts TRex and sample server until run is done."""
        print termstyle.green("*** Starting TRex run_until_finish scenario ***")
        if not sample_rate: # use default sample rate if not passed
            sample_rate = 5
        try:
            sample_rate = int(sample_rate)
            ret = self.trex.start_trex(**self.run_params)
            self.trex.sample_to_run_finish(sample_rate)
            print termstyle.green("*** End of TRex run ***")
        except ValueError as inst:
            print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_run_and_poll (self, sample_rate):
        """Starts TRex and sample server manually until run is done."""
        print termstyle.green("*** Starting TRex run and manually poll scenario ***")
        if not sample_rate: # use default sample rate if not passed
            sample_rate = 5
        try:
            sample_rate = int(sample_rate)
            ret = self.trex.start_trex(**self.run_params)
            last_res = dict()
            # Poll manually until the server reports the run has finished.
            while self.trex.is_running(dump_out = last_res):
                obj = self.trex.get_result_obj()
                if (self.verbose):
                    print obj
                # do WHATEVER here
                time.sleep(sample_rate)
            print termstyle.green("*** End of TRex run ***")
        except ValueError as inst:
            print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_run_until_condition (self, sample_rate):
        """Starts TRex and sample server until condition is satisfied."""
        print termstyle.green("*** Starting TRex run until condition is satisfied scenario ***")

        # Example condition: stop sampling once TX rate exceeds 200k pps.
        def condition (result_obj):
            return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000

        if not sample_rate: # use default sample rate if not passed
            sample_rate = 5
        try:
            sample_rate = int(sample_rate)
            ret = self.trex.start_trex(**self.run_params)
            ret_val = self.trex.sample_until_condition(condition, sample_rate)
            print ret_val
            print termstyle.green("*** End of TRex run ***")
        except ValueError as inst:
            print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_start_and_return (self, line):
        """Start TRex run and once in 'Running' mode, return to cmd prompt"""
        print termstyle.green("*** Starting TRex run, wait until in 'Running' state ***")
        try:
            ret = self.trex.start_trex(**self.run_params)
            print termstyle.green("*** End of scenario (TRex is probably still running!) ***")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_poll_once (self, line):
        """Performs a single poll of TRex current data dump (if TRex is running) and prompts and short version of latest result_obj"""
        print termstyle.green("*** Trying TRex single poll ***")
        try:
            last_res = dict()
            if self.trex.is_running(dump_out = last_res):
                obj = self.trex.get_result_obj()
                print obj
            else:
                print termstyle.magenta("TRex isn't currently running.")
            print termstyle.green("*** End of scenario (TRex is posssibly still running!) ***")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_stop_trex (self, line):
        """Try to stop TRex run (if TRex is currently running)"""
        print termstyle.green("*** Starting TRex termination ***")
        try:
            ret = self.trex.stop_trex()
            print termstyle.green("*** End of scenario (TRex is not running now) ***")
        except TRexException as inst:
            print termstyle.red(inst)

    def do_kill_indiscriminately (self, line):
        """Force killing of running TRex process (if exists) on the server."""
        print termstyle.green("*** Starting TRex termination ***")
        ret = self.trex.force_kill()
        # force_kill returns True on kill, None if aborted, False on failure.
        if ret:
            print termstyle.green("*** End of scenario (TRex is not running now) ***")
        elif ret is None:
            print termstyle.magenta("*** End of scenario (TRex termination aborted) ***")
        else:
            print termstyle.red("*** End of scenario (TRex termination failed) ***")

    def do_exit(self, arg):
        """Quits the application"""
        print termstyle.cyan('Bye Bye!')
        # Returning True tells cmd.Cmd to leave the command loop.
        return True
if __name__ == "__main__":
    # Command-line front end: parse connection options, then hand control to
    # the interactive shell until the user exits or the connection drops.
    parser = ArgumentParser(description = termstyle.cyan('Run TRex client API demos and scenarios.'),
                            usage = """client_interactive_example [options]""" )
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
    parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
                        action="store", help="Specify the hostname or ip to connect with TRex server.",
                        metavar="HOST" )
    parser.add_argument("-p", "--trex-port", type=int, default = 8090, metavar="PORT", dest="trex_port",
                        help="Select port on which the TRex server listens. Default port is 8090.", action="store")
    parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
                        help="Specify maximum history size saved at client side. Default size is 100.", action="store")
    parser.add_argument("--verbose", dest="verbose",
                        action="store_true", help="Switch ON verbose option at TRex client. Default is: OFF.",
                        default = False )
    args = parser.parse_args()
    try:
        InteractiveTRexClient(args.trex_host, args.hist_size, args.trex_port, args.verbose).cmdloop()
    except KeyboardInterrupt:
        # Ctrl-C: exit cleanly with a goodbye message.
        print termstyle.cyan('Bye Bye!')
        exit(-1)
    except socket.error, e:
        # Re-raise connection-refused with a friendlier explanation.
        if e.errno == errno.ECONNREFUSED:
            raise socket.error(errno.ECONNREFUSED, "Connection from TRex server was terminated. Please make sure the server is up.")
| StarcoderdataPython |
3226089 | #!/usr/bin/env python
"""Tests for grr.server.grr_response_server.hunts.results."""
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib.rdfvalues import flows as rdf_flows
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server.hunts import results as hunts_results
from grr.test_lib import aff4_test_lib
from grr.test_lib import test_lib
class ResultTest(aff4_test_lib.AFF4ObjectTest):
    """Tests for the hunt result queue/collection claim semantics."""

    def testEmptyQueue(self):
        """An empty collection yields no claimable notifications."""
        # Create and empty HuntResultCollection.
        collection_urn = rdfvalue.RDFURN("aff4:/testEmptyQueue/collection")
        hunts_results.HuntResultCollection(collection_urn)
        # The queue starts empty, and returns no notifications.
        results = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(None, results[0])
        self.assertEqual([], results[1])

    def testNotificationsContainTimestamps(self):
        """Claimed notifications carry (ts, suffix) pairs that resolve records."""
        collection_urn = rdfvalue.RDFURN(
            "aff4:/testNotificationsContainTimestamps/collection")
        with data_store.DB.GetMutationPool() as pool:
            for i in range(5):
                hunts_results.HuntResultCollection.StaticAdd(
                    collection_urn,
                    rdf_flows.GrrMessage(request_id=i),
                    mutation_pool=pool)
        # If we claim results, we should get all 5.
        results = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(collection_urn, results[0])
        self.assertEqual(5, len(results[1]))
        # Read all the results, using the contained (ts, suffix) pairs.
        values_read = []
        collection = hunts_results.HuntResultCollection(collection_urn)
        for message in collection.MultiResolve(
            [r.value.ResultRecord() for r in results[1]]):
            values_read.append(message.request_id)
        self.assertEqual(sorted(values_read), range(5))

    def testNotificationClaimsTimeout(self):
        """Claims block re-reads until the claim timeout elapses."""
        collection_urn = rdfvalue.RDFURN(
            "aff4:/testNotificationClaimsTimeout/collection")
        with data_store.DB.GetMutationPool() as pool:
            for i in range(5):
                hunts_results.HuntResultCollection.StaticAdd(
                    collection_urn,
                    rdf_flows.GrrMessage(request_id=i),
                    mutation_pool=pool)
        results_1 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(5, len(results_1[1]))
        # Check that we have a claim - that another read returns nothing.
        results_2 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(0, len(results_2[1]))
        # Push time forward past the default claim timeout, then we should be able
        # to re-read (and re-claim).
        with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                               rdfvalue.Duration("45m")):
            results_3 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
                token=self.token)
            self.assertEqual(results_3, results_1)

    def testDelete(self):
        """Deleted notifications stay gone even after the claim timeout."""
        collection_urn = rdfvalue.RDFURN("aff4:/testDelete/collection")
        with data_store.DB.GetMutationPool() as pool:
            for i in range(5):
                hunts_results.HuntResultCollection.StaticAdd(
                    collection_urn,
                    rdf_flows.GrrMessage(request_id=i),
                    mutation_pool=pool)
        results_1 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(5, len(results_1[1]))
        hunts_results.HuntResultQueue.DeleteNotifications(
            results_1[1], token=self.token)
        # Push time forward past the default claim timeout, then we should still
        # read nothing.
        with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
                               rdfvalue.Duration("45m")):
            results_2 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
                token=self.token)
            self.assertEqual(0, len(results_2[1]))

    def testNotificationsSplitByCollection(self):
        """Each claim call returns notifications for exactly one collection."""
        # Create two HuntResultCollections.
        collection_urn_1 = rdfvalue.RDFURN(
            "aff4:/testNotificationsSplitByCollection/collection_1")
        collection_urn_2 = rdfvalue.RDFURN(
            "aff4:/testNotificationsSplitByCollection/collection_2")
        # Add 100 records to each collection, in an interleaved manner.
        with data_store.DB.GetMutationPool() as pool:
            for i in range(100):
                hunts_results.HuntResultCollection.StaticAdd(
                    collection_urn_1,
                    rdf_flows.GrrMessage(request_id=i),
                    mutation_pool=pool)
                hunts_results.HuntResultCollection.StaticAdd(
                    collection_urn_2,
                    rdf_flows.GrrMessage(request_id=100 + i),
                    mutation_pool=pool)
        # The first result was added to collection 1, so this should return
        # all 100 results for collection 1.
        results_1 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(collection_urn_1, results_1[0])
        self.assertEqual(100, len(results_1[1]))
        # The first call claimed all the notifications for collection 1. These are
        # claimed, so another call should skip them and give all notifications for
        # collection 2.
        results_2 = hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            token=self.token)
        self.assertEqual(collection_urn_2, results_2[0])
        self.assertEqual(100, len(results_2[1]))
        values_read = []
        collection_2 = hunts_results.HuntResultCollection(collection_urn_2)
        for message in collection_2.MultiResolve(
            [r.value.ResultRecord() for r in results_2[1]]):
            values_read.append(message.request_id)
        self.assertEqual(sorted(values_read), range(100, 200))
def main(argv):
    """Entry point delegating to the GRR test runner."""
    test_lib.main(argv)


if __name__ == "__main__":
    flags.StartMain(main)
| StarcoderdataPython |
6429961 | import logging
import openpathsampling as paths
logger = logging.getLogger(__name__)
from .shoot_snapshots import ShootFromSnapshotsSimulation
class SShootingSimulation(ShootFromSnapshotsSimulation):
    """Set up and run an S-shooting simulation.

    Each cycle starts from a (randomized) snapshot, extends a fixed-length
    segment backward that must end inside the saddle region S, then extends
    it forward by the same fixed length, so generated trajectories have
    total length ``2 * trajectory_length + 1``.

    Parameters
    ----------
    storage : :class:`.Storage`
        the file to store simulations in
    engine : :class:`.DynamicsEngine`
        the dynamics engine to use to run the simulation
    state_S : :class:`.Volume`
        the volume representing saddle region S
    randomizer : :class:`.SnapshotModifier`
        the method used to modify the input snapshot before each shot
    initial_snapshots : list of :class:`.Snapshot`
        initial snapshots to use
    trajectory_length : int
        length l of each backward/forward extension; harvested
        subtrajectories have length l + 1
    """

    def __init__(self, storage, engine=None, state_S=None, randomizer=None,
                 initial_snapshots=None, trajectory_length=None):
        # Saddle-region definition (states A and B only matter for analysis).
        self.state_S = state_S
        self.trajectory_length = trajectory_length
        n_frames = self.trajectory_length

        # Backward target: exactly n_frames frames, then one frame inside S.
        backward_ensemble = paths.SequentialEnsemble([
            paths.LengthEnsemble(n_frames),
            paths.AllInXEnsemble(state_S) & paths.LengthEnsemble(1),
        ])
        # Forward target: the backward segment (which already ends in S)
        # continued by another n_frames frames.
        forward_ensemble = paths.SequentialEnsemble([
            paths.LengthEnsemble(n_frames),
            paths.AllInXEnsemble(state_S) & paths.LengthEnsemble(1),
            paths.LengthEnsemble(n_frames),
        ])

        super(SShootingSimulation, self).__init__(
            storage=storage,
            engine=engine,
            starting_volume=state_S,
            forward_ensemble=forward_ensemble,
            backward_ensemble=backward_ensemble,
            randomizer=randomizer,
            initial_snapshots=initial_snapshots
        )

        # First extend backward from the single starting snapshot ...
        self.backward_mover = paths.BackwardExtendMover(
            ensemble=self.starting_ensemble,
            target_ensemble=self.backward_ensemble
        )
        # ... then extend forward from the resulting backward segment.
        self.forward_mover = paths.ForwardExtendMover(
            ensemble=self.backward_ensemble,
            target_ensemble=self.forward_ensemble
        )
        # Run both extensions unconditionally -- there is no acceptance
        # condition between the backward and forward shots.
        self.mover = paths.NonCanonicalConditionalSequentialMover([
            self.backward_mover,
            self.forward_mover,
        ])

    def to_dict(self):
        """Serialize the simulation-defining attributes."""
        return {
            'state_S': self.state_S,
            'trajectory_length': self.trajectory_length,
        }

    @classmethod
    def from_dict(cls, dct):
        """Rebuild an instance from :meth:`to_dict` output."""
        restored = cls.__new__(cls)
        # Restore stored attributes in place of the auto-created ones.
        restored.state_S = dct['state_S']
        restored.trajectory_length = dct['trajectory_length']
        return restored
| StarcoderdataPython |
1673456 | """Distance util functions."""
from __future__ import annotations
from numbers import Number
from typing import Callable
from homeassistant.const import (
LENGTH,
LENGTH_CENTIMETERS,
LENGTH_FEET,
LENGTH_INCHES,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
LENGTH_MILLIMETERS,
LENGTH_YARD,
UNIT_NOT_RECOGNIZED_TEMPLATE,
)
# Length units accepted by convert(); anything else raises ValueError.
VALID_UNITS: tuple[str, ...] = (
    LENGTH_KILOMETERS,
    LENGTH_MILES,
    LENGTH_FEET,
    LENGTH_METERS,
    LENGTH_CENTIMETERS,
    LENGTH_MILLIMETERS,
    LENGTH_INCHES,
    LENGTH_YARD,
)

# Converters from each supported unit into meters (the canonical unit).
TO_METERS: dict[str, Callable[[float], float]] = {
    LENGTH_METERS: lambda meters: meters,
    LENGTH_MILES: lambda miles: miles * 1609.344,
    LENGTH_YARD: lambda yards: yards * 0.9144,
    LENGTH_FEET: lambda feet: feet * 0.3048,
    LENGTH_INCHES: lambda inches: inches * 0.0254,
    LENGTH_KILOMETERS: lambda kilometers: kilometers * 1000,
    LENGTH_CENTIMETERS: lambda centimeters: centimeters * 0.01,
    LENGTH_MILLIMETERS: lambda millimeters: millimeters * 0.001,
}

# Converters from meters back out to each supported unit.
METERS_TO: dict[str, Callable[[float], float]] = {
    LENGTH_METERS: lambda meters: meters,
    LENGTH_MILES: lambda meters: meters * 0.000621371,
    LENGTH_YARD: lambda meters: meters * 1.09361,
    LENGTH_FEET: lambda meters: meters * 3.28084,
    LENGTH_INCHES: lambda meters: meters * 39.3701,
    LENGTH_KILOMETERS: lambda meters: meters * 0.001,
    LENGTH_CENTIMETERS: lambda meters: meters * 100,
    LENGTH_MILLIMETERS: lambda meters: meters * 1000,
}
def convert(value: float, unit_1: str, unit_2: str) -> float:
    """Convert *value* from length unit *unit_1* to *unit_2*.

    Conversion goes through meters as the canonical intermediate unit.

    Raises:
        ValueError: if either unit is not in VALID_UNITS.
        TypeError: if *value* is not numeric.
    """
    if unit_1 not in VALID_UNITS:
        raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_1, LENGTH))
    if unit_2 not in VALID_UNITS:
        raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_2, LENGTH))
    if not isinstance(value, Number):
        raise TypeError(f"{value} is not of numeric type")
    # Same unit: nothing to do.  (The original also re-tested
    # ``unit_1 not in VALID_UNITS`` here, which was unreachable dead code
    # after the raise above; removed.)
    if unit_1 == unit_2:
        return value
    meters: float = TO_METERS[unit_1](value)
    return METERS_TO[unit_2](meters)
| StarcoderdataPython |
4937061 | <reponame>lectorvin/rpg
# ! - do in right now; P - programming; L - logic
#P TODO: module solid object, monsters, hero etc
#P TODO: like tkinter.Tk() but without interruption
#! TODO: hero's name
#! TODO: basic attributes (strength, fallout system?) and make them better (strength-ups)
#! TODO: quests!!!!11!1
#! TODO: door with key, npc sells key; puzzles; button which opens door(?)
# TODO: dragon-boss
# TODO: graphics like Starcraft
# TODO: not only sword
# TODO: different classes
#L TODO: not only peaceful mobs
#PL FIXED: battles like normal battles
#L TODO: random mobs' bites; random hit...
#L TODO: good dialogs with npc, make your choice
#P TODO: new program for generating pixel text
# TODO: mobs spawner
# TODO: use tactics
#! TODO: make you character by yourself; fallout system?
# TODO: levels not in line (bonus levels?); make your choice
# TODO: more things in inventary
# TODO: chest with...
# TODO: traps, poison...
# TODO: moar information
# TODO: make wiki choice: narrative RPG or dungeon crawler; Mass Effect or Diablo
import tkinter
import pygame
import pyganim
import camera
from pygame import *
pygame.init()
WIDTH = 95           # hero sprite width (px)
HEIGHT = 100         # hero sprite height (px)
SCREENWIDTH = 900    # game window width (px)
SCREENHEIGHT = 600   # game window height (px)
PLATWIDTH = PLATHEIGHT = 50  # size of one map tile / solid object (px)
BACKGROUND = ((250,250,250))  # near-white RGB; also used as the hero's colour key
screen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
inventary = []       # items the hero picked up (RealObject instances)
level_up = False     # flag: hero just passed through the door to the next level
level_down = False   # flag: hero just passed through the door to the previous level
running = level = 1  # running: main-loop flag; level: current level number
def say(title, path):
    """if you want to say something to user, like advice from npc;
    path - path of image with text, title - title of window"""
    # Shows a blocking tkinter popup: the image itself is a button and
    # clicking it closes the window.
    root = tkinter.Tk()
    root.geometry('+500+300')  # fixed screen position for the popup
    root.title(title)
    img = tkinter.PhotoImage(file=path)
    button1 = tkinter.Button(root, image=img, command=root.destroy)
    button1.pack()
    root.mainloop()  # blocks until the user clicks the image
def main_attr(hero):
    """Show a tkinter dialog letting the player pick a main attribute.

    Each button closes the dialog via its ``command`` and bumps the
    matching attribute on *hero* by one via its '<Button-1>' binding.
    """
    def strength(root):
        # NOTE: the parameter is actually the tkinter event object passed
        # by bind(), not the root window; it is unused.
        hero.strength += 1
    def agility(root):
        hero.agility += 1
    def intelligence(root):
        hero.intelligence += 1
    def luck(root):
        hero.luck += 1
    root = tkinter.Tk()
    root.geometry('+500+300')
    root.title('Choose')
    label1 = tkinter.Label(root, text='Choose your main attribute')
    button1 = tkinter.Button(root, text='strength', command=root.destroy)
    button2 = tkinter.Button(root, text='agility', command=root.destroy)
    button3 = tkinter.Button(root, text='intelligence', command=root.destroy)
    button4 = tkinter.Button(root, text='luck', command=root.destroy)
    label1.pack()
    button1.pack()
    button1.bind('<Button-1>',strength)
    button2.pack()
    button2.bind('<Button-1>',agility)
    button3.pack()
    button3.bind('<Button-1>',intelligence)
    button4.pack()
    button4.bind('<Button-1>',luck)
    root.mainloop()
def collide_the_wall(smbd, wall, xvel, yvel):
    """function for check, if smbd collide the wall(or other object);
    xvel, yvel - direction of moving"""
    # Pushes `smbd` flush against the side of `wall` it ran into.  When
    # the collider is a Monster, its velocity is turned perpendicular so
    # it patrols around obstacles instead of getting stuck.
    if xvel > 0:
        smbd.rect.right = wall.rect.left
        if isinstance(smbd, Monster):
            smbd.yvel = smbd.speed
            smbd.xvel = 0
    elif xvel < 0:
        smbd.rect.left = wall.rect.right
        if isinstance(smbd, Monster):
            smbd.yvel = -smbd.speed
            smbd.xvel = 0
    if yvel > 0:
        smbd.rect.bottom = wall.rect.top
        if isinstance(smbd, Monster):
            smbd.xvel = -smbd.speed
            smbd.yvel = 0
    elif yvel < 0:
        smbd.rect.top = wall.rect.bottom
        if isinstance(smbd, Monster):
            smbd.xvel = smbd.speed
            smbd.yvel = 0
def smth_in_inventary(type2):
    """check if some object is in inventary

    Returns True if any item in the global ``inventary`` has
    ``type1 == type2``.  (The original scanned the whole list without
    breaking; ``any`` short-circuits on the first hit.)
    """
    return any(item.type1 == type2 for item in inventary)
class SolidObject(sprite.Sprite):
    """parent class for all fixed objects"""
    def __init__(self, x, y, path):
        # x, y: top-left map position (px); path: sprite image file.
        sprite.Sprite.__init__(self)
        self.rect = pygame.rect.Rect(x, y, PLATWIDTH, PLATHEIGHT)
        self.image = pygame.image.load(path)
    def action(self):
        # Hook invoked when the hero collides with this object;
        # the base class (trees, walls, water) does nothing.
        pass
"""all fixed objects. begin"""
class LifeBlock(SolidObject):
    # Pickup that restores one life point (capped at 20) and disappears.
    def action(self):
        global hero
        if hero.life < 20:
            hero.life += 1
        self.kill()
class Coin(SolidObject):
    # Pickup that adds 10 money and disappears.
    def action(self):
        global hero
        hero.money += 10
        self.kill()
class Bed(SolidObject):
    # Sleeping (interaction key sets `doing`) shows a splash image for a
    # few seconds and fully restores the hero's life.
    def action(self):
        global hero, doing
        if doing:
            print('sleep...')
            pygame.time.delay(200)
            imga = pygame.image.load('img/text/sleep.png')
            imgarect = imga.get_rect()
            screen.blit(imga, imgarect)
            pygame.display.flip()
            pygame.time.delay(4000)  # "sleep" for 4 seconds
            hero.life = 20
            doing = False
class PlatExit(SolidObject):
    # Door to the next level.  Only usable once the hero owns a sword;
    # otherwise a warning dialog is shown.
    def action(self):
        if smth_in_inventary('sword'):
            global level_up, level
            pygame.time.delay(300)
            level_up = True   # main loop regenerates the level on this flag
            level += 1
        else:
            say('Dangerous', 'img/text/dangerous.gif')
class PlatLevelDown(SolidObject):
    # Door back to the previous level.
    def action(self):
        global level_down, level
        level -= 1
        level_down = True  # main loop regenerates the level on this flag
class NPC(SolidObject):
    # Talking to an NPC (interaction key sets `doing`) shows the current
    # level's advice image.
    def action(self):
        global doing
        if doing:
            global level
            say('advice', 'img/text/text' + str(level) + '.gif')
            doing = False
"""solid objects. end"""
class Monster(sprite.Sprite):
    """very peaceful monster, just running. attack, only if was attacked"""
    def __init__(self, x, y, path):
        # x, y: spawn position; path: sprite image file.
        sprite.Sprite.__init__(self)
        self.StartX = x  # spawn coordinates (not read back within this file)
        self.StartY = y
        self.speed = 3      # patrol speed used after bouncing off walls
        self.life = 6
        self.strength = 1   # damage dealt to the hero per hit
        self.xvel = 0
        self.yvel = -2      # starts patrolling upwards
        self.image = pygame.image.load(path)
        self.rect = pygame.rect.Rect(x, y, PLATWIDTH, PLATHEIGHT)
    def move(self, entities):
        """Advance one frame: hit the hero if touching him while the global
        ``attack`` flag is set, then patrol, bouncing off solid objects."""
        global hero
        for f in entities:
            if ((sprite.collide_rect(self, f))
                and isinstance(f, Player) and attack):
                if attack:  # NOTE(review): redundant — already tested above
                    print('Monster hits player: ', self.strength)
                    pygame.time.delay(50)
                    self.xvel = 0
                    self.yvel = 0
                    hero.life -= self.strength
        # NOTE(review): collision is resolved *before* applying movement
        # here, the opposite order to Player.update — confirm intentional.
        self.collide(self.xvel, 0, entities)
        self.rect.x += self.xvel
        self.collide(0, self.yvel, entities)
        self.rect.y += self.yvel
    def collide(self, xvel, yvel, entities):
        # Bounce off everything solid except the hero and life pickups.
        for f in entities:
            if (sprite.collide_rect(self, f)) and not(isinstance(f, Player))\
               and not(isinstance(f, LifeBlock)):
                collide_the_wall(self, f, xvel, yvel)
class RealObject(sprite.Sprite):
    """things, that will be in hero's inventary"""
    def __init__(self, x, y, path, type1):
        # type1: item kind tag (e.g. 'sword'); path: sprite image file.
        sprite.Sprite.__init__(self)
        self.type1 = type1
        self.image = pygame.image.load(path)
        self.rect = pygame.rect.Rect(x, y, PLATWIDTH, PLATHEIGHT)
    def invent_append(self, type1):
        # Pick the item up (once per kind): add it to the global
        # inventary, remove the map sprite, buff the hero and rebuild his
        # animations (the sword swaps the sprite set in Player.anim).
        if not(smth_in_inventary(type1)):
            inventary.append(self)
            self.kill()
            global hero
            hero.strength += 2
            hero.anim()
class Player(sprite.Sprite):
    """The hero: movement, pyganim animations, combat and levelling.

    Interface unchanged; the only code fix is in ``__init__`` (the start
    coordinates were assigned to discarded local variables).
    """
    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        self.xvel = 0
        self.yvel = 0
        # BUGFIX: these were bare locals (`selfStartX = x`) and were
        # silently thrown away; store them on the instance, matching the
        # `hero.selfStartX` spelling that generate_hero() assigns.
        self.selfStartX = x
        self.selfStartY = y
        self.image = Surface((WIDTH, HEIGHT))
        self.rect = pygame.rect.Rect(x, y, WIDTH, HEIGHT)
        # The background colour becomes transparent when blitting.
        self.image.set_colorkey(BACKGROUND)
    def define(self,strength,agility,intelligence,luck):
        """Set the RPG attributes and reset progress/state counters."""
        self.strength = strength
        self.agility = agility
        self.intelligence = intelligence
        self.luck = luck
        self.speed = 4   # movement speed, px per frame
        self.xp = 0
        self.level = 1
        self.life = 20
        self.money = 0
    def hero_level(self):
        # Level up every `level * 20` xp, resetting xp each time.
        if self.xp >= self.level*20:
            self.level += 1
            self.xp = 0
    def anim(self):
        """(Re)build all pyganim animations; the sprite set depends on
        whether the sword has been picked up."""
        Animation_Delay = 0.1
        if not(smth_in_inventary('sword')):
            Animation_Down = [('img/startHero/heroW2.png'),
                              ('img/startHero/heroW3.png')]
            Animation_Stay = [('img/startHero/heroW1.png', Animation_Delay)]
            Animation_Right = [('img/startHero/heroR1.png'),
                               ('img/startHero/heroR2.png'),
                               ('img/startHero/heroR3.png')]
            Animation_Left = [('img/startHero/heroL1.png'),
                              ('img/startHero/heroL2.png'),
                              ('img/startHero/heroL3.png')]
            Animation_Attack = [('img/startHero/heroW1.png'),
                                ('img/startHero/heroAtt.png')]
        else:
            Animation_Down = [('img/swordHero/heroW2.png'),
                              ('img/swordHero/heroW3.png')]
            Animation_Stay = [('img/swordHero/heroW1.png',0.1)]
            Animation_Right = [('img/swordHero/heroR1.png'),
                               ('img/swordHero/heroR2.png'),
                               ('img/swordHero/heroR3.png')]
            Animation_Left = [('img/swordHero/heroL1.png'),
                              ('img/swordHero/heroL2.png'),
                              ('img/swordHero/heroL3.png')]
            Animation_Attack = [('img/swordHero/heroW1.png'),
                                ('img/swordHero/heroAtt.png')]
        #define animation of player; use pyganim
        boltAnim = []
        for anim in Animation_Down:
            boltAnim.append((anim, Animation_Delay))
        self.boltAnimDown = pyganim.PygAnimation(boltAnim)
        self.boltAnimDown.play()
        boltAnim = []
        self.boltAnimStay = pyganim.PygAnimation(Animation_Stay)
        self.boltAnimStay.play()
        self.boltAnimStay.blit(self.image, (0, 0))
        boltAnim = []
        for anim in Animation_Right:
            boltAnim.append((anim, Animation_Delay))
        self.boltAnimRight = pyganim.PygAnimation(boltAnim)
        self.boltAnimRight.play()
        boltAnim = []
        for anim in Animation_Left:
            boltAnim.append((anim,Animation_Delay))
        self.boltAnimLeft = pyganim.PygAnimation(boltAnim)
        self.boltAnimLeft.play()
        self.image.fill(BACKGROUND)
        boltAnim = []
        for anim in Animation_Attack:
            # Attack frames run five times slower than walking frames.
            boltAnim.append((anim,Animation_Delay * 5))
        self.boltAnimAttack = pyganim.PygAnimation(boltAnim)
        self.boltAnimAttack.play()
        self.image.fill(BACKGROUND)
    def update(self, left, right, up, down, platforms):
        """Advance one frame: resolve attacks, pick the animation for the
        pressed direction, move and resolve collisions."""
        self.hero_level()
        self.image.fill(BACKGROUND)
        # Hit every monster the hero is touching while attacking.
        for m in monsters:
            if sprite.collide_rect(self, m) and attack:
                print('Player hits monster: ', self.strength)
                m.life -= self.strength
                pygame.time.delay(50)
                if m.life < 1:
                    m.kill()
                    self.xp += 10
                    # A killed monster drops a coin.
                    pf = Coin(m.rect.x-15, m.rect.y-15,
                              'img/SolidObjects/coin.png')
                    entities.add(pf)
        if left:
            self.xvel = -self.speed
            self.boltAnimLeft.blit(self.image, (0, 0))
        elif right:
            self.xvel = self.speed
            self.boltAnimRight.blit(self.image, (0, 0))
        elif up:
            self.yvel = -self.speed
            self.boltAnimDown.blit(self.image, (0, 0))
        elif down:
            self.yvel = self.speed
            self.boltAnimDown.blit(self.image, (0, 0))
        if attack:
            self.xvel = 0
            self.boltAnimAttack.blit(self.image, (0, 0))
        if not(left or right or up or down or attack):
            self.xvel = 0
            self.yvel = 0
            self.boltAnimStay.blit(self.image, (0, 0))
        self.rect.x += self.xvel
        self.collide(self.xvel, 0, platforms)
        self.rect.y += self.yvel
        self.collide(0, self.yvel, platforms)
    def collide(self, xvel, yvel, platforms):
        # Iterates the full global entity group (not just `platforms`):
        # pickups and doors trigger their actions, everything else acts
        # as a wall.
        for e in entities:
            if sprite.collide_rect(self, e):
                """special collides, not tree and wall"""
                if isinstance(e, SolidObject):
                    e.action()
                elif isinstance(e, RealObject):
                    e.invent_append(e.type1)
                if not(isinstance(e, Player)):
                    collide_the_wall(self, e, xvel,yvel)
def generate_hero(x, y):
    """this function generate hero near door to previous or next level
    door to previous level has x == 0
    door to next level has x == SreenWidth
    if y is too big (bigger than 300) hero can spawn on tree"""
    global hero
    # Offset the spawn point 100 px away from the door, into the map.
    if x == 0:
        x1 = 100
    else:
        x1 = x - 100
    if y > 300: #FIXEDME: wrong logic?
        y1 = y - 10
    else:
        y1 = y + 10
    # First-time entry (level 1 with level_up set — presumably only true
    # on the initial generate; verify): build the hero.  Otherwise reuse
    # the existing instance at the new position.
    if level == 1 and level_up:
        hero = Player(x1, y1)
    else:
        hero.selfStartX = x1
        hero.selfStartY = y1
        hero.xvel = 0
        hero.yvel = 0
        hero.rect = pygame.rect.Rect(x1, y1, WIDTH, HEIGHT)
    entities.add(hero)
    hero.anim()  # rebuild animations for the (possibly new) sprite set
def generate():
    """Build the current level from its text map file.

    Reads ``levels/level<N>.txt`` character by character; each symbol
    becomes a sprite.  Everything goes into the global ``entities``
    group; objects the hero can collide with also go into ``platforms``.
    The camera, background image and hero spawn point are (re)created.
    """
    global monsters, entities, platforms, camera_, back_image
    monsters = pygame.sprite.Group()
    entities = pygame.sprite.Group()
    platforms = []
    x = y = 0
    """ open file with next or previous level, initial all object;
    add them in entities; objects, that will be on map whole of
    game, will be pushed in platforms to check if hero collide them"""
    # BUGFIX: the path used a backslash ('levels\level...'), which only
    # resolves on Windows; a forward slash works on every platform.
    with open('levels/level' + str(level) + '.txt') as f:
        for row in f:
            for col in row:
                if col == '&': # sword
                    pf = RealObject(x, y, 'img/RealObjects/sword1.png', 'sword')
                    entities.add(pf)
                elif col == 'P': # npc
                    pf = NPC(x, y, 'img/npc/npc' + str(level) + '.png')
                    entities.add(pf)
                    platforms.append(pf)
                elif col == 'S': # bed
                    pf = Bed(x, y, 'img/SolidObjects/bed.png')
                    entities.add(pf)
                    platforms.append(pf)
                elif col == '%': # life
                    pf = LifeBlock(x+10, y+10, 'img/SolidObjects/life.png')
                    entities.add(pf)
                elif col == '-': # tree
                    pf = SolidObject(x, y, 'img/SolidObjects/tree.png')
                    platforms.append(pf)
                    entities.add(pf)
                elif col == '/': # level_up, door to next level
                    pf = PlatExit(x, y, 'img/SolidObjects/exit.png')
                    platforms.append(pf)
                    entities.add(pf)
                    if level_down:
                        # if we return from previous level,
                        # we spawn near door to next level
                        generate_hero(x,y)
                elif col == '!': #level_down, door to previous level
                    # on first level there isn't door on previous
                    # level so we need add 2 trees instead of it
                    if level != 1:
                        pf = PlatLevelDown(x, y, 'img/SolidObjects/exit.png')
                        platforms.append(pf)
                        entities.add(pf)
                    else:
                        pf = SolidObject(x, y, 'img/SolidObjects/tree.png')
                        platforms.append(pf)
                        entities.add(pf)
                        pf = SolidObject(x, y+50, 'img/SolidObjects/tree.png')
                        platforms.append(pf)
                        entities.add(pf)
                    if level_up or level == 1:
                        # if we goint to next level,
                        # we spawn near door to previous level
                        generate_hero(x, y)
                elif col == 'D': # walls of house
                    pf = SolidObject(x, y, 'img/SolidObjects/wall.png')
                    platforms.append(pf)
                    entities.add(pf)
                elif col == '#': #water
                    pf = SolidObject(x, y, 'img/SolidObjects/water.png')
                    platforms.append(pf)
                    entities.add(pf)
                elif col == '*': # mobs
                    pf = Monster(x, y, 'img/monsters/monster.png')
                    monsters.add(pf)
                x += PLATWIDTH
            y += PLATHEIGHT
            x = 0
    # Map width is taken from the last row read; all rows are assumed to
    # have equal length — TODO confirm against the level files.
    camera_ = camera.Camera(camera.camera_configure, len(row) * PLATWIDTH, y)
    path = "img/background/img" + str(level) + ".png"
    back_image = camera.BackImage(path)
# --- game start-up: build level 1, show the intro advice, pick attributes ---
level_up = True   # forces generate_hero() to create the hero at the spawn door
generate()
timer = pygame.time.Clock()
level_up = left = right = up = down = attack = doing = False
say('advice', 'img/text/text0.gif')
hero.define(2,5,5,5)
main_attr(hero)
while running:
    # right now, while window isn't closed
    if level_up or level_down:
        generate()
        level_up = level_down = False
        print('------------------')
    # BUGFIX: was `if not(hero.life):`, which fires only at exactly 0 and
    # never if a hit takes life negative.
    if hero.life <= 0:
        say('Dead','img/text/dead.gif')
        # BUGFIX: restore life on respawn — without this the death dialog
        # reappeared forever because life stayed at 0.
        hero.life = 20
        level = 2
        generate()
    for m in monsters:
        m.move(entities)
    timer.tick(60)  # cap the frame rate at 60 FPS
    hero.update(left, right, up, down, platforms)
    camera_.update(hero)
    back_image.show(camera_, screen)
    for e in entities:
        screen.blit(e.image, camera_.apply(e))
    for m in monsters:
        screen.blit(m.image, camera_.apply(m))
    # The window title doubles as the HUD.
    pygame.display.set_caption('Level ' + str(level) +\
                               ' life ' + str(hero.life) + ' money ' +\
                               str(hero.money) + ', level: '+ str(hero.level) +\
                               ', xp: ' + str(hero.xp) +\
                               ', Attributes: strength - ' + str(hero.strength) +\
                               ', agility - ' + str(hero.agility) + ', luck - ' +\
                               str(hero.luck) + ', intelligence - ' +\
                               str(hero.intelligence) )
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = 0
        # A new action is accepted only while no other key is held.
        if not(up or down or right or left or attack):
            if event.type == KEYDOWN:
                if event.key == K_UP or event.key == K_w:
                    up = True
                elif event.key == K_LEFT or event.key == K_a:
                    left = True
                elif event.key == K_DOWN or event.key == K_s:
                    down = True
                elif event.key == K_RIGHT or event.key == K_d:
                    right = True
                elif event.key == K_f:
                    attack = True
                elif event.key == K_e:
                    doing = True
                elif event.key == K_F1:
                    say('Pause', 'img/text/pause.gif')
        if event.type == KEYUP:
            if event.key == K_UP or event.key == K_w:
                up = False
            elif event.key == K_LEFT or event.key == K_a:
                left = False
            elif event.key == K_DOWN or event.key == K_s:
                down = False
            elif event.key == K_RIGHT or event.key == K_d:
                right = False
            elif event.key == K_f:
                attack = False
    pygame.display.flip()
pygame.display.quit()
| StarcoderdataPython |
1724937 | <reponame>chembl/chembl_new_webservices
__author__ = 'mnowotka'
import StringIO
from chembl_beaker.beaker.draw import cairoCanvas
from chembl_beaker.beaker import draw
from collections import defaultdict
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ImportError:
Chem = None
Draw = None
AllChem = None
try:
from chembl_beaker.beaker.draw import DrawingOptions
except ImportError:
DrawingOptions = None
NEW_RENDER_ENGINE = False
try:
from rdkit.Chem.Draw import rdMolDraw2D
if hasattr(rdMolDraw2D, 'MolDraw2DCairo') and hasattr(rdMolDraw2D, 'MolDraw2DSVG'):
NEW_RENDER_ENGINE = True
except:
pass
try:
import indigo
from indigo import IndigoException
import indigo_renderer
indigoObj = indigo.Indigo()
except ImportError:
indigo = None
indigo_renderer = None
try:
import cairo
cffi = False
except ImportError:
import cairocffi
cairocffi.install_as_pycairo()
cffi = True
import io
import cairo
if not hasattr(cairo, 'HAS_PDF_SURFACE'):
cairo.HAS_PDF_SURFACE = False
if not hasattr(cairo, 'HAS_SVG_SURFACE'):
cairo.HAS_SVG_SURFACE = True
# ----------------------------------------------------------------------------------------------------------------------
# Shared RDKit drawing options used by the legacy renderer.
options = DrawingOptions()
options.useFraction = 1.0
options.dblBondOffset = .13
options.bgColor = None  # no (transparent) background by default

# Allowed queryset filter suffixes per field type.
NUMBER_FILTERS = ['exact', 'range', 'gt', 'gte', 'lt', 'lte', 'in', 'isnull']
FLAG_FILTERS = ['exact', 'isnull']
CHAR_FILTERS = ['exact', 'iexact', 'contains', 'icontains', 'istartswith', 'startswith', 'endswith', 'iendswith',
                'search', 'regex', 'iregex', 'isnull', 'in']
DATE_FILTERS = ['exact', 'year', 'month', 'day', 'week_day', 'isnull']
# Atomic number -> RGB (0..1) element colour map used by the legacy
# renderer when colouring is enabled; key 0 is the fallback colour.
STANDARD_RDKIT_COLORS = {16: (0.8, 0.8, 0), 1: (0.55, 0.55, 0.55), 35: (0.5, 0.3, 0.1), 17: (0, 0.8, 0),
                         0: (0.5, 0.5, 0.5), 7: (0, 0, 1), 8: (1, 0, 0), 9: (0.2, 0.8, 0.8), 15: (1, 0.5, 0)}
# ----------------------------------------------------------------------------------------------------------------------
COLOR_NAMES = {
'aliceblue': (0.941176, 0.972549, 1),
'antiquewhite': (0.980392, 0.921569, 0.843137),
'aquamarine': (0.498039, 1, 0.831373),
'azure': (0.941176, 1, 1),
'beige': (0.960784, 0.960784, 0.862745),
'bisque': (1, 0.894118, 0.768627),
'black': (0, 0, 0),
'blanchedalmond': (1, 0.921569, 0.803922),
'blue': (0, 0, 1),
'blueviolet': (0.541176, 0.168627, 0.886275),
'brown': (0.647059, 0.164706, 0.164706),
'burlywood': (0.870588, 0.721569, 0.529412),
'cadetblue': (0.372549, 0.619608, 0.627451),
'chartreuse': (0.498039, 1, 0),
'chocolate': (0.823529, 0.411765, 0.117647),
'coral': (1, 0.498039, 0.313725),
'cornflowerblue': (0.392157, 0.584314, 0.929412),
'cornsilk': (1, 0.972549, 0.862745),
'crimson': (0.862745, 0.0784314, 0.235294),
'cyan': (0, 1, 1),
'darkblue': (0, 0, 0.545098),
'darkcyan': (0, 0.545098, 0.545098),
'darkgoldenrod': (0.721569, 0.52549, 0.0431373),
'darkgray': (0.662745, 0.662745, 0.662745),
'darkgreen': (0, 0.392157, 0),
'darkgrey': (0.662745, 0.662745, 0.662745),
'darkkhaki': (0.741176, 0.717647, 0.419608),
'darkmagenta': (0.545098, 0, 0.545098),
'darkolivegreen': (0.333333, 0.419608, 0.184314),
'darkorange': (1, 0.54902, 0),
'darkorchid': (0.6, 0.196078, 0.8),
'darkred': (0.545098, 0, 0),
'darksalmon': (0.913725, 0.588235, 0.478431),
'darkseagreen': (0.560784, 0.737255, 0.560784),
'darkslateblue': (0.282353, 0.239216, 0.545098),
'darkslategray': (0.184314, 0.309804, 0.309804),
'darkslategrey': (0.184314, 0.309804, 0.309804),
'darkturquoise': (0, 0.807843, 0.819608),
'darkviolet': (0.580392, 0, 0.827451),
'deeppink': (1, 0.0784314, 0.576471),
'deepskyblue': (0, 0.74902, 1),
'dimgray': (0.411765, 0.411765, 0.411765),
'dimgrey': (0.411765, 0.411765, 0.411765),
'dodgerblue': (0.117647, 0.564706, 1),
'firebrick': (0.698039, 0.133333, 0.133333),
'floralwhite': (1, 0.980392, 0.941176),
'forestgreen': (0.133333, 0.545098, 0.133333),
'gainsboro': (0.862745, 0.862745, 0.862745),
'ghostwhite': (0.972549, 0.972549, 1),
'gold': (1, 0.843137, 0),
'goldenrod': (0.854902, 0.647059, 0.12549),
'gray': (0.745098, 0.745098, 0.745098),
'green': (0, 1, 0),
'greenyellow': (0.678431, 1, 0.184314),
'grey': (0.745098, 0.745098, 0.745098),
'honeydew': (0.941176, 1, 0.941176),
'hotpink': (1, 0.411765, 0.705882),
'indianred': (0.803922, 0.360784, 0.360784),
'indigo': (0.294118, 0, 0.509804),
'ivory': (1, 1, 0.941176),
'khaki': (0.941176, 0.901961, 0.54902),
'lavender': (0.901961, 0.901961, 0.980392),
'lavenderblush': (1, 0.941176, 0.960784),
'lawngreen': (0.486275, 0.988235, 0),
'lemonchiffon': (1, 0.980392, 0.803922),
'lightblue': (0.678431, 0.847059, 0.901961),
'lightcoral': (0.941176, 0.501961, 0.501961),
'lightcyan': (0.878431, 1, 1),
'lightgoldenrod': (0.933333, 0.866667, 0.509804),
'lightgoldenrodyellow': (0.980392, 0.980392, 0.823529),
'lightgray': (0.827451, 0.827451, 0.827451),
'lightgreen': (0.564706, 0.933333, 0.564706),
'lightgrey': (0.827451, 0.827451, 0.827451),
'lightpink': (1, 0.713725, 0.756863),
'lightsalmon': (1, 0.627451, 0.478431),
'lightseagreen': (0.12549, 0.698039, 0.666667),
'lightskyblue': (0.529412, 0.807843, 0.980392),
'lightslateblue': (0.517647, 0.439216, 1),
'lightslategray': (0.466667, 0.533333, 0.6),
'lightslategrey': (0.466667, 0.533333, 0.6),
'lightsteelblue': (0.690196, 0.768627, 0.870588),
'lightyellow': (1, 1, 0.878431),
'limegreen': (0.196078, 0.803922, 0.196078),
'linen': (0.980392, 0.941176, 0.901961),
'magenta': (1, 0, 1),
'maroon': (0.690196, 0.188235, 0.376471),
'mediumaquamarine': (0.4, 0.803922, 0.666667),
'mediumblue': (0, 0, 0.803922),
'mediumorchid': (0.729412, 0.333333, 0.827451),
'mediumpurple': (0.576471, 0.439216, 0.858824),
'mediumseagreen': (0.235294, 0.701961, 0.443137),
'mediumslateblue': (0.482353, 0.407843, 0.933333),
'mediumspringgreen': (0, 0.980392, 0.603922),
'mediumturquoise': (0.282353, 0.819608, 0.8),
'mediumvioletred': (0.780392, 0.0823529, 0.521569),
'midnightblue': (0.0980392, 0.0980392, 0.439216),
'mintcream': (0.960784, 1, 0.980392),
'mistyrose': (1, 0.894118, 0.882353),
'moccasin': (1, 0.894118, 0.709804),
'navajowhite': (1, 0.870588, 0.678431),
'navy': (0, 0, 0.501961),
'navyblue': (0, 0, 0.501961),
'oldlace': (0.992157, 0.960784, 0.901961),
'olivedrab': (0.419608, 0.556863, 0.137255),
'orange': (1, 0.647059, 0),
'orangered': (1, 0.270588, 0),
'orchid': (0.854902, 0.439216, 0.839216),
'palegoldenrod': (0.933333, 0.909804, 0.666667),
'palegreen': (0.596078, 0.984314, 0.596078),
'paleturquoise': (0.686275, 0.933333, 0.933333),
'palevioletred': (0.858824, 0.439216, 0.576471),
'papayawhip': (1, 0.937255, 0.835294),
'peachpuff': (1, 0.854902, 0.72549),
'peru': (0.803922, 0.521569, 0.247059),
'pink': (1, 0.752941, 0.796078),
'plum': (0.866667, 0.627451, 0.866667),
'powderblue': (0.690196, 0.878431, 0.901961),
'purple': (0.627451, 0.12549, 0.941176),
'red': (1, 0, 0),
'rosybrown': (0.737255, 0.560784, 0.560784),
'royalblue': (0.254902, 0.411765, 0.882353),
'saddlebrown': (0.545098, 0.270588, 0.0745098),
'salmon': (0.980392, 0.501961, 0.447059),
'sandybrown': (0.956863, 0.643137, 0.376471),
'seagreen': (0.180392, 0.545098, 0.341176),
'seashell': (1, 0.960784, 0.933333),
'sgibeet': (0.556863, 0.219608, 0.556863),
'sgibrightgray': (0.772549, 0.756863, 0.666667),
'sgibrightgrey': (0.772549, 0.756863, 0.666667),
'sgichartreuse': (0.443137, 0.776471, 0.443137),
'sgidarkgray': (0.333333, 0.333333, 0.333333),
'sgidarkgrey': (0.333333, 0.333333, 0.333333),
'sgilightblue': (0.490196, 0.619608, 0.752941),
'sgilightgray': (0.666667, 0.666667, 0.666667),
'sgilightgrey': (0.666667, 0.666667, 0.666667),
'sgimediumgray': (0.517647, 0.517647, 0.517647),
'sgimediumgrey': (0.517647, 0.517647, 0.517647),
'sgiolivedrab': (0.556863, 0.556863, 0.219608),
'sgisalmon': (0.776471, 0.443137, 0.443137),
'sgislateblue': (0.443137, 0.443137, 0.776471),
'sgiteal': (0.219608, 0.556863, 0.556863),
'sgiverydarkgray': (0.156863, 0.156863, 0.156863),
'sgiverydarkgrey': (0.156863, 0.156863, 0.156863),
'sgiverylightgray': (0.839216, 0.839216, 0.839216),
'sgiverylightgrey': (0.839216, 0.839216, 0.839216),
'sienna': (0.627451, 0.321569, 0.176471),
'skyblue': (0.529412, 0.807843, 0.921569),
'slateblue': (0.415686, 0.352941, 0.803922),
'slategray': (0.439216, 0.501961, 0.564706),
'slategrey': (0.439216, 0.501961, 0.564706),
'snow': (1, 0.980392, 0.980392),
'springgreen': (0, 1, 0.498039),
'steelblue': (0.27451, 0.509804, 0.705882),
'tan': (0.823529, 0.705882, 0.54902),
'thistle': (0.847059, 0.74902, 0.847059),
'tomato': (1, 0.388235, 0.278431),
'turquoise': (0.25098, 0.878431, 0.815686),
'violet': (0.933333, 0.509804, 0.933333),
'violetred': (0.815686, 0.12549, 0.564706),
'wheat': (0.960784, 0.870588, 0.701961),
'white': (1, 1, 1),
'whitesmoke': (0.960784, 0.960784, 0.960784),
'yellow': (1, 1, 0),
'yellowgreen': (0.603922, 0.803922, 0.196078),
}
# ----------------------------------------------------------------------------------------------------------------------
def render_indigo(mol, options, frmt, margin, size, colors, ignoreCoords):
    """Render an Indigo molecule to an image buffer.

    mol: Indigo molecule; frmt: output format name (e.g. 'png', 'svg');
    margin: render margin in px (applied to both axes); size: square
    image edge in px; colors: truthy to enable atom colouring;
    ignoreCoords: recompute the 2D layout instead of using stored
    coordinates.  Returns the raw rendered image bytes.
    """
    renderer = indigo_renderer.IndigoRenderer(indigoObj)
    if options and hasattr(options, 'bgColor') and options.bgColor:
        # bgColor is an (r, g, b) tuple formatted into Indigo's option string.
        indigoObj.setOption("render-background-color", "%s, %s, %s" % options.bgColor)
    indigoObj.setOption("render-output-format", frmt)
    indigoObj.setOption("render-margins", margin, margin)
    indigoObj.setOption("render-image-size", size, size)
    indigoObj.setOption("render-coloring", colors)
    indigoObj.setOption("ignore-stereochemistry-errors", "true")
    if ignoreCoords:
        mol.layout()
    image = renderer.renderToBuffer(mol)
    return image.tostring()
# ----------------------------------------------------------------------------------------------------------------------
def render_rdkit(mol, highlight, options, frmt, size, colors, ignoreCoords):
    """Render an RDKit molecule, dispatching to the modern MolDraw2D
    engine when available and to the legacy PIL/cairo code otherwise.

    highlight: optional atom indices to highlight; ignoreCoords: recompute
    2D coordinates before drawing.  The molecule's "_Name" property, when
    present, becomes the image legend.
    """
    legend = None
    if mol.HasProp("_Name"):
        legend = mol.GetProp("_Name")
    matching = highlight if highlight else []
    if ignoreCoords:
        AllChem.Compute2DCoords(mol)
    renderer = (render_rdkit_modern_rendering
                if NEW_RENDER_ENGINE else render_rdkit_legacy)
    return renderer(mol, matching, options, frmt, size, colors, legend)
# ----------------------------------------------------------------------------------------------------------------------
def render_rdkit_modern_rendering(mol, highlight, options, frmt, size, colors, legend):
    """Render *mol* with RDKit's MolDraw2D engine.

    frmt: 'png' or 'svg' (anything else returns None); size: square image
    edge in px; highlight: atom indices to highlight; colors: falsy to
    force a black-and-white atom palette; legend: caption drawn under the
    molecule.  Returns PNG bytes or SVG text.
    """
    if frmt == 'png':
        drawer = rdMolDraw2D.MolDraw2DCairo(size, size)
    elif frmt == 'svg':
        drawer = rdMolDraw2D.MolDraw2DSVG(size, size)
    else:
        return
    opts = drawer.drawOptions()
    if hasattr(options, 'bgColor') and options.bgColor:
        opts.setBackgroundColour(options.bgColor)
    else:
        # No explicit background colour -> transparent image.
        opts.clearBackground = False
    if not colors:
        opts.useBWAtomPalette()
    else:
        opts.useDefaultAtomPalette()
    # Ensure ring information is available before drawing.
    Chem.GetSSSR(mol)
    drawer.DrawMolecule(mol, highlightAtoms=highlight, legend=legend)
    drawer.FinishDrawing()
    return drawer.GetDrawingText()
# ----------------------------------------------------------------------------------------------------------------------
def render_rdkit_legacy(mol, highlight, options, frmt, size, colors, legend):
    """Render *mol* with the legacy PIL/cairo drawing code.

    Returns PNG bytes or SVG text depending on *frmt* (anything else
    returns None).  *colors* falsy forces an all-black element palette.
    """
    if not colors:
        # defaultdict makes every element map to black.
        dd = defaultdict(lambda: (0, 0, 0))
        options.elemDict = dd
    else:
        options.elemDict = STANDARD_RDKIT_COLORS
    if frmt == 'png':
        buf = StringIO.StringIO()
        image = draw.MolToImage(mol, size=(size, size), legend=legend, fitImage=True, options=options,
                                highlightAtoms=highlight)
        image.save(buf, "PNG")
        return buf.getvalue()
    elif frmt == 'svg':
        # Older cairocffi (<= 1.10.0) needs a bytes buffer for SVG surfaces.
        if cffi and cairocffi.version <= (1, 10, 0):
            imageData = io.BytesIO()
        else:
            imageData = StringIO.StringIO()
        surf = cairo.SVGSurface(imageData, size, size)
        ctx = cairo.Context(surf)
        canv = cairoCanvas.Canvas(ctx=ctx, size=(size, size), imageType='svg')
        draw.MolToImage(mol, size=(size, size), legend=legend, canvas=canv, fitImage=True, options=options,
                        highlightAtoms=highlight)
        canv.flush()
        surf.finish()
        return imageData.getvalue()
# ----------------------------------------------------------------------------------------------------------------------
def highlight_substructure_rdkit(molstring, smarts):
    """Match a SMARTS pattern against a molblock using RDKit.

    Returns a (mol, matched_atom_indices) tuple on success, or None when
    the molblock or SMARTS fails to parse or there is no substructure
    match.
    """
    mol = Chem.MolFromMolBlock(str(molstring), sanitize=True)
    if not mol:
        return
    mol.UpdatePropertyCache(strict=False)
    patt = Chem.MolFromSmarts(str(smarts))
    if not patt:
        return
    # Ring perception is required before substructure matching.
    Chem.GetSSSR(patt)
    Chem.GetSSSR(mol)
    match = mol.HasSubstructMatch(patt)
    if not match:
        return
    matching = mol.GetSubstructMatch(patt)
    return mol, matching
# ----------------------------------------------------------------------------------------------------------------------
def highlight_substructure_indigo(molstring, smarts):
    """Match a SMARTS pattern against a molecule using Indigo.

    Returns the target molecule with the matched substructure
    highlighted, or None on a parse/match error or when nothing matches.
    """
    try:
        mol = indigoObj.loadMolecule(str(molstring))
        patt = indigoObj.loadSmarts(str(smarts))
        match = indigoObj.substructureMatcher(mol).match(patt)
    except IndigoException:
        return
    if not match:
        return
    return match.highlightedTarget()
# ----------------------------------------------------------------------------------------------------------------------
def represents_int(s):
    """Return True if *s* can be converted by ``int()``, else False.

    BUGFIX: also catches TypeError — the original only caught ValueError,
    so non-string/non-numeric inputs such as None or a list crashed
    instead of returning False.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
# ----------------------------------------------------------------------------------------------------------------------
def list_flatten(l, a=None):
    """Recursively flatten arbitrarily nested lists.

    Items are appended depth-first to the accumulator *a* (a fresh list
    is created when none is supplied) and the accumulator is returned.
    """
    acc = [] if a is None else a
    for item in l:
        if not isinstance(item, list):
            acc.append(item)
        else:
            list_flatten(item, acc)
    return acc
# ----------------------------------------------------------------------------------------------------------------------
def unpack_request_params(params):
    """Collapse single-element string lists in (key, value) pairs.

    Querystring parsers often yield values like ['foo']; this turns a
    (key, ['foo']) pair into (key, 'foo') while leaving multi-value or
    non-string entries untouched.  NOTE: uses ``basestring`` and is
    therefore Python 2 only (consistent with the module's StringIO use).
    """
    ret = []
    for x in params:
        first, second = x
        if type(second) == list and len(second) == 1 and isinstance(second[0], basestring):
            ret.append((first, second[0]))
        else:
            ret.append(x)
    return ret
# ---------------------------------------------------------------------------------------------------------------------- | StarcoderdataPython |
227449 | ####################################################################################################
from stomasimulator.geom.point import Point
import stomasimulator.mesh.elements as elem
####################################################################################################
class TestElements(object):
    """Tests for the Jacobian-ratio quality metric of Hex8 elements."""
    def get_hex8(self, a=1.0, b=1.0, c=1.0, skew=0.0):
        # Build an a x b x c hexahedron; `skew` lifts the z of the first
        # base corner and of the whole top face, so a non-zero skew makes
        # the element non-rectangular.
        pts = ( Point(0, 0, skew, allocate_id=True ),
                Point(a, 0, 0, allocate_id=True ),
                Point(a, b, 0, allocate_id=True ),
                Point(0, b, 0, allocate_id=True ),
                Point(0, 0, c + skew, allocate_id=True ),
                Point(a, 0, c + skew, allocate_id=True ),
                Point(a, b, c + skew, allocate_id=True ),
                Point(0, b, c + skew, allocate_id=True ) )
        # Map node id -> Point, the lookup shape the element API expects.
        pts_map = { pt.id: pt for pt in pts }
        ele = elem.Hex8Element( [ pt.id for pt in pts ] )
        return ele, pts_map
    def test_jacobian_cube(self):
        # A unit cube is perfectly shaped: ratio must be exactly 1.
        ele, pts_map = self.get_hex8()
        Jratio = ele.jacobian_ratio( pts_map )
        assert Jratio == 1.0
    def test_jacobian_rect(self):
        # Any rectangular box is still perfectly shaped.
        ele, pts_map = self.get_hex8( a=1.0, b=2.0, c=3.0 )
        Jratio = ele.jacobian_ratio( pts_map )
        assert Jratio == 1.0
    def test_jacobian_rect_skew(self):
        ele, pts_map = self.get_hex8( skew=1.0 )
        # tweak one point to make the faces non-parallel
        pts_map[ 10 ] = Point( 0, 0, -1 )
        Jratio = ele.jacobian_ratio( pts_map )
        assert Jratio != 1.0
####################################################################################################
| StarcoderdataPython |
1948879 |
import aiomisc
import os
import zipfile
import tempfile
from errors_module.errors import APIBadRequest
from loguru import logger
import shutil
import hashlib as hash
from typing import Any, Dict, Tuple, Callable
import datetime
# from memory_profiler import profile
# @profile
@aiomisc.threaded_separate
def extract(src_path: str, dst_path_prefix: str, config: Dict[str, Any], datasource_name: str, username: str) -> Tuple[str, str]:
    """
    src_path : where the user has downloaded their ZIP file,
    temp_directory = tempfile.TemporaryDirectory()

    NOTE(review): dead code — this definition is immediately shadowed by
    the second ``extract`` below (which additionally handles bare .mbox
    files).  Kept byte-for-byte; consider deleting this copy.
    Returns (sha256_checksum, extraction_path).
    """
    # temp_directory = tempfile.mkdtemp()
    logger.info("Entered into the extract function")
    if not os.path.exists(src_path):
        raise APIBadRequest("This path doesnt exists")
    # try:
    #     the_zip_file = zipfile.ZipFile(src_path)
    # except:
    #     raise APIBadRequest("Invalid zip file")
    # logger.info(f"Testing zip {src_path} file")
    # ret = the_zip_file.testzip()
    # if ret is not None:
    #     raise APIBadRequest(f"Invalid zip datasource_name file")
    _checksum = checksum(src_path)
    # Reject archives we have already seen (same SHA-256).
    archives_present = get_archives(config[datasource_name]["tables"]["archives_table"], _checksum)
    if archives_present:
        raise APIBadRequest("Zip file already have been uploaded")
    utc_timestamp = datetime.datetime.utcnow().strftime("%d-%m-%Y")
    dst_path_suffix = f"{utc_timestamp}-{_checksum}"
    logger.info(f"This is the new destination suffix {dst_path_suffix}")
    dst_path = os.path.join(dst_path_prefix, username, dst_path_suffix)
    try:
        with zipfile.ZipFile(src_path) as zf:
            zf.extractall(dst_path)
        #shutil.unpack_archive(src_path, extract_dir=dst_path, format=None)
    except MemoryError:
        logger.error(f"We ran out of memory while processing {datasource_name}, Please try again")
        raise Exception(f"We ran out of memory while processing {datasource_name}, Please try again")
    except:
        raise APIBadRequest(f"Invalid zip {datasource_name} file")
    logger.info(f"Setting new archival for {datasource_name} ")
    set_archives(config[datasource_name]["tables"]["archives_table"], dst_path, username, _checksum)
    logger.info(f"This is the dst_path for {datasource_name} is {dst_path}")
    return _checksum, dst_path
# @profile
@aiomisc.threaded_separate
def extract(src_path: str, dst_path_prefix: str, config: Dict[str, Any], datasource_name: str, username: str) -> Tuple[str, str]:
    """
    src_path : where the user has downloaded their ZIP file,
    temp_directory = tempfile.TemporaryDirectory()

    Unpacks the upload into <dst_path_prefix>/<username>/<date>-<sha256>/
    and records it in the datasource's archives table.  A bare ``.mbox``
    file (Google Takeout mail) is copied into a synthetic Takeout/Mail
    directory instead of being unzipped.  Raises APIBadRequest when the
    path is missing, the checksum was seen before, or the zip is invalid.
    Returns (sha256_checksum, extraction_path).
    """
    # temp_directory = tempfile.mkdtemp()
    logger.info("Entered into the extract function")
    if not os.path.exists(src_path):
        raise APIBadRequest("This path doesnt exists")
    _checksum = checksum(src_path)
    # Reject archives we have already seen (same SHA-256).
    archives_present = get_archives(config[datasource_name]["tables"]["archives_table"], _checksum)
    if archives_present:
        raise APIBadRequest("Zip file already have been uploaded")
    utc_timestamp = datetime.datetime.utcnow().strftime("%d-%m-%Y")
    dst_path_suffix = f"{utc_timestamp}-{_checksum}"
    logger.info(f"This is the new destination suffix {dst_path_suffix}")
    dst_path = os.path.join(dst_path_prefix, username, dst_path_suffix)
    if src_path.endswith(".mbox"):
        logger.debug("This is a bare Mbox file from Google takeout")
        mail_path = os.path.join(dst_path, "Takeout/Mail")
        os.makedirs(mail_path)
        shutil.copy(src_path, mail_path)
    else:
        try:
            with zipfile.ZipFile(src_path) as zf:
                zf.extractall(dst_path)
            #shutil.unpack_archive(src_path, extract_dir=dst_path, format=None)
        except MemoryError:
            logger.error(f"We ran out of memory while processing {datasource_name}, Please try again")
            raise Exception(f"We ran out of memory while processing {datasource_name}, Please try again")
        except:
            raise APIBadRequest(f"Invalid zip {datasource_name} file")
    logger.info(f"Setting new archival for {datasource_name} ")
    set_archives(config[datasource_name]["tables"]["archives_table"], dst_path, username, _checksum)
    logger.info(f"This is the dst_path for {datasource_name} is {dst_path}")
    return _checksum, dst_path
def remove_temporary_archive(self, file_name):
    """Best-effort removal of a temporary archive file or directory.

    NOTE(review): defined at module level but takes ``self`` — looks
    copied out of a class; the signature is kept for compatibility and
    ``self`` is unused.

    BUGFIX: the original called ``os.removedirs``, which only removes
    *empty* directories and always fails on plain files (its own log
    message says "file").  Now removes files with ``os.remove`` and
    directory trees with ``shutil.rmtree``; failures are still only
    logged, never raised.
    """
    logger.warning(f"Removing temporary backup file {file_name}")
    try:
        if os.path.isdir(file_name):
            shutil.rmtree(file_name)
        else:
            os.remove(file_name)
    except Exception as e:
        logger.info(f"couldnt remove temporary archive file {file_name} with error {e}")
    return
def get_archives(archives_table, checksum):
    """Return True if a row with this checksum exists in *archives_table*,
    False otherwise (including on any query error).

    The trailing unreachable ``return`` after the try/except (dead code in
    the original) has been removed; behaviour is unchanged.
    """
    try:
        archives_table\
            .select()\
            .where(archives_table.checksum==checksum).get()
        return True
    except Exception as e:
        # peewee-style .get() raises when no row matches; treat any
        # failure as "not present".
        logger.info(f"Archieve couldnt be found {e}")
        return False
def set_archives(archives_table, path, username, checksum):
    """Insert a new archive row into *archives_table*.

    Returns False when the insert fails (the error is only logged) and
    None on success — callers ignore the return value.
    """
    try:
        archives_table\
        .insert(
            path= path,
            username= username,
            checksum=checksum
        )\
        .execute()
    except Exception as e:
        logger.info(f"Archive insertion couldnt be found {e}")
        return False
    return
def checksum(src_path: str) -> str:
    """Return the SHA-256 hex digest of the file at *src_path*.

    The file is read in 64 KiB chunks so arbitrarily large archives can
    be hashed without loading them into memory.  (``hash`` is the
    module-level alias for ``hashlib``.)
    """
    CHUNK_SIZE = 65536
    digest = hash.sha256()
    with open(src_path, 'rb') as source:
        for chunk in iter(lambda: source.read(CHUNK_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.